github.com/pingcap/br@v5.3.0-alpha.0.20220125034240-ec59c7b6ce30+incompatible/cmd/br/debug.go

// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.

package main

import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"path"
	"reflect"

	"github.com/gogo/protobuf/proto"
	"github.com/pingcap/errors"
	backuppb "github.com/pingcap/kvproto/pkg/backup"
	"github.com/pingcap/kvproto/pkg/import_sstpb"
	"github.com/pingcap/log"
	"github.com/pingcap/parser/model"
	"github.com/spf13/cobra"
	"go.uber.org/zap"

	berrors "github.com/pingcap/br/pkg/errors"
	"github.com/pingcap/br/pkg/logutil"
	"github.com/pingcap/br/pkg/metautil"
	"github.com/pingcap/br/pkg/mock/mockid"
	"github.com/pingcap/br/pkg/restore"
	"github.com/pingcap/br/pkg/rtree"
	"github.com/pingcap/br/pkg/task"
	"github.com/pingcap/br/pkg/utils"
	"github.com/pingcap/br/pkg/version/build"
)

// NewDebugCommand returns a debug subcommand.
func NewDebugCommand() *cobra.Command {
	meta := &cobra.Command{
		Use:          "debug <subcommand>",
		Short:        "commands to check/debug backup data",
		SilenceUsage: false,
		PersistentPreRunE: func(c *cobra.Command, args []string) error {
			if err := Init(c); err != nil {
				return errors.Trace(err)
			}
			build.LogInfo(build.BR)
			utils.LogEnvVariables()
			task.LogArguments(c)
			return nil
		},
		// To be compatible with older BR.
		Aliases: []string{"validate"},
	}
	meta.AddCommand(newCheckSumCommand())
	meta.AddCommand(newBackupMetaCommand())
	meta.AddCommand(decodeBackupMetaCommand())
	meta.AddCommand(encodeBackupMetaCommand())
	meta.AddCommand(setPDConfigCommand())
	meta.Hidden = true

	return meta
}

// newCheckSumCommand returns the hidden "debug checksum" subcommand, which verifies each
// backup file's SHA-256 digest against the digest recorded in the backup metadata.
func newCheckSumCommand() *cobra.Command {
	command := &cobra.Command{
		Use:   "checksum",
		Short: "check the backup data",
		Args:  cobra.NoArgs,
		RunE: func(cmd *cobra.Command, _ []string) error {
			ctx, cancel := context.WithCancel(GetDefaultContext())
			defer cancel()

			var cfg task.Config
			if err := cfg.ParseFromFlags(cmd.Flags()); err != nil {
				return errors.Trace(err)
			}

			_, s, backupMeta, err := task.ReadBackupMeta(ctx, metautil.MetaFile, &cfg)
			if err != nil {
				return errors.Trace(err)
			}

			reader := metautil.NewMetaReader(backupMeta, s)
			dbs, err := utils.LoadBackupTables(ctx, reader)
			if err != nil {
				return errors.Trace(err)
			}

			for _, schema := range backupMeta.Schemas {
				dbInfo := &model.DBInfo{}
				err = json.Unmarshal(schema.Db, dbInfo)
				if err != nil {
					return errors.Trace(err)
				}
				tblInfo := &model.TableInfo{}
				err = json.Unmarshal(schema.Table, tblInfo)
				if err != nil {
					return errors.Trace(err)
				}
				tbl := dbs[dbInfo.Name.String()].GetTable(tblInfo.Name.String())

				var calCRC64 uint64
				var totalKVs uint64
				var totalBytes uint64
				for _, file := range tbl.Files {
					calCRC64 ^= file.Crc64Xor
					totalKVs += file.GetTotalKvs()
					totalBytes += file.GetTotalBytes()
					log.Info("file info", zap.Stringer("table", tblInfo.Name),
						zap.String("file", file.GetName()),
						zap.Uint64("crc64xor", file.GetCrc64Xor()),
						zap.Uint64("totalKvs", file.GetTotalKvs()),
						zap.Uint64("totalBytes", file.GetTotalBytes()),
						zap.Uint64("startVersion", file.GetStartVersion()),
						zap.Uint64("endVersion", file.GetEndVersion()),
						logutil.Key("startKey", file.GetStartKey()),
						logutil.Key("endKey", file.GetEndKey()),
					)

					var data []byte
					data, err = s.ReadFile(ctx, file.Name)
					if err != nil {
						return errors.Trace(err)
					}
					s := sha256.Sum256(data)
					if !bytes.Equal(s[:], file.Sha256) {
						return errors.Annotatef(berrors.ErrBackupChecksumMismatch, `
backup data checksum failed: %s may be changed
calculated sha256 is %s,
origin sha256 is %s`,
							file.Name, hex.EncodeToString(s[:]), hex.EncodeToString(file.Sha256))
					}
				}
				log.Info("table info", zap.Stringer("table", tblInfo.Name),
					zap.Uint64("CRC64", calCRC64),
					zap.Uint64("totalKvs", totalKVs),
					zap.Uint64("totalBytes", totalBytes),
					zap.Uint64("schemaTotalKvs", schema.TotalKvs),
					zap.Uint64("schemaTotalBytes", schema.TotalBytes),
					zap.Uint64("schemaCRC64", schema.Crc64Xor))
			}
			cmd.Println("backup data checksum succeeded!")
			return nil
		},
	}
	command.Hidden = true
	return command
}
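// verifySHA256 is a minimal illustrative sketch (this helper is not part of the original
// file) of the per-file integrity check performed by newCheckSumCommand above: it
// recomputes the SHA-256 digest of a file's contents and compares it with the digest
// recorded in the backup metadata. It relies only on crypto/sha256 and bytes, which are
// already imported above.
func verifySHA256(data, recorded []byte) bool {
	// Sum256 returns a [32]byte array; slice it before comparing with the stored digest.
	sum := sha256.Sum256(data)
	return bytes.Equal(sum[:], recorded)
}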
func newBackupMetaCommand() *cobra.Command {
	command := &cobra.Command{
		Use:          "backupmeta",
		Short:        "utilities of backupmeta",
		SilenceUsage: false,
	}
	command.AddCommand(newBackupMetaValidateCommand())
	return command
}

// newBackupMetaValidateCommand returns the "debug backupmeta validate" subcommand, which
// checks the key ranges recorded in backupmeta for overlaps and validates the rewrite
// rules produced by a simulated table creation.
func newBackupMetaValidateCommand() *cobra.Command {
	command := &cobra.Command{
		Use:   "validate",
		Short: "validate key range and rewrite rules of backupmeta",
		RunE: func(cmd *cobra.Command, _ []string) error {
			ctx, cancel := context.WithCancel(GetDefaultContext())
			defer cancel()

			tableIDOffset, err := cmd.Flags().GetUint64("offset")
			if err != nil {
				return errors.Trace(err)
			}

			var cfg task.Config
			if err = cfg.ParseFromFlags(cmd.Flags()); err != nil {
				return errors.Trace(err)
			}
			_, s, backupMeta, err := task.ReadBackupMeta(ctx, metautil.MetaFile, &cfg)
			if err != nil {
				log.Error("read backupmeta failed", zap.Error(err))
				return errors.Trace(err)
			}
			reader := metautil.NewMetaReader(backupMeta, s)
			dbs, err := utils.LoadBackupTables(ctx, reader)
			if err != nil {
				log.Error("load tables failed", zap.Error(err))
				return errors.Trace(err)
			}
			files := make([]*backuppb.File, 0)
			tables := make([]*metautil.Table, 0)
			for _, db := range dbs {
				for _, table := range db.Tables {
					files = append(files, table.Files...)
				}
				tables = append(tables, db.Tables...)
			}
			// Check whether the key ranges of the backup files overlap.
			rangeTree := rtree.NewRangeTree()
			for _, file := range files {
				if out := rangeTree.InsertRange(rtree.Range{
					StartKey: file.GetStartKey(),
					EndKey:   file.GetEndKey(),
				}); out != nil {
					log.Error(
						"file ranges overlapped",
						zap.Stringer("out", out),
						logutil.File(file),
					)
				}
			}

			tableIDAllocator := mockid.NewIDAllocator()
			// Advance the table ID allocator to the requested offset.
			for offset := uint64(0); offset < tableIDOffset; offset++ {
				_, _ = tableIDAllocator.Alloc() // Ignore error
			}
			rewriteRules := &restore.RewriteRules{
				Data: make([]*import_sstpb.RewriteRule, 0),
			}
			tableIDMap := make(map[int64]int64)
			// Simulate table creation to generate new table/index IDs and rewrite rules.
			for _, table := range tables {
				indexIDAllocator := mockid.NewIDAllocator()
				newTable := new(model.TableInfo)
				tableID, _ := tableIDAllocator.Alloc()
				newTable.ID = int64(tableID)
				newTable.Name = table.Info.Name
				newTable.Indices = make([]*model.IndexInfo, len(table.Info.Indices))
				for i, indexInfo := range table.Info.Indices {
					indexID, _ := indexIDAllocator.Alloc()
					newTable.Indices[i] = &model.IndexInfo{
						ID:   int64(indexID),
						Name: indexInfo.Name,
					}
				}
				rules := restore.GetRewriteRules(newTable, table.Info, 0)
				rewriteRules.Data = append(rewriteRules.Data, rules.Data...)
				tableIDMap[table.Info.ID] = int64(tableID)
			}
			// Validate the rewrite rules against every backup file.
			for _, file := range files {
				err = restore.ValidateFileRewriteRule(file, rewriteRules)
				if err != nil {
					return errors.Trace(err)
				}
			}
			cmd.Println("Check backupmeta done")
			return nil
		},
	}
	command.Flags().Uint64("offset", 0, "the offset of table id allocator")
	return command
}
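// hasOverlap is a minimal illustrative sketch (this helper is not part of the original
// file) of the overlap detection used by newBackupMetaValidateCommand above: the
// rtree.RangeTree reports a conflict by returning a non-nil value from InsertRange when
// the inserted key range overlaps a range already held in the tree.
func hasOverlap(ranges []rtree.Range) bool {
	tree := rtree.NewRangeTree()
	for _, rg := range ranges {
		// A non-nil return value means rg overlaps an existing range, mirroring the
		// "file ranges overlapped" check in the validate subcommand.
		if out := tree.InsertRange(rg); out != nil {
			return true
		}
	}
	return false
}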
// decodeBackupMetaCommand returns the "debug decode" subcommand, which decodes backupmeta
// into JSON, or prints a single field of it when --field is specified.
func decodeBackupMetaCommand() *cobra.Command {
	decodeBackupMetaCmd := &cobra.Command{
		Use:   "decode",
		Short: "decode backupmeta to json",
		Args:  cobra.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx, cancel := context.WithCancel(GetDefaultContext())
			defer cancel()

			var cfg task.Config
			if err := cfg.ParseFromFlags(cmd.Flags()); err != nil {
				return errors.Trace(err)
			}
			_, s, backupMeta, err := task.ReadBackupMeta(ctx, metautil.MetaFile, &cfg)
			if err != nil {
				return errors.Trace(err)
			}

			fieldName, _ := cmd.Flags().GetString("field")
			if fieldName == "" {
				// No field flag, write backupmeta to external storage in JSON format.
				backupMetaJSON, err := utils.MarshalBackupMeta(backupMeta)
				if err != nil {
					return errors.Trace(err)
				}
				err = s.WriteFile(ctx, metautil.MetaJSONFile, backupMetaJSON)
				if err != nil {
					return errors.Trace(err)
				}
				cmd.Printf("backupmeta decoded at %s\n", path.Join(cfg.Storage, metautil.MetaJSONFile))
				return nil
			}

			switch fieldName {
			// To be compatible with older BR.
			case "start-version":
				fieldName = "StartVersion"
			case "end-version":
				fieldName = "EndVersion"
			}

			_, found := reflect.TypeOf(*backupMeta).FieldByName(fieldName)
			if !found {
				cmd.Printf("field '%s' not found\n", fieldName)
				return nil
			}
			field := reflect.ValueOf(*backupMeta).FieldByName(fieldName)
			if !field.CanInterface() {
				cmd.Printf("field '%s' cannot be printed\n", fieldName)
			} else {
				cmd.Printf("%v\n", field.Interface())
			}
			return nil
		},
	}

	decodeBackupMetaCmd.Flags().String("field", "", "decode the specified field")

	return decodeBackupMetaCmd
}
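// exportedField is a minimal illustrative sketch (this helper is not part of the original
// file) of the reflection lookup used by decodeBackupMetaCommand above: it returns the
// value of an exported struct field by name, or false when the value is not a struct, the
// field does not exist, or it cannot be accessed through the reflect API.
func exportedField(v interface{}, name string) (interface{}, bool) {
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Struct {
		return nil, false
	}
	field := rv.FieldByName(name)
	if !field.IsValid() || !field.CanInterface() {
		return nil, false
	}
	return field.Interface(), true
}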
// encodeBackupMetaCommand returns the "debug encode" subcommand, which re-encodes the JSON
// form of backupmeta back into the protobuf backupmeta file.
func encodeBackupMetaCommand() *cobra.Command {
	encodeBackupMetaCmd := &cobra.Command{
		Use:   "encode",
		Short: "encode backupmeta json file to backupmeta",
		Args:  cobra.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx, cancel := context.WithCancel(GetDefaultContext())
			defer cancel()

			var cfg task.Config
			if err := cfg.ParseFromFlags(cmd.Flags()); err != nil {
				return errors.Trace(err)
			}
			_, s, err := task.GetStorage(ctx, &cfg)
			if err != nil {
				return errors.Trace(err)
			}

			metaData, err := s.ReadFile(ctx, metautil.MetaJSONFile)
			if err != nil {
				return errors.Trace(err)
			}

			backupMetaJSON, err := utils.UnmarshalBackupMeta(metaData)
			if err != nil {
				return errors.Trace(err)
			}
			backupMeta, err := proto.Marshal(backupMetaJSON)
			if err != nil {
				return errors.Trace(err)
			}

			fileName := metautil.MetaFile
			if ok, _ := s.FileExists(ctx, fileName); ok {
				// Do not overwrite the origin meta file.
				fileName += "_from_json"
			}
			err = s.WriteFile(ctx, fileName, backupMeta)
			if err != nil {
				return errors.Trace(err)
			}
			return nil
		},
	}
	return encodeBackupMetaCmd
}

// setPDConfigCommand returns the "debug reset-pd-config-as-default" subcommand, which
// resets the PD schedule configuration adjusted by BR back to its default values.
func setPDConfigCommand() *cobra.Command {
	pdConfigCmd := &cobra.Command{
		Use:   "reset-pd-config-as-default",
		Short: "reset pd config adjusted by BR to default value",
		Args:  cobra.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx, cancel := context.WithCancel(GetDefaultContext())
			defer cancel()

			var cfg task.Config
			if err := cfg.ParseFromFlags(cmd.Flags()); err != nil {
				return errors.Trace(err)
			}

			mgr, err := task.NewMgr(ctx, tidbGlue, cfg.PD, cfg.TLS, task.GetKeepalive(&cfg), cfg.CheckRequirements, false)
			if err != nil {
				return errors.Trace(err)
			}
			defer mgr.Close()

			if err := mgr.UpdatePDScheduleConfig(ctx); err != nil {
				return errors.Annotate(err, "failed to update PD schedule config")
			}
			log.Info("reset pd schedule config succeeded")
			return nil
		},
	}
	return pdConfigCmd
}
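// Example (illustrative sketch, not part of the original file): NewDebugCommand is meant
// to be attached to a root *cobra.Command, after which the subcommands above are
// reachable as `<root> debug checksum`, `<root> debug backupmeta validate`,
// `<root> debug decode`, `<root> debug encode`, and
// `<root> debug reset-pd-config-as-default`. The root command name "br" below is an
// assumption for the example.
//
//	rootCmd := &cobra.Command{Use: "br"}
//	rootCmd.AddCommand(NewDebugCommand())
//	_ = rootCmd.Execute()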