github.com/pingcap/br@v5.3.0-alpha.0.20220125034240-ec59c7b6ce30+incompatible/pkg/lightning/checkpoints/glue_checkpoint.go

// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package checkpoints

import (
    "context"
    "encoding/json"
    "fmt"
    "io"
    "strings"

    "github.com/pingcap/errors"
    "github.com/pingcap/parser/ast"
    "github.com/pingcap/tidb/types"
    "github.com/pingcap/tidb/util/chunk"
    "github.com/pingcap/tidb/util/sqlexec"
    "go.uber.org/zap"

    "github.com/pingcap/br/pkg/lightning/common"
    "github.com/pingcap/br/pkg/lightning/config"
    "github.com/pingcap/br/pkg/lightning/log"
    "github.com/pingcap/br/pkg/lightning/mydump"
    verify "github.com/pingcap/br/pkg/lightning/verification"
    "github.com/pingcap/br/pkg/version/build"
)

// Session is the subset of a TiDB session that GlueCheckpointsDB needs.
type Session interface {
    Close()
    Execute(context.Context, string) ([]sqlexec.RecordSet, error)
    CommitTxn(context.Context) error
    RollbackTxn(context.Context)
    PrepareStmt(sql string) (stmtID uint32, paramCount int, fields []*ast.ResultField, err error)
    ExecutePreparedStmt(ctx context.Context, stmtID uint32, param []types.Datum) (sqlexec.RecordSet, error)
    DropPreparedStmt(stmtID uint32) error
}

// GlueCheckpointsDB is almost the same as MySQLCheckpointsDB, but it uses TiDB's internal
// data structures, which take considerable effort to keep consistent with database/sql.
// TODO: Encapsulate Begin/Commit/Rollback txn, form SQL with args and query/iter/scan TiDB's RecordSet into an interface
// to reuse MySQLCheckpointsDB.
type GlueCheckpointsDB struct {
    // getSessionFunc will get a new session from TiDB
    getSessionFunc func() (Session, error)
    schema         string
}

var _ DB = (*GlueCheckpointsDB)(nil)
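// The sketch below is illustrative only and is not referenced anywhere in this
// package: it shows how a GlueCheckpointsDB is expected to be wired up, with one
// Session used for the DDL in NewGlueCheckpointsDB plus a factory that opens
// further sessions on demand. The schema name here is a placeholder, not
// necessarily the configured checkpoint schema.
func exampleNewGlueCheckpointsDB(ctx context.Context, se Session, newSession func() (Session, error)) (DB, error) {
    cpdb, err := NewGlueCheckpointsDB(ctx, se, newSession, "lightning_checkpoints_placeholder")
    if err != nil {
        return nil, errors.Trace(err)
    }
    // *GlueCheckpointsDB satisfies the DB interface, so callers can use it
    // wherever a checkpoints DB is required.
    return cpdb, nil
}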
// dropPreparedStmt drops the prepared statement and, if that fails,
// logs the error.
func dropPreparedStmt(session Session, stmtID uint32) {
    if err := session.DropPreparedStmt(stmtID); err != nil {
        log.L().Error("failed to drop prepared statement", log.ShortError(err))
    }
}

// NewGlueCheckpointsDB creates a GlueCheckpointsDB, creating the checkpoint
// schema and tables if they do not exist yet.
func NewGlueCheckpointsDB(ctx context.Context, se Session, f func() (Session, error), schemaName string) (*GlueCheckpointsDB, error) {
    var escapedSchemaName strings.Builder
    common.WriteMySQLIdentifier(&escapedSchemaName, schemaName)
    schema := escapedSchemaName.String()
    logger := log.With(zap.String("schema", schemaName))

    sql := fmt.Sprintf(CreateDBTemplate, schema)
    err := common.Retry("create checkpoints database", logger, func() error {
        _, err := se.Execute(ctx, sql)
        return err
    })
    if err != nil {
        return nil, errors.Trace(err)
    }

    sql = fmt.Sprintf(CreateTaskTableTemplate, schema, CheckpointTableNameTask)
    err = common.Retry("create task checkpoints table", logger, func() error {
        _, err := se.Execute(ctx, sql)
        return err
    })
    if err != nil {
        return nil, errors.Trace(err)
    }

    sql = fmt.Sprintf(CreateTableTableTemplate, schema, CheckpointTableNameTable)
    err = common.Retry("create table checkpoints table", logger, func() error {
        _, err := se.Execute(ctx, sql)
        return err
    })
    if err != nil {
        return nil, errors.Trace(err)
    }

    sql = fmt.Sprintf(CreateEngineTableTemplate, schema, CheckpointTableNameEngine)
    err = common.Retry("create engine checkpoints table", logger, func() error {
        _, err := se.Execute(ctx, sql)
        return err
    })
    if err != nil {
        return nil, errors.Trace(err)
    }

    sql = fmt.Sprintf(CreateChunkTableTemplate, schema, CheckpointTableNameChunk)
    err = common.Retry("create chunks checkpoints table", logger, func() error {
        _, err := se.Execute(ctx, sql)
        return err
    })
    if err != nil {
        return nil, errors.Trace(err)
    }

    return &GlueCheckpointsDB{
        getSessionFunc: f,
        schema:         schema,
    }, nil
}

// Initialize writes the task checkpoint and one table checkpoint record for
// every table of this import task.
func (g GlueCheckpointsDB) Initialize(ctx context.Context, cfg *config.Config, dbInfo map[string]*TidbDBInfo) error {
    logger := log.L()
    se, err := g.getSessionFunc()
    if err != nil {
        return errors.Trace(err)
    }
    defer se.Close()

    err = Transact(ctx, "insert checkpoints", se, logger, func(c context.Context, s Session) error {
        stmtID, _, _, err := s.PrepareStmt(fmt.Sprintf(InitTaskTemplate, g.schema, CheckpointTableNameTask))
        if err != nil {
            return errors.Trace(err)
        }
        defer dropPreparedStmt(s, stmtID)
        _, err = s.ExecutePreparedStmt(c, stmtID, []types.Datum{
            types.NewIntDatum(cfg.TaskID),
            types.NewStringDatum(cfg.Mydumper.SourceDir),
            types.NewStringDatum(cfg.TikvImporter.Backend),
            types.NewStringDatum(cfg.TikvImporter.Addr),
            types.NewStringDatum(cfg.TiDB.Host),
            types.NewIntDatum(int64(cfg.TiDB.Port)),
            types.NewStringDatum(cfg.TiDB.PdAddr),
            types.NewStringDatum(cfg.TikvImporter.SortedKVDir),
            types.NewStringDatum(build.ReleaseVersion),
        })
        if err != nil {
            return errors.Trace(err)
        }

        stmtID2, _, _, err := s.PrepareStmt(fmt.Sprintf(InitTableTemplate, g.schema, CheckpointTableNameTable))
        if err != nil {
            return errors.Trace(err)
        }
        defer dropPreparedStmt(s, stmtID2)

        for _, db := range dbInfo {
            for _, table := range db.Tables {
                tableName := common.UniqueTable(db.Name, table.Name)
                _, err = s.ExecutePreparedStmt(c, stmtID2, []types.Datum{
                    types.NewIntDatum(cfg.TaskID),
                    types.NewStringDatum(tableName),
                    types.NewIntDatum(0),
                    types.NewIntDatum(table.ID),
                })
                if err != nil {
                    return errors.Trace(err)
                }
            }
        }
        return nil
    })
    return errors.Trace(err)
}

func (g GlueCheckpointsDB) TaskCheckpoint(ctx context.Context) (*TaskCheckpoint, error) {
    logger := log.L()
    sql := fmt.Sprintf(ReadTaskTemplate, g.schema, CheckpointTableNameTask)
    se, err := g.getSessionFunc()
    if err != nil {
        return nil, errors.Trace(err)
    }
    defer se.Close()

    var taskCp *TaskCheckpoint
    err = common.Retry("fetch task checkpoint", logger, func() error {
        rs, err := se.Execute(ctx, sql)
        if err != nil {
            return errors.Trace(err)
        }
        r := rs[0]
        defer r.Close()
        req := r.NewChunk()
        err = r.Next(ctx, req)
        if err != nil {
            return err
        }
        if req.NumRows() == 0 {
            return nil
        }

        row := req.GetRow(0)
        taskCp = &TaskCheckpoint{}
        taskCp.TaskID = row.GetInt64(0)
        taskCp.SourceDir = row.GetString(1)
        taskCp.Backend = row.GetString(2)
        taskCp.ImporterAddr = row.GetString(3)
        taskCp.TiDBHost = row.GetString(4)
        taskCp.TiDBPort = int(row.GetInt64(5))
        taskCp.PdAddr = row.GetString(6)
        taskCp.SortedKVDir = row.GetString(7)
        taskCp.LightningVer = row.GetString(8)
        return nil
    })
    if err != nil {
        return nil, errors.Trace(err)
    }
    return taskCp, nil
}

func (g GlueCheckpointsDB) Get(ctx context.Context, tableName string) (*TableCheckpoint, error) {
    cp := &TableCheckpoint{
        Engines: map[int32]*EngineCheckpoint{},
    }
    logger := log.With(zap.String("table", tableName))
    se, err := g.getSessionFunc()
    if err != nil {
        return nil, errors.Trace(err)
    }
    defer se.Close()

    tableName = common.InterpolateMySQLString(tableName)
    err = Transact(ctx, "read checkpoint", se, logger, func(c context.Context, s Session) error {
        // 1. Populate the engines.
        sql := fmt.Sprintf(ReadEngineTemplate, g.schema, CheckpointTableNameEngine)
        sql = strings.ReplaceAll(sql, "?", tableName)
        rs, err := s.Execute(ctx, sql)
        if err != nil {
            return errors.Trace(err)
        }
        r := rs[0]
        req := r.NewChunk()
        it := chunk.NewIterator4Chunk(req)
        for {
            err = r.Next(ctx, req)
            if err != nil {
                r.Close()
                return err
            }
            if req.NumRows() == 0 {
                break
            }

            for row := it.Begin(); row != it.End(); row = it.Next() {
                engineID := int32(row.GetInt64(0))
                status := uint8(row.GetUint64(1))
                cp.Engines[engineID] = &EngineCheckpoint{
                    Status: CheckpointStatus(status),
                }
            }
        }
        r.Close()

        // 2. Populate the chunks.
        sql = fmt.Sprintf(ReadChunkTemplate, g.schema, CheckpointTableNameChunk)
        sql = strings.ReplaceAll(sql, "?", tableName)
        rs, err = s.Execute(ctx, sql)
        if err != nil {
            return errors.Trace(err)
        }
        r = rs[0]
        req = r.NewChunk()
        it = chunk.NewIterator4Chunk(req)
        for {
            err = r.Next(ctx, req)
            if err != nil {
                r.Close()
                return err
            }
            if req.NumRows() == 0 {
                break
            }

            for row := it.Begin(); row != it.End(); row = it.Next() {
                value := &ChunkCheckpoint{}
                engineID := int32(row.GetInt64(0))
                value.Key.Path = row.GetString(1)
                value.Key.Offset = row.GetInt64(2)
                value.FileMeta.Type = mydump.SourceType(row.GetInt64(3))
                value.FileMeta.Compression = mydump.Compression(row.GetInt64(4))
                value.FileMeta.SortKey = row.GetString(5)
                value.FileMeta.FileSize = row.GetInt64(6)
                colPerm := row.GetBytes(7)
                value.Chunk.Offset = row.GetInt64(8)
                value.Chunk.EndOffset = row.GetInt64(9)
                value.Chunk.PrevRowIDMax = row.GetInt64(10)
                value.Chunk.RowIDMax = row.GetInt64(11)
                kvcBytes := row.GetUint64(12)
                kvcKVs := row.GetUint64(13)
                kvcChecksum := row.GetUint64(14)
                value.Timestamp = row.GetInt64(15)

                value.FileMeta.Path = value.Key.Path
                value.Checksum = verify.MakeKVChecksum(kvcBytes, kvcKVs, kvcChecksum)
                if err := json.Unmarshal(colPerm, &value.ColumnPermutation); err != nil {
                    r.Close()
                    return errors.Trace(err)
                }
                cp.Engines[engineID].Chunks = append(cp.Engines[engineID].Chunks, value)
            }
        }
        r.Close()

        // 3. Fill in the remaining table info
        sql = fmt.Sprintf(ReadTableRemainTemplate, g.schema, CheckpointTableNameTable)
        sql = strings.ReplaceAll(sql, "?", tableName)
        rs, err = s.Execute(ctx, sql)
        if err != nil {
            return errors.Trace(err)
        }
        r = rs[0]
        defer r.Close()
        req = r.NewChunk()
        err = r.Next(ctx, req)
        if err != nil {
            return err
        }
        if req.NumRows() == 0 {
            return nil
        }

        row := req.GetRow(0)
        cp.Status = CheckpointStatus(row.GetUint64(0))
        cp.AllocBase = row.GetInt64(1)
        cp.TableID = row.GetInt64(2)
        return nil
    })

    if err != nil {
        return nil, errors.Trace(err)
    }

    return cp, nil
}

func (g GlueCheckpointsDB) Close() error {
    return nil
}

func (g GlueCheckpointsDB) InsertEngineCheckpoints(ctx context.Context, tableName string, checkpointMap map[int32]*EngineCheckpoint) error {
    logger := log.With(zap.String("table", tableName))
    se, err := g.getSessionFunc()
    if err != nil {
        return errors.Trace(err)
    }
    defer se.Close()

    err = Transact(ctx, "update engine checkpoints", se, logger, func(c context.Context, s Session) error {
        engineStmt, _, _, err := s.PrepareStmt(fmt.Sprintf(ReplaceEngineTemplate, g.schema, CheckpointTableNameEngine))
        if err != nil {
            return errors.Trace(err)
        }
        defer dropPreparedStmt(s, engineStmt)

        chunkStmt, _, _, err := s.PrepareStmt(fmt.Sprintf(ReplaceChunkTemplate, g.schema, CheckpointTableNameChunk))
        if err != nil {
            return errors.Trace(err)
        }
        defer dropPreparedStmt(s, chunkStmt)

        for engineID, engine := range checkpointMap {
            _, err := s.ExecutePreparedStmt(c, engineStmt, []types.Datum{
                types.NewStringDatum(tableName),
                types.NewIntDatum(int64(engineID)),
                types.NewUintDatum(uint64(engine.Status)),
            })
            if err != nil {
                return errors.Trace(err)
            }
            for _, value := range engine.Chunks {
                columnPerm, err := json.Marshal(value.ColumnPermutation)
                if err != nil {
                    return errors.Trace(err)
                }
                _, err = s.ExecutePreparedStmt(c, chunkStmt, []types.Datum{
                    types.NewStringDatum(tableName),
                    types.NewIntDatum(int64(engineID)),
                    types.NewStringDatum(value.Key.Path),
                    types.NewIntDatum(value.Key.Offset),
                    types.NewIntDatum(int64(value.FileMeta.Type)),
                    types.NewIntDatum(int64(value.FileMeta.Compression)),
                    types.NewStringDatum(value.FileMeta.SortKey),
                    types.NewIntDatum(value.FileMeta.FileSize),
                    types.NewBytesDatum(columnPerm),
                    types.NewIntDatum(value.Chunk.Offset),
                    types.NewIntDatum(value.Chunk.EndOffset),
                    types.NewIntDatum(value.Chunk.PrevRowIDMax),
                    types.NewIntDatum(value.Chunk.RowIDMax),
                    types.NewIntDatum(value.Timestamp),
                })
                if err != nil {
                    return errors.Trace(err)
                }
            }
        }
        return nil
    })
    return errors.Trace(err)
}

func (g GlueCheckpointsDB) Update(checkpointDiffs map[string]*TableCheckpointDiff) {
    logger := log.L()
    se, err := g.getSessionFunc()
    if err != nil {
        log.L().Error("can't get a session to update GlueCheckpointsDB", zap.Error(errors.Trace(err)))
        return
    }
    defer se.Close()

    chunkQuery := fmt.Sprintf(UpdateChunkTemplate, g.schema, CheckpointTableNameChunk)
    rebaseQuery := fmt.Sprintf(UpdateTableRebaseTemplate, g.schema, CheckpointTableNameTable)
    tableStatusQuery := fmt.Sprintf(UpdateTableStatusTemplate, g.schema, CheckpointTableNameTable)
    engineStatusQuery := fmt.Sprintf(UpdateEngineTemplate, g.schema, CheckpointTableNameEngine)
    err = Transact(context.Background(), "update checkpoints", se, logger, func(c context.Context, s Session) error {
        chunkStmt, _, _, err := s.PrepareStmt(chunkQuery)
        if err != nil {
            return errors.Trace(err)
        }
        defer dropPreparedStmt(s, chunkStmt)
        rebaseStmt, _, _, err := s.PrepareStmt(rebaseQuery)
        if err != nil {
            return errors.Trace(err)
        }
        defer dropPreparedStmt(s, rebaseStmt)
        tableStatusStmt, _, _, err := s.PrepareStmt(tableStatusQuery)
        if err != nil {
            return errors.Trace(err)
        }
        defer dropPreparedStmt(s, tableStatusStmt)
        engineStatusStmt, _, _, err := s.PrepareStmt(engineStatusQuery)
        if err != nil {
            return errors.Trace(err)
        }
        defer dropPreparedStmt(s, engineStatusStmt)

        for tableName, cpd := range checkpointDiffs {
            if cpd.hasStatus {
                _, err := s.ExecutePreparedStmt(c, tableStatusStmt, []types.Datum{
                    types.NewUintDatum(uint64(cpd.status)),
                    types.NewStringDatum(tableName),
                })
                if err != nil {
                    return errors.Trace(err)
                }
            }
            if cpd.hasRebase {
                _, err := s.ExecutePreparedStmt(c, rebaseStmt, []types.Datum{
                    types.NewIntDatum(cpd.allocBase),
                    types.NewStringDatum(tableName),
                })
                if err != nil {
                    return errors.Trace(err)
                }
            }
            for engineID, engineDiff := range cpd.engines {
                if engineDiff.hasStatus {
                    _, err := s.ExecutePreparedStmt(c, engineStatusStmt, []types.Datum{
                        types.NewUintDatum(uint64(engineDiff.status)),
                        types.NewStringDatum(tableName),
                        types.NewIntDatum(int64(engineID)),
                    })
                    if err != nil {
                        return errors.Trace(err)
                    }
                }
                for key, diff := range engineDiff.chunks {
                    columnPerm, err := json.Marshal(diff.columnPermutation)
                    if err != nil {
                        return errors.Trace(err)
                    }
                    _, err = s.ExecutePreparedStmt(c, chunkStmt, []types.Datum{
                        types.NewIntDatum(diff.pos),
                        types.NewIntDatum(diff.rowID),
                        types.NewUintDatum(diff.checksum.SumSize()),
                        types.NewUintDatum(diff.checksum.SumKVS()),
                        types.NewUintDatum(diff.checksum.Sum()),
                        types.NewBytesDatum(columnPerm),
                        types.NewStringDatum(tableName),
                        types.NewIntDatum(int64(engineID)),
                        types.NewStringDatum(key.Path),
                        types.NewIntDatum(key.Offset),
                    })
                    if err != nil {
                        return errors.Trace(err)
                    }
                }
            }
        }
        return nil
    })
    if err != nil {
        log.L().Error("save checkpoint failed", zap.Error(err))
    }
}

func (g GlueCheckpointsDB) RemoveCheckpoint(ctx context.Context, tableName string) error {
    logger := log.With(zap.String("table", tableName))
    se, err := g.getSessionFunc()
    if err != nil {
        return errors.Trace(err)
    }
    defer se.Close()

    if tableName == allTables {
        return common.Retry("remove all checkpoints", logger, func() error {
            _, err := se.Execute(ctx, "DROP SCHEMA "+g.schema)
            return err
        })
    }
    tableName = common.InterpolateMySQLString(tableName)
    deleteChunkQuery := fmt.Sprintf(DeleteCheckpointRecordTemplate, g.schema, CheckpointTableNameChunk)
    deleteChunkQuery = strings.ReplaceAll(deleteChunkQuery, "?", tableName)
    deleteEngineQuery := fmt.Sprintf(DeleteCheckpointRecordTemplate, g.schema, CheckpointTableNameEngine)
    deleteEngineQuery = strings.ReplaceAll(deleteEngineQuery, "?", tableName)
    deleteTableQuery := fmt.Sprintf(DeleteCheckpointRecordTemplate, g.schema, CheckpointTableNameTable)
    deleteTableQuery = strings.ReplaceAll(deleteTableQuery, "?", tableName)

    return errors.Trace(Transact(ctx, "remove checkpoints", se, logger, func(c context.Context, s Session) error {
        if _, e := s.Execute(c, deleteChunkQuery); e != nil {
            return e
        }
        if _, e := s.Execute(c, deleteEngineQuery); e != nil {
            return e
        }
        if _, e := s.Execute(c, deleteTableQuery); e != nil {
            return e
        }
        return nil
    }))
}

func (g GlueCheckpointsDB) MoveCheckpoints(ctx context.Context, taskID int64) error {
    newSchema := fmt.Sprintf("`%s.%d.bak`", g.schema[1:len(g.schema)-1], taskID)
    logger := log.With(zap.Int64("taskID", taskID))
    se, err := g.getSessionFunc()
    if err != nil {
        return errors.Trace(err)
    }
    defer se.Close()

    err = common.Retry("create backup checkpoints schema", logger, func() error {
        _, err := se.Execute(ctx, "CREATE SCHEMA IF NOT EXISTS "+newSchema)
        return err
    })
    if err != nil {
        return errors.Trace(err)
    }
    for _, tbl := range []string{
        CheckpointTableNameChunk, CheckpointTableNameEngine,
        CheckpointTableNameTable, CheckpointTableNameTask,
    } {
        query := fmt.Sprintf("RENAME TABLE %[1]s.%[3]s TO %[2]s.%[3]s", g.schema, newSchema, tbl)
        err := common.Retry(fmt.Sprintf("move %s checkpoints table", tbl), logger, func() error {
            _, err := se.Execute(ctx, query)
            return err
        })
        if err != nil {
            return errors.Trace(err)
        }
    }
    return nil
}

func (g GlueCheckpointsDB) GetLocalStoringTables(ctx context.Context) (map[string][]int32, error) {
    se, err := g.getSessionFunc()
    if err != nil {
        return nil, errors.Trace(err)
    }
    defer se.Close()

    var targetTables map[string][]int32

    // lightning didn't check CheckpointStatusMaxInvalid before this function is called, so we skip invalid ones
    // engines should exist if
    // 1. table status is earlier than CheckpointStatusIndexImported, and
    // 2. engine status is earlier than CheckpointStatusImported, and
    // 3. chunk has been read

    query := fmt.Sprintf(`
        SELECT DISTINCT t.table_name, c.engine_id
        FROM %s.%s t, %s.%s c, %s.%s e
        WHERE t.table_name = c.table_name AND t.table_name = e.table_name AND c.engine_id = e.engine_id
            AND %d < t.status AND t.status < %d
            AND %d < e.status AND e.status < %d
            AND c.pos > c.offset;`,
        g.schema, CheckpointTableNameTable, g.schema, CheckpointTableNameChunk, g.schema, CheckpointTableNameEngine,
        CheckpointStatusMaxInvalid, CheckpointStatusIndexImported,
        CheckpointStatusMaxInvalid, CheckpointStatusImported)

    err = common.Retry("get local storing tables", log.L(), func() error {
        targetTables = make(map[string][]int32)
        rs, err := se.Execute(ctx, query)
        if err != nil {
            return errors.Trace(err)
        }
        rows, err := drainFirstRecordSet(ctx, rs)
        if err != nil {
            return errors.Trace(err)
        }

        for _, row := range rows {
            tableName := row.GetString(0)
            engineID := int32(row.GetInt64(1))
            targetTables[tableName] = append(targetTables[tableName], engineID)
        }
        return nil
    })
    if err != nil {
        return nil, errors.Trace(err)
    }

    return targetTables, err
}

func (g GlueCheckpointsDB) IgnoreErrorCheckpoint(ctx context.Context, tableName string) error {
    logger := log.With(zap.String("table", tableName))
    se, err := g.getSessionFunc()
    if err != nil {
        return errors.Trace(err)
    }
    defer se.Close()

    var colName string
    if tableName == allTables {
        // This will expand to `WHERE 'all' = 'all'`, effectively allowing
        // all tables to be included.
        colName = stringLitAll
    } else {
        colName = columnTableName
    }

    tableName = common.InterpolateMySQLString(tableName)

    engineQuery := fmt.Sprintf(`
        UPDATE %s.%s SET status = %d WHERE %s = %s AND status <= %d;
    `, g.schema, CheckpointTableNameEngine, CheckpointStatusLoaded, colName, tableName, CheckpointStatusMaxInvalid)
    tableQuery := fmt.Sprintf(`
        UPDATE %s.%s SET status = %d WHERE %s = %s AND status <= %d;
    `, g.schema, CheckpointTableNameTable, CheckpointStatusLoaded, colName, tableName, CheckpointStatusMaxInvalid)
    return errors.Trace(Transact(ctx, "ignore error checkpoints", se, logger, func(c context.Context, s Session) error {
        if _, e := s.Execute(c, engineQuery); e != nil {
            return e
        }
        if _, e := s.Execute(c, tableQuery); e != nil {
            return e
        }
        return nil
    }))
}

func (g GlueCheckpointsDB) DestroyErrorCheckpoint(ctx context.Context, tableName string) ([]DestroyedTableCheckpoint, error) {
    logger := log.With(zap.String("table", tableName))
    se, err := g.getSessionFunc()
    if err != nil {
        return nil, errors.Trace(err)
    }
    defer se.Close()

    var colName, aliasedColName string

    if tableName == allTables {
        // These will expand to `WHERE 'all' = 'all'`, effectively allowing
        // all tables to be included.
        colName = stringLitAll
        aliasedColName = stringLitAll
    } else {
        colName = columnTableName
        aliasedColName = "t.table_name"
    }

    tableName = common.InterpolateMySQLString(tableName)

    selectQuery := fmt.Sprintf(`
        SELECT
            t.table_name,
            COALESCE(MIN(e.engine_id), 0),
            COALESCE(MAX(e.engine_id), -1)
        FROM %[1]s.%[4]s t
        LEFT JOIN %[1]s.%[5]s e ON t.table_name = e.table_name
        WHERE %[2]s = %[6]s AND t.status <= %[3]d
        GROUP BY t.table_name;
    `, g.schema, aliasedColName, CheckpointStatusMaxInvalid, CheckpointTableNameTable, CheckpointTableNameEngine, tableName)
    deleteChunkQuery := fmt.Sprintf(`
        DELETE FROM %[1]s.%[4]s WHERE table_name IN (SELECT table_name FROM %[1]s.%[5]s WHERE %[2]s = %[6]s AND status <= %[3]d)
    `, g.schema, colName, CheckpointStatusMaxInvalid, CheckpointTableNameChunk, CheckpointTableNameTable, tableName)
    deleteEngineQuery := fmt.Sprintf(`
        DELETE FROM %[1]s.%[4]s WHERE table_name IN (SELECT table_name FROM %[1]s.%[5]s WHERE %[2]s = %[6]s AND status <= %[3]d)
    `, g.schema, colName, CheckpointStatusMaxInvalid, CheckpointTableNameEngine, CheckpointTableNameTable, tableName)
    deleteTableQuery := fmt.Sprintf(`
        DELETE FROM %s.%s WHERE %s = %s AND status <= %d
    `, g.schema, CheckpointTableNameTable, colName, tableName, CheckpointStatusMaxInvalid)

    var targetTables []DestroyedTableCheckpoint
    err = Transact(ctx, "destroy error checkpoints", se, logger, func(c context.Context, s Session) error {
        // clean because it's in a retry
        targetTables = nil
        rs, err := s.Execute(c, selectQuery)
        if err != nil {
            return errors.Trace(err)
        }
        r := rs[0]
        req := r.NewChunk()
        it := chunk.NewIterator4Chunk(req)
        for {
            err = r.Next(ctx, req)
            if err != nil {
                r.Close()
                return err
            }
            if req.NumRows() == 0 {
                break
            }

            for row := it.Begin(); row != it.End(); row = it.Next() {
                var dtc DestroyedTableCheckpoint
                dtc.TableName = row.GetString(0)
                dtc.MinEngineID = int32(row.GetInt64(1))
                dtc.MaxEngineID = int32(row.GetInt64(2))
                targetTables = append(targetTables, dtc)
            }
        }
        r.Close()

        if _, e := s.Execute(c, deleteChunkQuery); e != nil {
            return errors.Trace(e)
        }
        if _, e := s.Execute(c, deleteEngineQuery); e != nil {
            return errors.Trace(e)
        }
        if _, e := s.Execute(c, deleteTableQuery); e != nil {
            return errors.Trace(e)
        }
        return nil
    })

    if err != nil {
        return nil, errors.Trace(err)
    }

    return targetTables, nil
}

func (g GlueCheckpointsDB) DumpTables(ctx context.Context, csv io.Writer) error {
    return errors.Errorf("dumping glue checkpoints into CSV is not supported")
}

func (g GlueCheckpointsDB) DumpEngines(ctx context.Context, csv io.Writer) error {
    return errors.Errorf("dumping glue checkpoints into CSV is not supported")
}

func (g GlueCheckpointsDB) DumpChunks(ctx context.Context, csv io.Writer) error {
    return errors.Errorf("dumping glue checkpoints into CSV is not supported")
}

// Transact runs the given action inside an explicit transaction on the session,
// wrapped in common.Retry. It rolls back the transaction when the action
// returns an error.
func Transact(ctx context.Context, purpose string, s Session, logger log.Logger, action func(context.Context, Session) error) error {
    return common.Retry(purpose, logger, func() error {
        _, err := s.Execute(ctx, "BEGIN")
        if err != nil {
            return errors.Annotate(err, "begin transaction failed")
        }
        err = action(ctx, s)
        if err != nil {
            s.RollbackTxn(ctx)
            return err
        }
        err = s.CommitTxn(ctx)
        if err != nil {
            return errors.Annotate(err, "commit transaction failed")
        }
        return nil
    })
}
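// The sketch below is illustrative only and is not called anywhere: it shows
// the Transact usage pattern the methods above follow, i.e. running a
// prepared-statement write inside a retried transaction. The SQL text is a
// placeholder rather than one of this package's real templates.
func exampleTransact(ctx context.Context, s Session, logger log.Logger) error {
    err := Transact(ctx, "example write", s, logger, func(c context.Context, s Session) error {
        stmtID, _, _, err := s.PrepareStmt("INSERT INTO placeholder_table (id) VALUES (?)")
        if err != nil {
            return errors.Trace(err)
        }
        defer dropPreparedStmt(s, stmtID)
        _, err = s.ExecutePreparedStmt(c, stmtID, []types.Datum{types.NewIntDatum(1)})
        return errors.Trace(err)
    })
    return errors.Trace(err)
}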
// TODO: will use drainFirstRecordSet to reduce repetition in GlueCheckpointsDB later
func drainFirstRecordSet(ctx context.Context, rss []sqlexec.RecordSet) ([]chunk.Row, error) {
    if len(rss) != 1 {
        return nil, errors.New("given result set doesn't have length 1")
    }
    rs := rss[0]
    var rows []chunk.Row
    req := rs.NewChunk()
    for {
        err := rs.Next(ctx, req)
        if err != nil || req.NumRows() == 0 {
            rs.Close()
            return rows, err
        }
        iter := chunk.NewIterator4Chunk(req)
        for r := iter.Begin(); r != iter.End(); r = iter.Next() {
            rows = append(rows, r)
        }
        req = chunk.Renew(req, 1024)
    }
}
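// The sketch below is illustrative only and is not called anywhere: it shows
// the simplification the TODO above refers to, draining a query's single
// record set with drainFirstRecordSet instead of hand-rolling the chunk
// iteration as the methods above currently do. The query is a placeholder.
func exampleDrainFirstRecordSet(ctx context.Context, s Session) ([]chunk.Row, error) {
    rs, err := s.Execute(ctx, "SELECT 1")
    if err != nil {
        return nil, errors.Trace(err)
    }
    // drainFirstRecordSet closes the record set and returns every row of the
    // first (and only) result.
    return drainFirstRecordSet(ctx, rs)
}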