github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/dm/syncer/syncer_test.go (about) 1 // Copyright 2019 PingCAP, Inc. 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // See the License for the specific language governing permissions and 12 // limitations under the License. 13 14 package syncer 15 16 import ( 17 "context" 18 "database/sql" 19 "fmt" 20 "os" 21 "path/filepath" 22 "strconv" 23 "strings" 24 "sync" 25 "testing" 26 "time" 27 28 sqlmock "github.com/DATA-DOG/go-sqlmock" 29 "github.com/go-mysql-org/go-mysql/mysql" 30 "github.com/go-mysql-org/go-mysql/replication" 31 _ "github.com/go-sql-driver/mysql" 32 . "github.com/pingcap/check" 33 "github.com/pingcap/failpoint" 34 "github.com/pingcap/tidb/pkg/infoschema" 35 "github.com/pingcap/tidb/pkg/parser" 36 "github.com/pingcap/tidb/pkg/parser/ast" 37 pmysql "github.com/pingcap/tidb/pkg/parser/mysql" 38 "github.com/pingcap/tidb/pkg/util/filter" 39 regexprrouter "github.com/pingcap/tidb/pkg/util/regexpr-router" 40 router "github.com/pingcap/tidb/pkg/util/table-router" 41 "github.com/pingcap/tiflow/dm/config" 42 "github.com/pingcap/tiflow/dm/pb" 43 "github.com/pingcap/tiflow/dm/pkg/binlog" 44 "github.com/pingcap/tiflow/dm/pkg/binlog/event" 45 "github.com/pingcap/tiflow/dm/pkg/binlog/reader" 46 "github.com/pingcap/tiflow/dm/pkg/conn" 47 tcontext "github.com/pingcap/tiflow/dm/pkg/context" 48 "github.com/pingcap/tiflow/dm/pkg/cputil" 49 "github.com/pingcap/tiflow/dm/pkg/gtid" 50 "github.com/pingcap/tiflow/dm/pkg/log" 51 parserpkg "github.com/pingcap/tiflow/dm/pkg/parser" 52 "github.com/pingcap/tiflow/dm/pkg/retry" 53 "github.com/pingcap/tiflow/dm/pkg/schema" 54 "github.com/pingcap/tiflow/dm/pkg/terror" 55 "github.com/pingcap/tiflow/dm/pkg/utils" 56 "github.com/pingcap/tiflow/dm/syncer/binlogstream" 57 "github.com/pingcap/tiflow/dm/syncer/dbconn" 58 "github.com/pingcap/tiflow/dm/syncer/metrics" 59 bf "github.com/pingcap/tiflow/pkg/binlog-filter" 60 cm "github.com/pingcap/tiflow/pkg/column-mapping" 61 "github.com/pingcap/tiflow/pkg/errorutil" 62 "github.com/pingcap/tiflow/pkg/sqlmodel" 63 "github.com/stretchr/testify/require" 64 "go.uber.org/zap" 65 ) 66 67 var _ = Suite(&testSyncerSuite{}) 68 69 func TestSuite(t *testing.T) { 70 TestingT(t) 71 } 72 73 type ( 74 mockBinlogEvents []mockBinlogEvent 75 mockBinlogEvent struct { 76 typ int 77 args []interface{} 78 } 79 ) 80 81 const ( 82 DBCreate = iota 83 DBDrop 84 TableCreate 85 TableDrop 86 87 DDL 88 89 Write 90 Update 91 Delete 92 93 DMLQuery 94 95 Headers 96 Rotate 97 ) 98 99 type testSyncerSuite struct { 100 cfg *config.SubTaskConfig 101 eventsGenerator *event.Generator 102 } 103 104 type MockStreamer struct { 105 events []*replication.BinlogEvent 106 idx uint32 107 pending bool 108 } 109 110 func (m *MockStreamer) GetEvent(ctx context.Context) (*replication.BinlogEvent, error) { 111 if int(m.idx) >= len(m.events) { 112 if m.pending { 113 <-ctx.Done() 114 } 115 return nil, context.Canceled 116 } 117 e := m.events[m.idx] 118 m.idx++ 119 return e, nil 120 } 121 122 type MockStreamProducer struct { 123 events []*replication.BinlogEvent 124 } 125 126 func (mp *MockStreamProducer) GenerateStreamFrom(location binlog.Location) (reader.Streamer, error) { 127 if location.Position.Pos == 4 { 128 return &MockStreamer{mp.events, 0, false}, nil 129 } 130 bytesLen := 0 131 idx := uint32(0) 132 for i, e := range mp.events { 133 bytesLen += len(e.RawData) 134 if location.Position.Pos == uint32(bytesLen) { 135 idx = uint32(i) 136 break 137 } 138 } 139 return &MockStreamer{mp.events, idx, false}, nil 140 } 141 142 func (s *testSyncerSuite) SetUpSuite(c *C) { 143 s.cfg = genDefaultSubTaskConfig4Test() 144 s.resetEventsGenerator(c) 145 c.Assert(log.InitLogger(&log.Config{}), IsNil) 146 } 147 148 func (s *testSyncerSuite) generateEvents(binlogEvents mockBinlogEvents, c *C) []*replication.BinlogEvent { 149 events := make([]*replication.BinlogEvent, 0, 1024) 150 for _, e := range binlogEvents { 151 switch e.typ { 152 case DBCreate: 153 evs, _, err := s.eventsGenerator.GenCreateDatabaseEvents(e.args[0].(string)) 154 c.Assert(err, IsNil) 155 events = append(events, evs...) 156 case DBDrop: 157 evs, _, err := s.eventsGenerator.GenDropDatabaseEvents(e.args[0].(string)) 158 c.Assert(err, IsNil) 159 events = append(events, evs...) 160 case TableCreate: 161 evs, _, err := s.eventsGenerator.GenCreateTableEvents(e.args[0].(string), e.args[1].(string)) 162 c.Assert(err, IsNil) 163 events = append(events, evs...) 164 case TableDrop: 165 evs, _, err := s.eventsGenerator.GenDropTableEvents(e.args[0].(string), e.args[1].(string)) 166 c.Assert(err, IsNil) 167 events = append(events, evs...) 168 169 case DDL: 170 evs, _, err := s.eventsGenerator.GenDDLEvents(e.args[0].(string), e.args[1].(string), 0) 171 c.Assert(err, IsNil) 172 events = append(events, evs...) 173 174 case Write, Update, Delete: 175 dmlData := []*event.DMLData{ 176 { 177 TableID: e.args[0].(uint64), 178 Schema: e.args[1].(string), 179 Table: e.args[2].(string), 180 ColumnType: e.args[3].([]byte), 181 Rows: e.args[4].([][]interface{}), 182 }, 183 } 184 var eventType replication.EventType 185 switch e.typ { 186 case Write: 187 eventType = replication.WRITE_ROWS_EVENTv2 188 case Update: 189 eventType = replication.UPDATE_ROWS_EVENTv2 190 case Delete: 191 eventType = replication.DELETE_ROWS_EVENTv2 192 default: 193 c.Fatal(fmt.Sprintf("mock event generator don't support event type: %d", e.typ)) 194 } 195 evs, _, err := s.eventsGenerator.GenDMLEvents(eventType, dmlData, 0) 196 c.Assert(err, IsNil) 197 events = append(events, evs...) 198 } 199 } 200 return events 201 } 202 203 func (s *testSyncerSuite) resetEventsGenerator(c *C) { 204 previousGTIDSetStr := "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-14,406a3f61-690d-11e7-87c5-6c92bf46f384:1-94321383" 205 previousGTIDSet, err := gtid.ParserGTID(s.cfg.Flavor, previousGTIDSetStr) 206 if err != nil { 207 c.Fatal(err) 208 } 209 latestGTIDStr := "3ccc475b-2343-11e7-be21-6c0b84d59f30:14" 210 latestGTID, err := gtid.ParserGTID(s.cfg.Flavor, latestGTIDStr) 211 c.Assert(err, IsNil) 212 s.eventsGenerator, err = event.NewGenerator(s.cfg.Flavor, s.cfg.ServerID, 0, latestGTID, previousGTIDSet, 0) 213 if err != nil { 214 c.Fatal(err) 215 } 216 } 217 218 func (s *testSyncerSuite) TearDownSuite(c *C) { 219 os.RemoveAll(s.cfg.Dir) 220 } 221 222 func mockGetServerUnixTS(mock sqlmock.Sqlmock) { 223 ts := time.Now().Unix() 224 rows := sqlmock.NewRows([]string{"UNIX_TIMESTAMP()"}).AddRow(strconv.FormatInt(ts, 10)) 225 mock.ExpectQuery("SELECT UNIX_TIMESTAMP()").WillReturnRows(rows) 226 } 227 228 func (s *testSyncerSuite) TestSelectDB(c *C) { 229 s.cfg.BAList = &filter.Rules{ 230 DoDBs: []string{"~^b.*", "s1", "stest"}, 231 } 232 233 schemas := [][]byte{[]byte("s1"), []byte("s2"), []byte("btest"), []byte("b1"), []byte("stest"), []byte("st")} 234 skips := []bool{false, true, false, false, false, true} 235 type Case struct { 236 schema []byte 237 query []byte 238 skip bool 239 } 240 cases := make([]Case, 0, 2*len(schemas)) 241 for i, schema := range schemas { 242 cases = append(cases, Case{ 243 schema: schema, 244 query: append([]byte("create database "), schema...), 245 skip: skips[i], 246 }) 247 cases = append(cases, Case{ 248 schema: schema, 249 query: append([]byte("drop database "), schema...), 250 skip: skips[i], 251 }) 252 } 253 254 p := parser.New() 255 cfg, err := s.cfg.Clone() 256 c.Assert(err, IsNil) 257 syncer := NewSyncer(cfg, nil, nil) 258 syncer.baList, err = filter.New(syncer.cfg.CaseSensitive, syncer.cfg.BAList) 259 c.Assert(err, IsNil) 260 err = syncer.genRouter() 261 c.Assert(err, IsNil) 262 263 header := &replication.EventHeader{ 264 Timestamp: uint32(time.Now().Unix()), 265 ServerID: 101, 266 Flags: 0x01, 267 } 268 269 statusVars := []byte{4, 0, 0, 0, 0, 46, 0} 270 ddlWorker := NewDDLWorker(&syncer.tctx.Logger, syncer) 271 for _, cs := range cases { 272 e, err := event.GenQueryEvent(header, 123, 0, 0, 0, statusVars, cs.schema, cs.query) 273 c.Assert(err, IsNil) 274 c.Assert(e, NotNil) 275 ev, ok := e.Event.(*replication.QueryEvent) 276 c.Assert(ok, IsTrue) 277 278 sql := string(ev.Query) 279 schema := string(ev.Schema) 280 qec := &queryEventContext{ 281 p: p, 282 ddlSchema: schema, 283 eventStatusVars: ev.StatusVars, 284 } 285 ddlInfo, err := ddlWorker.genDDLInfo(qec, sql) 286 c.Assert(err, IsNil) 287 288 qec.originSQL = sql 289 needSkip, err := syncer.skipQueryEvent(qec, ddlInfo) 290 c.Assert(err, IsNil) 291 c.Assert(needSkip, Equals, cs.skip) 292 } 293 } 294 295 func (s *testSyncerSuite) TestSelectTable(c *C) { 296 s.cfg.BAList = &filter.Rules{ 297 DoDBs: []string{"t2", "stest", "~^ptest*"}, 298 DoTables: []*filter.Table{ 299 {Schema: "stest", Name: "log"}, 300 {Schema: "stest", Name: "~^t.*"}, 301 {Schema: "~^ptest*", Name: "~^t.*"}, 302 }, 303 } 304 s.resetEventsGenerator(c) 305 events := mockBinlogEvents{ 306 mockBinlogEvent{typ: DBCreate, args: []interface{}{"s1"}}, 307 mockBinlogEvent{typ: TableCreate, args: []interface{}{"s1", "create table s1.log(id int)"}}, 308 mockBinlogEvent{typ: DBDrop, args: []interface{}{"s1"}}, 309 310 mockBinlogEvent{typ: TableCreate, args: []interface{}{"mysql", "create table mysql.test(id int)"}}, 311 mockBinlogEvent{typ: TableDrop, args: []interface{}{"mysql", "test"}}, 312 mockBinlogEvent{typ: DBCreate, args: []interface{}{"stest"}}, 313 mockBinlogEvent{typ: TableCreate, args: []interface{}{"stest", "create table stest.log(id int)"}}, 314 mockBinlogEvent{typ: TableCreate, args: []interface{}{"stest", "create table stest.t(id int)"}}, 315 mockBinlogEvent{typ: TableCreate, args: []interface{}{"stest", "create table stest.log2(id int)"}}, 316 mockBinlogEvent{typ: Write, args: []interface{}{uint64(8), "stest", "t", []byte{mysql.MYSQL_TYPE_LONG}, [][]interface{}{{int32(10)}}}}, 317 mockBinlogEvent{typ: Write, args: []interface{}{uint64(9), "stest", "log", []byte{mysql.MYSQL_TYPE_LONG}, [][]interface{}{{int32(10)}}}}, 318 mockBinlogEvent{typ: Write, args: []interface{}{uint64(10), "stest", "log2", []byte{mysql.MYSQL_TYPE_LONG}, [][]interface{}{{int32(10)}}}}, 319 mockBinlogEvent{typ: TableDrop, args: []interface{}{"stest", "log"}}, 320 mockBinlogEvent{typ: TableDrop, args: []interface{}{"stest", "t"}}, 321 mockBinlogEvent{typ: TableDrop, args: []interface{}{"stest", "log2"}}, 322 mockBinlogEvent{typ: DBDrop, args: []interface{}{"stest"}}, 323 324 mockBinlogEvent{typ: DBCreate, args: []interface{}{"t2"}}, 325 mockBinlogEvent{typ: TableCreate, args: []interface{}{"t2", "create table t2.log(id int)"}}, 326 mockBinlogEvent{typ: TableCreate, args: []interface{}{"t2", "create table t2.log1(id int)"}}, 327 mockBinlogEvent{typ: TableDrop, args: []interface{}{"t2", "log"}}, 328 mockBinlogEvent{typ: DBDrop, args: []interface{}{"t2"}}, 329 330 mockBinlogEvent{typ: DBCreate, args: []interface{}{"ptest1"}}, 331 mockBinlogEvent{typ: TableCreate, args: []interface{}{"ptest1", "create table ptest1.t1(id int)"}}, 332 mockBinlogEvent{typ: DBDrop, args: []interface{}{"ptest1"}}, 333 } 334 335 allEvents := s.generateEvents(events, c) 336 337 res := [][]bool{ 338 {true}, 339 {true}, 340 {true}, 341 342 {true}, 343 {true}, 344 {false}, 345 {false}, 346 {false}, 347 {true}, 348 {false}, 349 {false}, 350 {true}, 351 {false}, 352 {false}, 353 {true}, 354 {false}, 355 356 {false}, 357 {true}, 358 {true}, 359 {true}, 360 {false}, 361 362 {false}, 363 {false}, 364 {false}, 365 } 366 367 p := parser.New() 368 cfg, err := s.cfg.Clone() 369 c.Assert(err, IsNil) 370 syncer := NewSyncer(cfg, nil, nil) 371 syncer.baList, err = filter.New(syncer.cfg.CaseSensitive, syncer.cfg.BAList) 372 syncer.metricsProxies = metrics.DefaultMetricsProxies.CacheForOneTask("task", "worker", "source") 373 c.Assert(err, IsNil) 374 c.Assert(syncer.genRouter(), IsNil) 375 376 checkEventWithTableResult(c, syncer, allEvents, p, res) 377 } 378 379 func (s *testSyncerSuite) TestIgnoreDB(c *C) { 380 s.cfg.BAList = &filter.Rules{ 381 IgnoreDBs: []string{"~^b.*", "s1", "stest"}, 382 } 383 384 s.resetEventsGenerator(c) 385 events := mockBinlogEvents{ 386 mockBinlogEvent{typ: DBCreate, args: []interface{}{"s1"}}, 387 mockBinlogEvent{typ: DBDrop, args: []interface{}{"s1"}}, 388 mockBinlogEvent{typ: DBCreate, args: []interface{}{"s2"}}, 389 mockBinlogEvent{typ: DBDrop, args: []interface{}{"s2"}}, 390 mockBinlogEvent{typ: DBCreate, args: []interface{}{"btest"}}, 391 mockBinlogEvent{typ: DBDrop, args: []interface{}{"btest"}}, 392 mockBinlogEvent{typ: DBCreate, args: []interface{}{"b1"}}, 393 mockBinlogEvent{typ: DBDrop, args: []interface{}{"b1"}}, 394 mockBinlogEvent{typ: DBCreate, args: []interface{}{"stest"}}, 395 mockBinlogEvent{typ: DBDrop, args: []interface{}{"stest"}}, 396 mockBinlogEvent{typ: DBCreate, args: []interface{}{"st"}}, 397 mockBinlogEvent{typ: DBDrop, args: []interface{}{"st"}}, 398 } 399 400 allEvents := s.generateEvents(events, c) 401 402 res := []bool{true, true, false, false, true, true, true, true, true, true, false, false} 403 404 p := parser.New() 405 cfg, err := s.cfg.Clone() 406 c.Assert(err, IsNil) 407 syncer := NewSyncer(cfg, nil, nil) 408 syncer.baList, err = filter.New(syncer.cfg.CaseSensitive, syncer.cfg.BAList) 409 c.Assert(err, IsNil) 410 c.Assert(syncer.genRouter(), IsNil) 411 i := 0 412 413 ddlWorker := NewDDLWorker(&syncer.tctx.Logger, syncer) 414 statusVars := []byte{4, 0, 0, 0, 0, 46, 0} 415 for _, e := range allEvents { 416 ev, ok := e.Event.(*replication.QueryEvent) 417 if !ok { 418 continue 419 } 420 sql := string(ev.Query) 421 schema := string(ev.Schema) 422 qec := &queryEventContext{ 423 p: p, 424 ddlSchema: schema, 425 eventStatusVars: statusVars, 426 } 427 ddlInfo, err := ddlWorker.genDDLInfo(qec, sql) 428 c.Assert(err, IsNil) 429 430 qec.originSQL = sql 431 needSkip, err := syncer.skipQueryEvent(qec, ddlInfo) 432 c.Assert(err, IsNil) 433 c.Assert(needSkip, Equals, res[i]) 434 i++ 435 } 436 } 437 438 func (s *testSyncerSuite) TestIgnoreTable(c *C) { 439 s.cfg.BAList = &filter.Rules{ 440 IgnoreDBs: []string{"t2"}, 441 IgnoreTables: []*filter.Table{ 442 {Schema: "stest", Name: "log"}, 443 {Schema: "stest", Name: "~^t.*"}, 444 }, 445 } 446 447 s.resetEventsGenerator(c) 448 events := mockBinlogEvents{ 449 mockBinlogEvent{typ: DBCreate, args: []interface{}{"s1"}}, 450 mockBinlogEvent{typ: TableCreate, args: []interface{}{"s1", "create table s1.log(id int)"}}, 451 mockBinlogEvent{typ: DBDrop, args: []interface{}{"s1"}}, 452 mockBinlogEvent{typ: TableCreate, args: []interface{}{"mysql", "create table mysql.test(id int)"}}, 453 mockBinlogEvent{typ: TableDrop, args: []interface{}{"mysql", "test"}}, 454 455 mockBinlogEvent{typ: DBCreate, args: []interface{}{"stest"}}, 456 mockBinlogEvent{typ: TableCreate, args: []interface{}{"stest", "create table stest.log(id int)"}}, 457 mockBinlogEvent{typ: TableCreate, args: []interface{}{"stest", "create table stest.t(id int)"}}, 458 mockBinlogEvent{typ: TableCreate, args: []interface{}{"stest", "create table stest.log2(id int)"}}, 459 460 mockBinlogEvent{typ: Write, args: []interface{}{uint64(8), "stest", "t", []byte{mysql.MYSQL_TYPE_LONG}, [][]interface{}{{int32(10)}}}}, 461 mockBinlogEvent{typ: Write, args: []interface{}{uint64(9), "stest", "log", []byte{mysql.MYSQL_TYPE_LONG}, [][]interface{}{{int32(10)}}}}, 462 mockBinlogEvent{typ: Write, args: []interface{}{uint64(10), "stest", "log2", []byte{mysql.MYSQL_TYPE_LONG}, [][]interface{}{{int32(10)}}}}, 463 // TODO event generator support generate an event with multiple tables DDL 464 mockBinlogEvent{typ: TableDrop, args: []interface{}{"stest", "log"}}, 465 mockBinlogEvent{typ: TableDrop, args: []interface{}{"stest", "t"}}, 466 mockBinlogEvent{typ: TableDrop, args: []interface{}{"stest", "log2"}}, 467 mockBinlogEvent{typ: DBDrop, args: []interface{}{"stest"}}, 468 469 mockBinlogEvent{typ: DBCreate, args: []interface{}{"t2"}}, 470 mockBinlogEvent{typ: TableCreate, args: []interface{}{"t2", "create table t2.log(id int)"}}, 471 mockBinlogEvent{typ: TableCreate, args: []interface{}{"t2", "create table t2.log1(id int)"}}, 472 mockBinlogEvent{typ: TableDrop, args: []interface{}{"t2", "log"}}, 473 mockBinlogEvent{typ: DBDrop, args: []interface{}{"t2"}}, 474 } 475 allEvents := s.generateEvents(events, c) 476 477 res := [][]bool{ 478 {false}, 479 {false}, 480 {false}, 481 {true}, 482 {true}, 483 484 {false}, 485 {true}, 486 {true}, 487 {false}, 488 489 {true}, 490 {true}, 491 {false}, 492 493 {true}, 494 {true}, 495 {false}, 496 {false}, 497 498 {true}, 499 {true}, 500 {true}, 501 {true}, 502 {true}, 503 } 504 505 p := parser.New() 506 cfg, err := s.cfg.Clone() 507 c.Assert(err, IsNil) 508 syncer := NewSyncer(cfg, nil, nil) 509 syncer.baList, err = filter.New(syncer.cfg.CaseSensitive, syncer.cfg.BAList) 510 syncer.metricsProxies = metrics.DefaultMetricsProxies.CacheForOneTask("task", "worker", "source") 511 c.Assert(err, IsNil) 512 c.Assert(syncer.genRouter(), IsNil) 513 514 checkEventWithTableResult(c, syncer, allEvents, p, res) 515 } 516 517 func (s *testSyncerSuite) TestSkipDML(c *C) { 518 s.cfg.FilterRules = []*bf.BinlogEventRule{ 519 { 520 SchemaPattern: "*", 521 TablePattern: "", 522 Events: []bf.EventType{bf.UpdateEvent}, 523 Action: bf.Ignore, 524 }, { 525 SchemaPattern: "foo", 526 TablePattern: "", 527 Events: []bf.EventType{bf.DeleteEvent}, 528 Action: bf.Ignore, 529 }, { 530 SchemaPattern: "foo1", 531 TablePattern: "bar1", 532 Events: []bf.EventType{bf.DeleteEvent}, 533 Action: bf.Ignore, 534 }, { 535 SchemaPattern: "foo1", 536 TablePattern: "bar2", 537 Events: []bf.EventType{bf.EventType(strings.ToUpper(string(bf.DeleteEvent)))}, 538 Action: bf.Ignore, 539 }, 540 } 541 s.cfg.BAList = nil 542 543 s.resetEventsGenerator(c) 544 545 type SQLChecker struct { 546 events []*replication.BinlogEvent 547 isDML bool 548 expected bool 549 } 550 551 sqls := make([]SQLChecker, 0, 16) 552 553 evs := s.generateEvents([]mockBinlogEvent{{DBCreate, []interface{}{"foo"}}}, c) 554 sqls = append(sqls, SQLChecker{events: evs, isDML: false, expected: false}) 555 556 evs = s.generateEvents([]mockBinlogEvent{{TableCreate, []interface{}{"foo", "create table foo.bar(id int)"}}}, c) 557 sqls = append(sqls, SQLChecker{events: evs, isDML: false, expected: false}) 558 559 evs = s.generateEvents([]mockBinlogEvent{{Write, []interface{}{uint64(8), "foo", "bar", []byte{mysql.MYSQL_TYPE_LONG}, [][]interface{}{{int32(1)}}}}}, c) 560 sqls = append(sqls, SQLChecker{events: evs, isDML: true, expected: false}) 561 562 evs = s.generateEvents([]mockBinlogEvent{{Update, []interface{}{uint64(8), "foo", "bar", []byte{mysql.MYSQL_TYPE_LONG}, [][]interface{}{{int32(2)}, {int32(1)}}}}}, c) 563 sqls = append(sqls, SQLChecker{events: evs, isDML: true, expected: true}) 564 565 evs = s.generateEvents([]mockBinlogEvent{{Delete, []interface{}{uint64(8), "foo", "bar", []byte{mysql.MYSQL_TYPE_LONG}, [][]interface{}{{int32(2)}}}}}, c) 566 sqls = append(sqls, SQLChecker{events: evs, isDML: true, expected: true}) 567 568 evs = s.generateEvents([]mockBinlogEvent{{DBDrop, []interface{}{"foo1"}}}, c) 569 sqls = append(sqls, SQLChecker{events: evs, isDML: false, expected: false}) 570 571 evs = s.generateEvents([]mockBinlogEvent{{DBCreate, []interface{}{"foo1"}}}, c) 572 sqls = append(sqls, SQLChecker{events: evs, isDML: false, expected: false}) 573 574 evs = s.generateEvents([]mockBinlogEvent{{TableCreate, []interface{}{"foo1", "create table foo1.bar1(id int)"}}}, c) 575 sqls = append(sqls, SQLChecker{events: evs, isDML: false, expected: false}) 576 577 evs = s.generateEvents([]mockBinlogEvent{{Write, []interface{}{uint64(9), "foo1", "bar1", []byte{mysql.MYSQL_TYPE_LONG}, [][]interface{}{{int32(1)}}}}}, c) 578 sqls = append(sqls, SQLChecker{events: evs, isDML: true, expected: false}) 579 580 evs = s.generateEvents([]mockBinlogEvent{{Update, []interface{}{uint64(9), "foo1", "bar1", []byte{mysql.MYSQL_TYPE_LONG}, [][]interface{}{{int32(2)}, {int32(1)}}}}}, c) 581 sqls = append(sqls, SQLChecker{events: evs, isDML: true, expected: true}) 582 583 evs = s.generateEvents([]mockBinlogEvent{{Delete, []interface{}{uint64(9), "foo1", "bar1", []byte{mysql.MYSQL_TYPE_LONG}, [][]interface{}{{int32(2)}}}}}, c) 584 sqls = append(sqls, SQLChecker{events: evs, isDML: true, expected: true}) 585 586 evs = s.generateEvents([]mockBinlogEvent{{TableCreate, []interface{}{"foo1", "create table foo1.bar2(id int)"}}}, c) 587 sqls = append(sqls, SQLChecker{events: evs, isDML: false, expected: false}) 588 589 evs = s.generateEvents([]mockBinlogEvent{{Write, []interface{}{uint64(10), "foo1", "bar2", []byte{mysql.MYSQL_TYPE_LONG}, [][]interface{}{{int32(1)}}}}}, c) 590 sqls = append(sqls, SQLChecker{events: evs, isDML: true, expected: false}) 591 592 evs = s.generateEvents([]mockBinlogEvent{{Update, []interface{}{uint64(10), "foo1", "bar2", []byte{mysql.MYSQL_TYPE_LONG}, [][]interface{}{{int32(2)}, {int32(1)}}}}}, c) 593 sqls = append(sqls, SQLChecker{events: evs, isDML: true, expected: true}) 594 595 evs = s.generateEvents([]mockBinlogEvent{{Delete, []interface{}{uint64(10), "foo1", "bar2", []byte{mysql.MYSQL_TYPE_LONG}, [][]interface{}{{int32(2)}}}}}, c) 596 sqls = append(sqls, SQLChecker{events: evs, isDML: true, expected: true}) 597 598 p := parser.New() 599 var err error 600 601 cfg, err := s.cfg.Clone() 602 c.Assert(err, IsNil) 603 syncer := NewSyncer(cfg, nil, nil) 604 c.Assert(syncer.genRouter(), IsNil) 605 606 syncer.binlogFilter, err = bf.NewBinlogEvent(false, s.cfg.FilterRules) 607 c.Assert(err, IsNil) 608 609 for _, sql := range sqls { 610 events := sql.events 611 for _, e := range events { 612 switch ev := e.Event.(type) { 613 case *replication.QueryEvent: 614 _, err = p.ParseOneStmt(string(ev.Query), "", "") 615 c.Assert(err, IsNil) 616 case *replication.RowsEvent: 617 table := &filter.Table{ 618 Schema: string(ev.Table.Schema), 619 Name: string(ev.Table.Table), 620 } 621 needSkip, err := syncer.skipRowsEvent(table, e.Header.EventType) 622 c.Assert(err, IsNil) 623 c.Assert(needSkip, Equals, sql.expected) 624 default: 625 continue 626 } 627 } 628 } 629 } 630 631 func (s *testSyncerSuite) TestColumnMapping(c *C) { 632 rules := []*cm.Rule{ 633 { 634 PatternSchema: "stest*", 635 PatternTable: "log*", 636 TargetColumn: "id", 637 Expression: cm.AddPrefix, 638 Arguments: []string{"test:"}, 639 }, 640 { 641 PatternSchema: "stest*", 642 PatternTable: "t*", 643 TargetColumn: "id", 644 Expression: cm.PartitionID, 645 Arguments: []string{"1", "stest_", "t_"}, 646 }, 647 } 648 649 s.resetEventsGenerator(c) 650 651 // create db and tables 652 events := mockBinlogEvents{ 653 mockBinlogEvent{typ: DBCreate, args: []interface{}{"stest_3"}}, 654 mockBinlogEvent{typ: TableCreate, args: []interface{}{"stest_3", "create table stest_3.log(id varchar(45))"}}, 655 mockBinlogEvent{typ: TableCreate, args: []interface{}{"stest_3", "create table stest_3.t_2(name varchar(45), id bigint)"}}, 656 mockBinlogEvent{typ: TableCreate, args: []interface{}{"stest_3", "create table stest_3.a(id int)"}}, 657 } 658 659 createEvents := s.generateEvents(events, c) 660 661 // dmls 662 type dml struct { 663 events []*replication.BinlogEvent 664 column []string 665 data []interface{} 666 } 667 668 dmls := make([]dml, 0, 3) 669 670 evs := s.generateEvents([]mockBinlogEvent{{typ: Write, args: []interface{}{uint64(8), "stest_3", "t_2", []byte{mysql.MYSQL_TYPE_STRING, mysql.MYSQL_TYPE_LONG}, [][]interface{}{{"ian", int32(10)}}}}}, c) 671 dmls = append(dmls, dml{events: evs, column: []string{"name", "id"}, data: []interface{}{"ian", int64(1<<59 | 3<<52 | 2<<44 | 10)}}) 672 673 evs = s.generateEvents([]mockBinlogEvent{{typ: Write, args: []interface{}{uint64(9), "stest_3", "log", []byte{mysql.MYSQL_TYPE_STRING}, [][]interface{}{{"10"}}}}}, c) 674 dmls = append(dmls, dml{events: evs, column: []string{"id"}, data: []interface{}{"test:10"}}) 675 676 evs = s.generateEvents([]mockBinlogEvent{{typ: Write, args: []interface{}{uint64(10), "stest_3", "a", []byte{mysql.MYSQL_TYPE_LONG}, [][]interface{}{{int32(10)}}}}}, c) 677 dmls = append(dmls, dml{events: evs, column: []string{"id"}, data: []interface{}{int32(10)}}) 678 679 dmlEvents := make([]*replication.BinlogEvent, 0, 15) 680 for _, dml := range dmls { 681 dmlEvents = append(dmlEvents, dml.events...) 682 } 683 684 // drop tables and db 685 events = mockBinlogEvents{ 686 // TODO event generator support generate an event with multiple tables DDL 687 mockBinlogEvent{typ: TableDrop, args: []interface{}{"stest_3", "log"}}, 688 mockBinlogEvent{typ: TableDrop, args: []interface{}{"stest_3", "t_2"}}, 689 mockBinlogEvent{typ: TableDrop, args: []interface{}{"stest_3", "a"}}, 690 mockBinlogEvent{typ: DBDrop, args: []interface{}{"stest_3"}}, 691 } 692 dropEvents := s.generateEvents(events, c) 693 694 p := parser.New() 695 var err error 696 mapping, err := cm.NewMapping(false, rules) 697 c.Assert(err, IsNil) 698 699 allEvents := createEvents 700 allEvents = append(allEvents, dmlEvents...) 701 allEvents = append(allEvents, dropEvents...) 702 dmlIndex := 0 703 for _, e := range allEvents { 704 switch ev := e.Event.(type) { 705 case *replication.QueryEvent: 706 _, err = p.ParseOneStmt(string(ev.Query), "", "") 707 c.Assert(err, IsNil) 708 case *replication.RowsEvent: 709 r, _, err := mapping.HandleRowValue(string(ev.Table.Schema), string(ev.Table.Table), dmls[dmlIndex].column, ev.Rows[0]) 710 c.Assert(err, IsNil) 711 c.Assert(r, DeepEquals, dmls[dmlIndex].data) 712 dmlIndex++ 713 default: 714 continue 715 } 716 } 717 } 718 719 func (s *testSyncerSuite) TestcheckpointID(c *C) { 720 cfg, err := s.cfg.Clone() 721 c.Assert(err, IsNil) 722 syncer := NewSyncer(cfg, nil, nil) 723 checkpointID := syncer.checkpointID() 724 c.Assert(checkpointID, Equals, "101") 725 } 726 727 // TODO: add `TestSharding` later. 728 729 func (s *testSyncerSuite) TestRun(c *C) { 730 // 1. execute some sqls which will trigger causality 731 // 2. check the generated jobs 732 // 3. update config, add route rules, and update syncer 733 // 4. execute some sqls and then check jobs generated 734 735 db, mock, err := sqlmock.New() 736 c.Assert(err, IsNil) 737 mockGetServerUnixTS(mock) 738 dbConn, err := db.Conn(context.Background()) 739 c.Assert(err, IsNil) 740 checkPointDB, checkPointMock, err := sqlmock.New() 741 c.Assert(err, IsNil) 742 checkPointDBConn, err := checkPointDB.Conn(context.Background()) 743 c.Assert(err, IsNil) 744 745 testJobs.jobs = testJobs.jobs[:0] 746 747 s.cfg.BAList = &filter.Rules{ 748 DoDBs: []string{"test_1"}, 749 DoTables: []*filter.Table{ 750 {Schema: "test_1", Name: "t_1"}, 751 {Schema: "test_1", Name: "t_2"}, 752 }, 753 } 754 755 s.cfg.Batch = 1000 756 s.cfg.WorkerCount = 2 757 s.cfg.MaxRetry = 1 758 s.cfg.To.Session = map[string]string{ 759 "sql_mode": "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION", 760 "tidb_skip_utf8_check": "0", 761 } 762 763 cfg, err := s.cfg.Clone() 764 c.Assert(err, IsNil) 765 syncer := NewSyncer(cfg, nil, nil) 766 syncer.cfg.CheckpointFlushInterval = 30 // defaultCheckpointFlushInterval 767 syncer.cfg.SafeModeDuration = "60s" // defaultCheckpointFlushInterval * 2 768 syncer.fromDB = &dbconn.UpStreamConn{BaseDB: conn.NewBaseDBForTest(db)} 769 syncer.toDBConns = []*dbconn.DBConn{ 770 dbconn.NewDBConn(s.cfg, conn.NewBaseConnForTest(dbConn, &retry.FiniteRetryStrategy{})), 771 dbconn.NewDBConn(s.cfg, conn.NewBaseConnForTest(dbConn, &retry.FiniteRetryStrategy{})), 772 } 773 syncer.ddlDBConn = dbconn.NewDBConn(s.cfg, conn.NewBaseConnForTest(dbConn, &retry.FiniteRetryStrategy{})) 774 syncer.downstreamTrackConn = dbconn.NewDBConn(s.cfg, conn.NewBaseConnForTest(dbConn, &retry.FiniteRetryStrategy{})) 775 syncer.schemaTracker, err = schema.NewTestTracker(context.Background(), s.cfg.Name, syncer.downstreamTrackConn, log.L()) 776 c.Assert(err, IsNil) 777 778 syncer.metricsProxies = metrics.DefaultMetricsProxies.CacheForOneTask("task", "worker", "source") 779 780 mock.ExpectBegin() 781 mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", pmysql.DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) 782 mock.ExpectCommit() 783 mock.ExpectQuery("SHOW CREATE TABLE " + "`test_1`.`t_1`").WillReturnRows( 784 sqlmock.NewRows([]string{"Table", "Create Table"}). 785 AddRow("t_1", "create table t_1(id int primary key, name varchar(24))")) 786 mock.ExpectQuery("SHOW CREATE TABLE " + "`test_1`.`t_1`").WillReturnRows( 787 sqlmock.NewRows([]string{"Table", "Create Table"}). 788 AddRow("t_1", "create table t_1(id int primary key, name varchar(24), KEY `index1` (`name`))")) 789 790 syncer.exprFilterGroup = NewExprFilterGroup(tcontext.Background(), utils.NewSessionCtx(nil), nil) 791 c.Assert(syncer.Type(), Equals, pb.UnitType_Sync) 792 793 c.Assert(syncer.genRouter(), IsNil) 794 795 syncer.metricsProxies = metrics.DefaultMetricsProxies.CacheForOneTask("task", "worker", "source") 796 797 syncer.setupMockCheckpoint(c, checkPointDBConn, checkPointMock) 798 799 syncer.reset() 800 events1 := mockBinlogEvents{ 801 mockBinlogEvent{typ: DBCreate, args: []interface{}{"test_1"}}, 802 mockBinlogEvent{typ: TableCreate, args: []interface{}{"test_1", "create table test_1.t_1(id int primary key, name varchar(24))"}}, 803 mockBinlogEvent{typ: TableCreate, args: []interface{}{"test_1", "create table test_1.t_2(id int primary key, name varchar(24))"}}, 804 805 mockBinlogEvent{typ: Write, args: []interface{}{uint64(8), "test_1", "t_1", []byte{mysql.MYSQL_TYPE_LONG, mysql.MYSQL_TYPE_STRING}, [][]interface{}{{int32(1), "a"}}}}, 806 mockBinlogEvent{typ: DDL, args: []interface{}{"test_1", "alter table test_1.t_1 add index index1(name)"}}, 807 mockBinlogEvent{typ: Write, args: []interface{}{uint64(8), "test_1", "t_1", []byte{mysql.MYSQL_TYPE_LONG, mysql.MYSQL_TYPE_STRING}, [][]interface{}{{int32(2), "b"}}}}, 808 mockBinlogEvent{typ: Delete, args: []interface{}{uint64(8), "test_1", "t_1", []byte{mysql.MYSQL_TYPE_LONG, mysql.MYSQL_TYPE_STRING}, [][]interface{}{{int32(1), "a"}}}}, 809 mockBinlogEvent{typ: Update, args: []interface{}{uint64(8), "test_1", "t_1", []byte{mysql.MYSQL_TYPE_LONG, mysql.MYSQL_TYPE_STRING}, [][]interface{}{{int32(2), "b"}, {int32(1), "b"}}}}, 810 811 mockBinlogEvent{typ: TableCreate, args: []interface{}{"test_1", "create table test_1.t_3(id int primary key, name varchar(24))"}}, 812 mockBinlogEvent{typ: DDL, args: []interface{}{"test_1", "alter table test_1.t_3 drop primary key"}}, 813 mockBinlogEvent{typ: DDL, args: []interface{}{"test_1", "alter table test_1.t_3 add primary key(id, name)"}}, 814 } 815 816 mockStreamerProducer := &MockStreamProducer{s.generateEvents(events1, c)} 817 mockStreamer, err := mockStreamerProducer.GenerateStreamFrom(binlog.MustZeroLocation(mysql.MySQLFlavor)) 818 c.Assert(err, IsNil) 819 syncer.streamerController = binlogstream.NewStreamerController4Test( 820 mockStreamerProducer, 821 mockStreamer, 822 ) 823 syncer.checkpointFlushWorker = &checkpointFlushWorker{ 824 input: make(chan *checkpointFlushTask, 16), 825 cp: syncer.checkpoint, 826 execError: &syncer.execError, 827 afterFlushFn: syncer.afterFlushCheckpoint, 828 updateJobMetricsFn: func(bool, string, *job) {}, 829 } 830 831 syncer.handleJobFunc = syncer.addJobToMemory 832 syncer.ddlWorker = NewDDLWorker(&syncer.tctx.Logger, syncer) 833 834 ctx, cancel := context.WithCancel(context.Background()) 835 resultCh := make(chan pb.ProcessResult) 836 837 go syncer.Process(ctx, resultCh) 838 839 expectJobs1 := []*expectJob{ 840 // now every ddl job will start with a flush job 841 { 842 flush, 843 nil, 844 nil, 845 }, { 846 ddl, 847 []string{"CREATE DATABASE IF NOT EXISTS `test_1`"}, 848 nil, 849 }, { 850 flush, 851 nil, 852 nil, 853 }, { 854 ddl, 855 []string{"CREATE TABLE IF NOT EXISTS `test_1`.`t_1` (`id` INT PRIMARY KEY,`name` VARCHAR(24))"}, 856 nil, 857 }, { 858 flush, 859 nil, 860 nil, 861 }, { 862 ddl, 863 []string{"CREATE TABLE IF NOT EXISTS `test_1`.`t_2` (`id` INT PRIMARY KEY,`name` VARCHAR(24))"}, 864 nil, 865 }, { 866 dml, 867 []string{"REPLACE INTO `test_1`.`t_1` (`id`,`name`) VALUES (?,?)"}, 868 [][]interface{}{{int64(1), "a"}}, 869 }, { 870 flush, 871 nil, 872 nil, 873 }, { 874 ddl, 875 []string{"ALTER TABLE `test_1`.`t_1` ADD INDEX `index1`(`name`)"}, 876 nil, 877 }, { 878 dml, 879 []string{"REPLACE INTO `test_1`.`t_1` (`id`,`name`) VALUES (?,?)"}, 880 [][]interface{}{{int64(2), "b"}}, 881 }, { 882 dml, 883 []string{"DELETE FROM `test_1`.`t_1` WHERE `id` = ? LIMIT 1"}, 884 [][]interface{}{{int64(1)}}, 885 }, { 886 // safe mode is true, will split update to delete + replace 887 dml, 888 []string{"DELETE FROM `test_1`.`t_1` WHERE `id` = ? LIMIT 1", "REPLACE INTO `test_1`.`t_1` (`id`,`name`) VALUES (?,?)"}, 889 [][]interface{}{{int64(2)}, {int64(1), "b"}}, 890 }, { 891 flush, 892 nil, 893 nil, 894 }, { 895 ddl, 896 []string{"CREATE TABLE IF NOT EXISTS `test_1`.`t_3` (`id` INT PRIMARY KEY,`name` VARCHAR(24))"}, 897 nil, 898 }, { 899 flush, 900 nil, 901 nil, 902 }, { 903 ddl, 904 []string{"ALTER TABLE `test_1`.`t_3` DROP PRIMARY KEY"}, 905 nil, 906 }, { 907 flush, 908 nil, 909 nil, 910 }, { 911 ddl, 912 []string{"ALTER TABLE `test_1`.`t_3` ADD PRIMARY KEY(`id`, `name`)"}, 913 nil, 914 }, { 915 flush, 916 nil, 917 nil, 918 }, 919 } 920 921 executeSQLAndWait(len(expectJobs1)) 922 c.Assert(syncer.Status(nil).(*pb.SyncStatus).TotalEvents, Equals, int64(0)) 923 syncer.mockFinishJob(expectJobs1) 924 925 testJobs.Lock() 926 checkJobs(c, testJobs.jobs, expectJobs1) 927 testJobs.jobs = testJobs.jobs[:0] 928 testJobs.Unlock() 929 930 s.cfg.ColumnMappingRules = nil 931 s.cfg.RouteRules = []*router.TableRule{ 932 { 933 SchemaPattern: "test_1", 934 TablePattern: "t_1", 935 TargetSchema: "test_1", 936 TargetTable: "t_2", 937 }, 938 } 939 940 cancel() 941 <-resultCh // wait for the process to finish 942 // when syncer exit Run(), will flush job 943 syncer.Pause() 944 945 mockDBProvider := conn.InitMockDB(c) 946 mockDBProvider.ExpectQuery("SELECT cast\\(TIMEDIFF\\(NOW\\(6\\), UTC_TIMESTAMP\\(6\\)\\) as time\\);"). 947 WillReturnRows(sqlmock.NewRows([]string{""}).AddRow("01:00:00")) 948 mockGetServerUnixTS(mock) 949 mock.ExpectBegin() 950 mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", pmysql.DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) 951 mock.ExpectCommit() 952 mock.ExpectQuery("SHOW CREATE TABLE " + "`test_1`.`t_2`").WillReturnRows( 953 sqlmock.NewRows([]string{"Table", "Create Table"}). 954 AddRow("t_2", "create table t_2(id int primary key, name varchar(24))")) 955 956 c.Assert(syncer.Update(context.Background(), s.cfg), IsNil) 957 c.Assert(syncer.timezone.String(), Equals, "+01:00") 958 959 events2 := mockBinlogEvents{ 960 mockBinlogEvent{typ: Write, args: []interface{}{uint64(8), "test_1", "t_1", []byte{mysql.MYSQL_TYPE_LONG, mysql.MYSQL_TYPE_STRING}, [][]interface{}{{int32(3), "c"}}}}, 961 mockBinlogEvent{typ: Delete, args: []interface{}{uint64(8), "test_1", "t_1", []byte{mysql.MYSQL_TYPE_LONG, mysql.MYSQL_TYPE_STRING}, [][]interface{}{{int32(3), "c"}}}}, 962 } 963 964 ctx, cancel = context.WithCancel(context.Background()) 965 resultCh = make(chan pb.ProcessResult) 966 // simulate `syncer.Resume` here, but doesn't reset database conns 967 syncer.secondsBehindMaster.Store(100) 968 syncer.workerJobTSArray[ddlJobIdx].Store(100) 969 syncer.reset() 970 c.Assert(syncer.secondsBehindMaster.Load(), Equals, int64(0)) 971 c.Assert(syncer.workerJobTSArray[ddlJobIdx].Load(), Equals, int64(0)) 972 mockStreamerProducer = &MockStreamProducer{s.generateEvents(events2, c)} 973 mockStreamer, err = mockStreamerProducer.GenerateStreamFrom(binlog.MustZeroLocation(mysql.MySQLFlavor)) 974 c.Assert(err, IsNil) 975 syncer.streamerController = binlogstream.NewStreamerController4Test( 976 mockStreamerProducer, 977 mockStreamer, 978 ) 979 syncer.checkpointFlushWorker = &checkpointFlushWorker{ 980 input: make(chan *checkpointFlushTask, 16), 981 cp: syncer.checkpoint, 982 execError: &syncer.execError, 983 afterFlushFn: syncer.afterFlushCheckpoint, 984 updateJobMetricsFn: func(bool, string, *job) {}, 985 } 986 987 // When crossing safeModeExitPoint, will generate a flush sql 988 checkPointMock.ExpectBegin() 989 checkPointMock.ExpectExec(".*INSERT INTO .* VALUES.* ON DUPLICATE KEY UPDATE.*").WillReturnResult(sqlmock.NewResult(0, 1)) 990 checkPointMock.ExpectCommit() 991 // Simulate resume from syncer, last time we exit successfully, so we shouldn't open safe mode here 992 go syncer.Process(ctx, resultCh) 993 994 expectJobs2 := []*expectJob{ 995 { 996 dml, 997 []string{"INSERT INTO `test_1`.`t_2` (`id`,`name`) VALUES (?,?)"}, 998 [][]interface{}{{int64(3), "c"}}, 999 }, { 1000 dml, 1001 []string{"DELETE FROM `test_1`.`t_2` WHERE `id` = ? LIMIT 1"}, 1002 [][]interface{}{{int64(3)}}, 1003 }, { 1004 flush, 1005 nil, 1006 nil, 1007 }, 1008 } 1009 1010 executeSQLAndWait(len(expectJobs2)) 1011 c.Assert(syncer.Status(nil).(*pb.SyncStatus).TotalEvents, Equals, int64(len(expectJobs1))) 1012 syncer.mockFinishJob(expectJobs2) 1013 c.Assert(syncer.Status(nil).(*pb.SyncStatus).TotalEvents, Equals, int64(len(expectJobs1)+len(expectJobs2))) 1014 1015 testJobs.RLock() 1016 checkJobs(c, testJobs.jobs, expectJobs2) 1017 testJobs.RUnlock() 1018 1019 cancel() 1020 <-resultCh // wait for the process to finish 1021 1022 // test OperateSchema starts 1023 ctx, cancel = context.WithCancel(context.Background()) 1024 1025 syncer.sessCtx = utils.NewSessionCtx(map[string]string{"time_zone": "UTC"}) 1026 sourceSchemaFromCheckPoint, err := syncer.OperateSchema(ctx, &pb.OperateWorkerSchemaRequest{Op: pb.SchemaOp_GetSchema, Database: "test_1", Table: "t_1"}) 1027 c.Assert(err, IsNil) 1028 1029 syncer.tableRouter = ®exprrouter.RouteTable{} 1030 c.Assert(syncer.tableRouter.AddRule(&router.TableRule{ 1031 SchemaPattern: "test_1", 1032 TablePattern: "t_1", 1033 TargetSchema: "test_1", 1034 TargetTable: "t_2", 1035 }), IsNil) 1036 1037 syncer.checkpoint.(*RemoteCheckPoint).points = make(map[string]map[string]*binlogPoint) 1038 1039 showTableResultString := "CREATE TABLE `t_2` (\n" + 1040 " `id` int(11) NOT NULL,\n" + 1041 " `name` varchar(24) DEFAULT NULL,\n" + 1042 " PRIMARY KEY (`id`) /*T![clustered_index] NONCLUSTERED */,\n" + 1043 " KEY `index1` (`name`)\n" + 1044 ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin" 1045 1046 mock.ExpectQuery("SHOW CREATE TABLE " + "`test_1`.`t_2`").WillReturnRows( 1047 sqlmock.NewRows([]string{"Table", "Create Table"}). 1048 AddRow("t_2", showTableResultString)) 1049 1050 sourceSchemaFromDownstream, err := syncer.OperateSchema(ctx, &pb.OperateWorkerSchemaRequest{Op: pb.SchemaOp_GetSchema, Database: "test_1", Table: "t_1"}) 1051 c.Assert(err, IsNil) 1052 1053 sourceSchemaExpected := "CREATE TABLE `t_1` (" + 1054 " `id` int(11) NOT NULL," + 1055 " `name` varchar(24) DEFAULT NULL," + 1056 " PRIMARY KEY (`id`) /*T![clustered_index] NONCLUSTERED */," + 1057 " KEY `index1` (`name`)" + 1058 ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin" 1059 c.Assert(sourceSchemaFromCheckPoint, Equals, sourceSchemaExpected) 1060 c.Assert(sourceSchemaFromDownstream, Equals, sourceSchemaExpected) 1061 1062 cancel() 1063 // test OperateSchema ends 1064 1065 syncer.Close() 1066 c.Assert(syncer.isClosed(), IsTrue) 1067 1068 if err := mock.ExpectationsWereMet(); err != nil { 1069 c.Errorf("db unfulfilled expectations: %s", err) 1070 } 1071 1072 if err := checkPointMock.ExpectationsWereMet(); err != nil { 1073 c.Errorf("checkpointDB unfulfilled expectations: %s", err) 1074 } 1075 } 1076 1077 func (s *testSyncerSuite) TestExitSafeModeByConfig(c *C) { 1078 db, mock, err := sqlmock.New() 1079 c.Assert(err, IsNil) 1080 mockGetServerUnixTS(mock) 1081 1082 dbConn, err := db.Conn(context.Background()) 1083 c.Assert(err, IsNil) 1084 checkPointDB, checkPointMock, err := sqlmock.New() 1085 c.Assert(err, IsNil) 1086 checkPointDBConn, err := checkPointDB.Conn(context.Background()) 1087 c.Assert(err, IsNil) 1088 1089 testJobs.jobs = testJobs.jobs[:0] 1090 1091 s.cfg.BAList = &filter.Rules{ 1092 DoDBs: []string{"test_1"}, 1093 DoTables: []*filter.Table{ 1094 {Schema: "test_1", Name: "t_1"}, 1095 }, 1096 } 1097 s.cfg.To.Session = map[string]string{ 1098 "sql_mode": "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION", 1099 "tidb_skip_utf8_check": "0", 1100 } 1101 1102 cfg, err := s.cfg.Clone() 1103 c.Assert(err, IsNil) 1104 syncer := NewSyncer(cfg, nil, nil) 1105 syncer.fromDB = &dbconn.UpStreamConn{BaseDB: conn.NewBaseDBForTest(db)} 1106 syncer.toDBConns = []*dbconn.DBConn{ 1107 dbconn.NewDBConn(s.cfg, conn.NewBaseConnForTest(dbConn, &retry.FiniteRetryStrategy{})), 1108 dbconn.NewDBConn(s.cfg, conn.NewBaseConnForTest(dbConn, &retry.FiniteRetryStrategy{})), 1109 } 1110 syncer.ddlDBConn = dbconn.NewDBConn(s.cfg, conn.NewBaseConnForTest(dbConn, &retry.FiniteRetryStrategy{})) 1111 syncer.downstreamTrackConn = dbconn.NewDBConn(s.cfg, conn.NewBaseConnForTest(dbConn, &retry.FiniteRetryStrategy{})) 1112 mock.ExpectBegin() 1113 mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", pmysql.DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) 1114 mock.ExpectCommit() 1115 1116 mock.ExpectQuery("SHOW CREATE TABLE " + "`test_1`.`t_1`").WillReturnRows( 1117 sqlmock.NewRows([]string{"Table", "Create Table"}). 1118 AddRow("t_1", "create table t_1(id int primary key, name varchar(24))")) 1119 1120 syncer.schemaTracker, err = schema.NewTestTracker(context.Background(), s.cfg.Name, syncer.ddlDBConn, log.L()) 1121 c.Assert(err, IsNil) 1122 c.Assert(syncer.Type(), Equals, pb.UnitType_Sync) 1123 1124 syncer.exprFilterGroup = NewExprFilterGroup(tcontext.Background(), utils.NewSessionCtx(nil), nil) 1125 c.Assert(syncer.genRouter(), IsNil) 1126 1127 syncer.metricsProxies = metrics.DefaultMetricsProxies.CacheForOneTask("task", "worker", "source") 1128 1129 syncer.setupMockCheckpoint(c, checkPointDBConn, checkPointMock) 1130 1131 syncer.reset() 1132 1133 events1 := mockBinlogEvents{ 1134 mockBinlogEvent{typ: DBCreate, args: []interface{}{"test_1"}}, 1135 mockBinlogEvent{typ: TableCreate, args: []interface{}{"test_1", "create table test_1.t_1(id int primary key, name varchar(24))"}}, 1136 1137 mockBinlogEvent{typ: Write, args: []interface{}{uint64(8), "test_1", "t_1", []byte{mysql.MYSQL_TYPE_LONG, mysql.MYSQL_TYPE_STRING}, [][]interface{}{{int32(1), "a"}}}}, 1138 mockBinlogEvent{typ: Delete, args: []interface{}{uint64(8), "test_1", "t_1", []byte{mysql.MYSQL_TYPE_LONG, mysql.MYSQL_TYPE_STRING}, [][]interface{}{{int32(1), "a"}}}}, 1139 mockBinlogEvent{typ: Update, args: []interface{}{uint64(8), "test_1", "t_1", []byte{mysql.MYSQL_TYPE_LONG, mysql.MYSQL_TYPE_STRING}, [][]interface{}{{int32(2), "b"}, {int32(1), "b"}}}}, 1140 } 1141 1142 generatedEvents1 := s.generateEvents(events1, c) 1143 // make sure [18] is last event, and use [18]'s position as safeModeExitLocation 1144 c.Assert(len(generatedEvents1), Equals, 19) 1145 safeModeExitLocation := binlog.MustZeroLocation(mysql.MySQLFlavor) 1146 safeModeExitLocation.Position.Pos = generatedEvents1[18].Header.LogPos 1147 syncer.checkpoint.SaveSafeModeExitPoint(&safeModeExitLocation) 1148 1149 // check after safeModeExitLocation, safe mode is turned off 1150 events2 := mockBinlogEvents{ 1151 mockBinlogEvent{typ: Write, args: []interface{}{uint64(8), "test_1", "t_1", []byte{mysql.MYSQL_TYPE_LONG, mysql.MYSQL_TYPE_STRING}, [][]interface{}{{int32(1), "a"}}}}, 1152 mockBinlogEvent{typ: Delete, args: []interface{}{uint64(8), "test_1", "t_1", []byte{mysql.MYSQL_TYPE_LONG, mysql.MYSQL_TYPE_STRING}, [][]interface{}{{int32(1), "a"}}}}, 1153 mockBinlogEvent{typ: Update, args: []interface{}{uint64(8), "test_1", "t_1", []byte{mysql.MYSQL_TYPE_LONG, mysql.MYSQL_TYPE_STRING}, [][]interface{}{{int32(2), "b"}, {int32(1), "b"}}}}, 1154 } 1155 generatedEvents2 := s.generateEvents(events2, c) 1156 1157 generatedEvents := generatedEvents1 1158 generatedEvents = append(generatedEvents, generatedEvents2...) 1159 1160 mockStreamerProducer := &MockStreamProducer{generatedEvents} 1161 mockStreamer, err := mockStreamerProducer.GenerateStreamFrom(binlog.MustZeroLocation(mysql.MySQLFlavor)) 1162 c.Assert(err, IsNil) 1163 syncer.streamerController = binlogstream.NewStreamerController4Test( 1164 mockStreamerProducer, 1165 mockStreamer, 1166 ) 1167 syncer.checkpointFlushWorker = &checkpointFlushWorker{ 1168 input: make(chan *checkpointFlushTask, 16), 1169 cp: syncer.checkpoint, 1170 execError: &syncer.execError, 1171 afterFlushFn: syncer.afterFlushCheckpoint, 1172 updateJobMetricsFn: func(bool, string, *job) {}, 1173 } 1174 1175 syncer.handleJobFunc = syncer.addJobToMemory 1176 syncer.ddlWorker = NewDDLWorker(&syncer.tctx.Logger, syncer) 1177 1178 ctx, cancel := context.WithCancel(context.Background()) 1179 resultCh := make(chan pb.ProcessResult) 1180 1181 // When crossing safeModeExitPoint, will generate a flush sql 1182 checkPointMock.ExpectBegin() 1183 checkPointMock.ExpectExec(".*INSERT INTO .* VALUES.* ON DUPLICATE KEY UPDATE.*").WillReturnResult(sqlmock.NewResult(0, 1)) 1184 checkPointMock.ExpectCommit() 1185 // disable 1-minute safe mode 1186 c.Assert(failpoint.Enable("github.com/pingcap/tiflow/dm/syncer/SafeModeInitPhaseSeconds", `return("10ms")`), IsNil) 1187 go syncer.Process(ctx, resultCh) 1188 go func() { 1189 for r := range resultCh { 1190 if len(r.Errors) > 0 { 1191 c.Fatal(r.String()) 1192 } 1193 } 1194 }() 1195 1196 expectJobs := []*expectJob{ 1197 // now every ddl job will start with a flush job 1198 { 1199 flush, 1200 nil, 1201 nil, 1202 }, { 1203 ddl, 1204 []string{"CREATE DATABASE IF NOT EXISTS `test_1`"}, 1205 nil, 1206 }, { 1207 flush, 1208 nil, 1209 nil, 1210 }, { 1211 ddl, 1212 []string{"CREATE TABLE IF NOT EXISTS `test_1`.`t_1` (`id` INT PRIMARY KEY,`name` VARCHAR(24))"}, 1213 nil, 1214 }, { 1215 dml, 1216 []string{"REPLACE INTO `test_1`.`t_1` (`id`,`name`) VALUES (?,?)"}, 1217 [][]interface{}{{int64(1), "a"}}, 1218 }, { 1219 dml, 1220 []string{"DELETE FROM `test_1`.`t_1` WHERE `id` = ? LIMIT 1"}, 1221 [][]interface{}{{int64(1)}}, 1222 }, { 1223 dml, 1224 []string{"DELETE FROM `test_1`.`t_1` WHERE `id` = ? LIMIT 1", "REPLACE INTO `test_1`.`t_1` (`id`,`name`) VALUES (?,?)"}, 1225 [][]interface{}{{int64(2)}, {int64(1), "b"}}, 1226 }, { 1227 // start from this event, location passes safeModeExitLocation and safe mode should exit 1228 dml, 1229 []string{"INSERT INTO `test_1`.`t_1` (`id`,`name`) VALUES (?,?)"}, 1230 [][]interface{}{{int64(1), "a"}}, 1231 }, { 1232 dml, 1233 []string{"DELETE FROM `test_1`.`t_1` WHERE `id` = ? LIMIT 1"}, 1234 [][]interface{}{{int64(1)}}, 1235 }, { 1236 dml, 1237 []string{"UPDATE `test_1`.`t_1` SET `id` = ?, `name` = ? WHERE `id` = ? LIMIT 1"}, 1238 [][]interface{}{{int64(1), "b", int64(2)}}, 1239 }, { 1240 flush, 1241 nil, 1242 nil, 1243 }, 1244 } 1245 1246 executeSQLAndWait(len(expectJobs)) 1247 c.Assert(syncer.Status(nil).(*pb.SyncStatus).TotalEvents, Equals, int64(0)) 1248 syncer.mockFinishJob(expectJobs) 1249 1250 testJobs.Lock() 1251 checkJobs(c, testJobs.jobs, expectJobs) 1252 testJobs.jobs = testJobs.jobs[:0] 1253 testJobs.Unlock() 1254 1255 cancel() 1256 syncer.Close() 1257 c.Assert(syncer.isClosed(), IsTrue) 1258 1259 if err := mock.ExpectationsWereMet(); err != nil { 1260 c.Errorf("db unfulfilled expectations: %s", err) 1261 } 1262 1263 if err := checkPointMock.ExpectationsWereMet(); err != nil { 1264 c.Errorf("checkpointDB unfulfilled expectations: %s", err) 1265 } 1266 c.Assert(failpoint.Disable("github.com/pingcap/tiflow/dm/syncer/SafeModeInitPhaseSeconds"), IsNil) 1267 } 1268 1269 func (s *testSyncerSuite) TestRemoveMetadataIsFine(c *C) { 1270 cfg, err := s.cfg.Clone() 1271 c.Assert(err, IsNil) 1272 cfg.Mode = config.ModeAll 1273 syncer := NewSyncer(cfg, nil, nil) 1274 fresh, err := syncer.IsFreshTask(context.Background()) 1275 c.Assert(err, IsNil) 1276 c.Assert(fresh, IsTrue) 1277 1278 filename := filepath.Join(s.cfg.Dir, "metadata") 1279 err = os.WriteFile(filename, []byte("SHOW MASTER STATUS:\n\tLog: BAD METADATA"), 0o644) 1280 c.Assert(err, IsNil) 1281 c.Assert(syncer.checkpoint.LoadMeta(context.Background()), NotNil) 1282 1283 err = os.WriteFile(filename, []byte("SHOW MASTER STATUS:\n\tLog: mysql-bin.000003\n\tPos: 1234\n\tGTID:\n\n"), 0o644) 1284 c.Assert(err, IsNil) 1285 c.Assert(syncer.checkpoint.LoadMeta(context.Background()), IsNil) 1286 1287 c.Assert(os.Remove(filename), IsNil) 1288 1289 // after successful LoadMeta, IsFreshTask should return false so don't load again 1290 fresh, err = syncer.IsFreshTask(context.Background()) 1291 c.Assert(err, IsNil) 1292 c.Assert(fresh, IsFalse) 1293 } 1294 1295 func (s *testSyncerSuite) TestTrackDDL(c *C) { 1296 var ( 1297 testDB = "test_db" 1298 testTbl = "test_tbl" 1299 testTbl2 = "test_tbl2" 1300 ec = &eventContext{tctx: tcontext.Background()} 1301 qec = &queryEventContext{ 1302 eventContext: ec, 1303 ddlSchema: testDB, 1304 p: parser.New(), 1305 eventStatusVars: []byte{4, 0, 0, 0, 0, 46, 0}, 1306 } 1307 ) 1308 db, mock, err := sqlmock.New() 1309 c.Assert(err, IsNil) 1310 dbConn, err := db.Conn(context.Background()) 1311 c.Assert(err, IsNil) 1312 1313 checkPointDB, checkPointMock, err := sqlmock.New() 1314 c.Assert(err, IsNil) 1315 checkPointDBConn, err := checkPointDB.Conn(context.Background()) 1316 c.Assert(err, IsNil) 1317 1318 cfg, err := s.cfg.Clone() 1319 c.Assert(err, IsNil) 1320 syncer := NewSyncer(cfg, nil, nil) 1321 syncer.toDBConns = []*dbconn.DBConn{ 1322 dbconn.NewDBConn(s.cfg, conn.NewBaseConnForTest(dbConn, &retry.FiniteRetryStrategy{})), 1323 dbconn.NewDBConn(s.cfg, conn.NewBaseConnForTest(dbConn, &retry.FiniteRetryStrategy{})), 1324 } 1325 syncer.ddlDBConn = dbconn.NewDBConn(s.cfg, conn.NewBaseConnForTest(dbConn, &retry.FiniteRetryStrategy{})) 1326 syncer.checkpoint.(*RemoteCheckPoint).dbConn = dbconn.NewDBConn(s.cfg, conn.NewBaseConnForTest(checkPointDBConn, &retry.FiniteRetryStrategy{})) 1327 syncer.schemaTracker, err = schema.NewTestTracker(context.Background(), s.cfg.Name, syncer.ddlDBConn, log.L()) 1328 c.Assert(err, IsNil) 1329 defer syncer.schemaTracker.Close() 1330 syncer.exprFilterGroup = NewExprFilterGroup(tcontext.Background(), utils.NewSessionCtx(nil), nil) 1331 c.Assert(syncer.genRouter(), IsNil) 1332 1333 cases := []struct { 1334 sql string 1335 callback func() 1336 }{ 1337 {"CREATE DATABASE IF NOT EXISTS " + testDB, func() {}}, 1338 {"ALTER DATABASE " + testDB + " DEFAULT COLLATE utf8_bin", func() {}}, 1339 {"DROP DATABASE IF EXISTS " + testDB, func() {}}, 1340 {fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s.%s (c int)", testDB, testTbl), func() {}}, 1341 {fmt.Sprintf("DROP TABLE IF EXISTS %s.%s", testDB, testTbl), func() {}}, 1342 {"CREATE INDEX idx1 ON " + testTbl + " (c)", func() { 1343 mock.ExpectQuery("SHOW VARIABLES LIKE 'sql_mode'").WillReturnRows( 1344 sqlmock.NewRows([]string{"Variable_name", "Value"}).AddRow("sql_mode", "")) 1345 mock.ExpectQuery("SHOW CREATE TABLE.*").WillReturnRows( 1346 sqlmock.NewRows([]string{"Table", "Create Table"}). 1347 AddRow(testTbl, " CREATE TABLE `"+testTbl+"` (\n `c` int(11) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) 1348 }}, 1349 {fmt.Sprintf("ALTER TABLE %s.%s add c2 int", testDB, testTbl), func() { 1350 mock.ExpectQuery("SHOW VARIABLES LIKE 'sql_mode'").WillReturnRows( 1351 sqlmock.NewRows([]string{"Variable_name", "Value"}).AddRow("sql_mode", "")) 1352 mock.ExpectQuery("SHOW CREATE TABLE.*").WillReturnRows( 1353 sqlmock.NewRows([]string{"Table", "Create Table"}). 1354 AddRow(testTbl, " CREATE TABLE `"+testTbl+"` (\n `c` int(11) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) 1355 }}, 1356 1357 // alter add FK will not executed on tracker (otherwise will report error tb2 not exist) 1358 {fmt.Sprintf("ALTER TABLE %s.%s add constraint foreign key (c) references tb2(c)", testDB, testTbl), func() { 1359 mock.ExpectQuery("SHOW VARIABLES LIKE 'sql_mode'").WillReturnRows( 1360 sqlmock.NewRows([]string{"Variable_name", "Value"}).AddRow("sql_mode", "")) 1361 mock.ExpectQuery("SHOW CREATE TABLE.*").WillReturnRows( 1362 sqlmock.NewRows([]string{"Table", "Create Table"}). 1363 AddRow(testTbl, " CREATE TABLE `"+testTbl+"` (\n `c` int(11) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) 1364 }}, 1365 {"TRUNCATE TABLE " + testTbl, func() {}}, 1366 1367 // test CREATE TABLE that reference another table 1368 {fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s.%s LIKE %s", testDB, testTbl, testTbl2), func() { 1369 mock.ExpectQuery("SHOW VARIABLES LIKE 'sql_mode'").WillReturnRows( 1370 sqlmock.NewRows([]string{"Variable_name", "Value"}).AddRow("sql_mode", "")) 1371 mock.ExpectQuery("SHOW CREATE TABLE.*").WillReturnRows( 1372 sqlmock.NewRows([]string{"Table", "Create Table"}). 1373 AddRow(testTbl, " CREATE TABLE `"+testTbl+"` (\n `c` int(11) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) 1374 }}, 1375 1376 // 'CREATE TABLE ... SELECT' is not implemented yet 1377 // {fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s.%s AS SELECT * FROM %s, %s.%s WHERE %s.n=%s.%s.n", testDB, testTbl, testTbl2, testDB2, testTbl3, testTbl2, testDB2, testTbl3), func() { 1378 // mock.ExpectQuery("SHOW VARIABLES LIKE 'sql_mode'").WillReturnRows( 1379 // sqlmock.NewRows([]string{"Variable_name", "Value"}).AddRow("sql_mode", "")) 1380 // mock.ExpectQuery(fmt.Sprintf("SHOW CREATE TABLE \\`%s\\`.\\`%s\\`.*", testDB, testTbl2)).WillReturnRows( 1381 // sqlmock.NewRows([]string{"Table", "Create Table"}). 1382 // AddRow(testTbl, " CREATE TABLE `"+testTbl+"` (\n `c` int(11) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) 1383 // mock.ExpectQuery("SHOW VARIABLES LIKE 'sql_mode'").WillReturnRows( 1384 // sqlmock.NewRows([]string{"Variable_name", "Value"}).AddRow("sql_mode", "")) 1385 // mock.ExpectQuery(fmt.Sprintf("SHOW CREATE TABLE \\`%s\\`.\\`%s\\`.*", testDB2, testTbl3)).WillReturnRows( 1386 // sqlmock.NewRows([]string{"Table", "Create Table"}). 1387 // AddRow(testTbl, " CREATE TABLE `"+testTbl+"` (\n `c` int(11) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) 1388 // }}, 1389 1390 // test RENAME TABLE 1391 {fmt.Sprintf("RENAME TABLE %s.%s TO %s.%s", testDB, testTbl, testDB, testTbl2), func() { 1392 mock.ExpectQuery("SHOW VARIABLES LIKE 'sql_mode'").WillReturnRows( 1393 sqlmock.NewRows([]string{"Variable_name", "Value"}).AddRow("sql_mode", "")) 1394 mock.ExpectQuery("SHOW CREATE TABLE.*").WillReturnRows( 1395 sqlmock.NewRows([]string{"Table", "Create Table"}). 1396 AddRow(testTbl, " CREATE TABLE `"+testTbl+"` (\n `c` int(11) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) 1397 }}, 1398 {fmt.Sprintf("ALTER TABLE %s.%s RENAME %s.%s", testDB, testTbl, testDB, testTbl2), func() { 1399 mock.ExpectQuery("SHOW VARIABLES LIKE 'sql_mode'").WillReturnRows( 1400 sqlmock.NewRows([]string{"Variable_name", "Value"}).AddRow("sql_mode", "")) 1401 mock.ExpectQuery("SHOW CREATE TABLE.*").WillReturnRows( 1402 sqlmock.NewRows([]string{"Table", "Create Table"}). 1403 AddRow(testTbl, " CREATE TABLE `"+testTbl+"` (\n `c` int(11) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) 1404 }}, 1405 } 1406 1407 ddlWorker := NewDDLWorker(&syncer.tctx.Logger, syncer) 1408 for _, ca := range cases { 1409 ddlInfo, err := ddlWorker.genDDLInfo(qec, ca.sql) 1410 c.Assert(err, IsNil) 1411 ca.callback() 1412 1413 c.Assert(syncer.trackDDL(testDB, ddlInfo, ec), IsNil) 1414 syncer.schemaTracker.Reset() 1415 c.Assert(mock.ExpectationsWereMet(), IsNil) 1416 c.Assert(checkPointMock.ExpectationsWereMet(), IsNil) 1417 } 1418 } 1419 1420 func checkEventWithTableResult(c *C, syncer *Syncer, allEvents []*replication.BinlogEvent, p *parser.Parser, res [][]bool) { 1421 i := 0 1422 tctx := tcontext.Background().WithLogger(log.With(zap.String("test", "checkEventWithTableResult"))) 1423 ec := &eventContext{ 1424 tctx: tctx, 1425 } 1426 statusVars := []byte{4, 0, 0, 0, 0, 46, 0} 1427 ddlWorker := NewDDLWorker(&syncer.tctx.Logger, syncer) 1428 for _, e := range allEvents { 1429 switch ev := e.Event.(type) { 1430 case *replication.QueryEvent: 1431 qec := &queryEventContext{ 1432 eventContext: ec, 1433 originSQL: string(ev.Query), 1434 ddlSchema: string(ev.Schema), 1435 p: p, 1436 eventStatusVars: statusVars, 1437 } 1438 stmt, err := parseOneStmt(qec) 1439 c.Assert(err, IsNil) 1440 1441 if _, ok := stmt.(ast.DDLNode); !ok { 1442 continue // BEGIN event 1443 } 1444 qec.splitDDLs, err = parserpkg.SplitDDL(stmt, qec.ddlSchema) 1445 c.Assert(err, IsNil) 1446 for _, sql := range qec.splitDDLs { 1447 sqls, err := ddlWorker.processOneDDL(qec, sql) 1448 c.Assert(err, IsNil) 1449 qec.appliedDDLs = append(qec.appliedDDLs, sqls...) 1450 } 1451 if len(qec.appliedDDLs) == 0 { 1452 c.Assert(res[i], HasLen, 1) 1453 c.Assert(res[i][0], Equals, true) 1454 i++ 1455 continue 1456 } 1457 1458 for j, sql := range qec.appliedDDLs { 1459 ddlInfo, err := ddlWorker.genDDLInfo(qec, sql) 1460 c.Assert(err, IsNil) 1461 1462 needSkip, err := syncer.skipQueryEvent(qec, ddlInfo) 1463 c.Assert(err, IsNil) 1464 c.Assert(needSkip, Equals, res[i][j]) 1465 } 1466 case *replication.RowsEvent: 1467 table := &filter.Table{ 1468 Schema: string(ev.Table.Schema), 1469 Name: string(ev.Table.Table), 1470 } 1471 needSkip, err := syncer.skipRowsEvent(table, e.Header.EventType) 1472 c.Assert(err, IsNil) 1473 c.Assert(needSkip, Equals, res[i][0]) 1474 default: 1475 continue 1476 } 1477 i++ 1478 } 1479 } 1480 1481 func executeSQLAndWait(expectJobNum int) { 1482 for i := 0; i < 10; i++ { 1483 time.Sleep(time.Second) 1484 1485 testJobs.RLock() 1486 jobNum := len(testJobs.jobs) 1487 testJobs.RUnlock() 1488 1489 if jobNum >= expectJobNum { 1490 break 1491 } 1492 } 1493 } 1494 1495 type expectJob struct { 1496 tp opType 1497 sqlInJob []string 1498 args [][]interface{} 1499 } 1500 1501 var defaultDMLType = map[sqlmodel.RowChangeType]sqlmodel.DMLType{ 1502 sqlmodel.RowChangeInsert: sqlmodel.DMLInsert, 1503 sqlmodel.RowChangeUpdate: sqlmodel.DMLUpdate, 1504 sqlmodel.RowChangeDelete: sqlmodel.DMLDelete, 1505 } 1506 1507 func checkJobs(c *C, jobs []*job, expectJobs []*expectJob) { 1508 c.Assert(len(jobs), Equals, len(expectJobs), Commentf("jobs = %q", jobs)) 1509 for i, job := range jobs { 1510 c.Assert(job.tp, Equals, expectJobs[i].tp) 1511 1512 if job.tp == ddl { 1513 c.Assert(job.ddls, DeepEquals, expectJobs[i].sqlInJob) 1514 continue 1515 } 1516 1517 if job.tp == dml { 1518 if !job.safeMode { 1519 sql, args := job.dml.GenSQL(defaultDMLType[job.dml.Type()]) 1520 c.Assert([]string{sql}, DeepEquals, expectJobs[i].sqlInJob) 1521 c.Assert([][]interface{}{args}, DeepEquals, expectJobs[i].args) 1522 continue 1523 } 1524 1525 // safemode 1526 switch job.dml.Type() { 1527 case sqlmodel.RowChangeInsert: 1528 sql, args := job.dml.GenSQL(sqlmodel.DMLReplace) 1529 c.Assert([]string{sql}, DeepEquals, expectJobs[i].sqlInJob) 1530 c.Assert([][]interface{}{args}, DeepEquals, expectJobs[i].args) 1531 case sqlmodel.RowChangeUpdate: 1532 sql, args := job.dml.GenSQL(sqlmodel.DMLDelete) 1533 sql2, args2 := job.dml.GenSQL(sqlmodel.DMLReplace) 1534 c.Assert([]string{sql, sql2}, DeepEquals, expectJobs[i].sqlInJob) 1535 c.Assert([][]interface{}{args, args2}, DeepEquals, expectJobs[i].args) 1536 case sqlmodel.RowChangeDelete: 1537 sql, args := job.dml.GenSQL(sqlmodel.DMLDelete) 1538 c.Assert([]string{sql}, DeepEquals, expectJobs[i].sqlInJob) 1539 c.Assert([][]interface{}{args}, DeepEquals, expectJobs[i].args) 1540 } 1541 } 1542 } 1543 } 1544 1545 var testJobs struct { 1546 sync.RWMutex 1547 jobs []*job 1548 } 1549 1550 func newDummyJob(tp opType, targetTable *filter.Table, ddls ...string) *job { 1551 return &job{ 1552 tp: tp, 1553 targetTable: targetTable, 1554 ddls: ddls, 1555 dml: &sqlmodel.RowChange{}, 1556 } 1557 } 1558 1559 func (s *Syncer) mockFinishJob(jobs []*expectJob) { 1560 for _, job := range jobs { 1561 switch job.tp { 1562 case ddl, dml, flush: 1563 dummyJob := newDummyJob(job.tp, &filter.Table{}, job.sqlInJob...) 1564 s.updateJobMetrics(true, "test", dummyJob) 1565 } 1566 } 1567 } 1568 1569 func (s *Syncer) addJobToMemory(job *job) (bool, error) { 1570 log.L().Info("add job to memory", zap.Stringer("job", job)) 1571 1572 switch job.tp { 1573 case ddl, dml, flush: 1574 s.updateJobMetrics(false, "test", job) 1575 testJobs.Lock() 1576 testJobs.jobs = append(testJobs.jobs, job) 1577 testJobs.Unlock() 1578 } 1579 1580 switch job.tp { 1581 case xid: 1582 s.saveGlobalPoint(job.location) 1583 s.checkpoint.(*RemoteCheckPoint).globalPoint.flush() 1584 case ddl: 1585 s.saveGlobalPoint(job.location) 1586 s.checkpoint.(*RemoteCheckPoint).globalPoint.flush() 1587 for sourceSchema, tbs := range job.sourceTbls { 1588 if len(sourceSchema) == 0 { 1589 continue 1590 } 1591 for _, sourceTable := range tbs { 1592 s.saveTablePoint(sourceTable, job.location) 1593 s.checkpoint.(*RemoteCheckPoint).points[sourceSchema][sourceTable.Name].flush() 1594 } 1595 } 1596 s.resetShardingGroup(job.targetTable) 1597 case dml: 1598 for sourceSchema, tbs := range job.sourceTbls { 1599 if len(sourceSchema) == 0 { 1600 continue 1601 } 1602 for _, sourceTable := range tbs { 1603 s.saveTablePoint(sourceTable, job.currentLocation) 1604 s.checkpoint.(*RemoteCheckPoint).points[sourceSchema][sourceTable.Name].flush() 1605 } 1606 } 1607 } 1608 1609 return true, nil 1610 } 1611 1612 func (s *Syncer) setupMockCheckpoint(c *C, checkPointDBConn *sql.Conn, checkPointMock sqlmock.Sqlmock) { 1613 checkPointMock.ExpectBegin() 1614 checkPointMock.ExpectExec(fmt.Sprintf("CREATE SCHEMA IF NOT EXISTS `%s`", s.cfg.MetaSchema)).WillReturnResult(sqlmock.NewResult(1, 1)) 1615 checkPointMock.ExpectCommit() 1616 checkPointMock.ExpectBegin() 1617 checkPointMock.ExpectExec(fmt.Sprintf("CREATE TABLE IF NOT EXISTS `%s`.`%s`", s.cfg.MetaSchema, cputil.SyncerCheckpoint(s.cfg.Name))).WillReturnResult(sqlmock.NewResult(1, 1)) 1618 checkPointMock.ExpectCommit() 1619 1620 // mock syncer.checkpoint.Init() function 1621 s.checkpoint.(*RemoteCheckPoint).dbConn = dbconn.NewDBConn(s.cfg, conn.NewBaseConnForTest(checkPointDBConn, &retry.FiniteRetryStrategy{})) 1622 // mock syncer.flushCpWorker init 1623 s.checkpointFlushWorker = &checkpointFlushWorker{ 1624 input: nil, 1625 cp: s.checkpoint, 1626 execError: &s.execError, 1627 afterFlushFn: s.afterFlushCheckpoint, 1628 updateJobMetricsFn: func(bool, string, *job) {}, 1629 } 1630 c.Assert(s.checkpoint.(*RemoteCheckPoint).prepare(tcontext.Background()), IsNil) 1631 // disable flush checkpoint periodically 1632 s.checkpoint.(*RemoteCheckPoint).globalPointSaveTime = time.Now() 1633 } 1634 1635 func parseSQL(sql string) (ast.StmtNode, error) { 1636 stmt, err := parser.New().ParseOneStmt(sql, "", "") 1637 if err != nil { 1638 return nil, err 1639 } 1640 return stmt, nil 1641 } 1642 1643 func TestTrackDownstreamTableWontOverwrite(t *testing.T) { 1644 syncer := Syncer{} 1645 ctx := context.Background() 1646 tctx := tcontext.Background() 1647 cfg := genDefaultSubTaskConfig4Test() 1648 1649 db, mock, err := sqlmock.New() 1650 require.NoError(t, err) 1651 dbConn, err := db.Conn(ctx) 1652 require.NoError(t, err) 1653 baseConn := conn.NewBaseConnForTest(dbConn, &retry.FiniteRetryStrategy{}) 1654 syncer.ddlDBConn = dbconn.NewDBConn(cfg, baseConn) 1655 syncer.downstreamTrackConn = dbconn.NewDBConn(cfg, conn.NewBaseConnForTest(dbConn, &retry.FiniteRetryStrategy{})) 1656 syncer.schemaTracker, err = schema.NewTestTracker(ctx, cfg.Name, syncer.downstreamTrackConn, log.L()) 1657 require.NoError(t, err) 1658 defer syncer.schemaTracker.Close() 1659 1660 upTable := &filter.Table{ 1661 Schema: "test", 1662 Name: "up", 1663 } 1664 downTable := &filter.Table{ 1665 Schema: "test", 1666 Name: "down", 1667 } 1668 createTableSQL := "CREATE TABLE up (c1 int, c2 int);" 1669 createTableStmt, err := parseSQL(createTableSQL) 1670 require.NoError(t, err) 1671 1672 mock.ExpectQuery("SHOW VARIABLES LIKE 'sql_mode'").WillReturnRows( 1673 sqlmock.NewRows([]string{"Variable_name", "Value"}).AddRow("sql_mode", "")) 1674 mock.ExpectQuery("SHOW CREATE TABLE.*").WillReturnRows( 1675 sqlmock.NewRows([]string{"Table", "Create Table"}). 1676 AddRow(downTable.Name, " CREATE TABLE `"+downTable.Name+"` (\n `c` int(11) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) 1677 1678 require.NoError(t, syncer.schemaTracker.CreateSchemaIfNotExists(upTable.Schema)) 1679 require.NoError(t, syncer.schemaTracker.Exec(ctx, "test", createTableStmt)) 1680 ti, err := syncer.getTableInfo(tctx, upTable, downTable) 1681 require.NoError(t, err) 1682 require.Len(t, ti.Columns, 2) 1683 require.NoError(t, syncer.trackTableInfoFromDownstream(tctx, upTable, downTable)) 1684 newTi, err := syncer.getTableInfo(tctx, upTable, downTable) 1685 require.NoError(t, err) 1686 require.Equal(t, ti, newTi) 1687 require.NoError(t, mock.ExpectationsWereMet()) 1688 } 1689 1690 func TestDownstreamTableHasAutoRandom(t *testing.T) { 1691 syncer := Syncer{} 1692 ctx := context.Background() 1693 tctx := tcontext.Background() 1694 cfg := genDefaultSubTaskConfig4Test() 1695 schemaName := "test" 1696 tableName := "tbl" 1697 table := &filter.Table{ 1698 Schema: "test", 1699 Name: "tbl", 1700 } 1701 1702 cases := []struct { 1703 input string 1704 expected string 1705 }{ 1706 { 1707 "CREATE TABLE `" + tableName + "` (\n" + 1708 " `c` bigint(20) NOT NULL /*T![auto_rand] AUTO_RANDOM(5) */,\n" + 1709 " PRIMARY KEY (`c`)\n" + 1710 ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", 1711 "create table tbl (c bigint primary key)", 1712 }, 1713 // test no error, input and expected are same 1714 { 1715 "CREATE TABLE `" + tableName + "` (\n" + 1716 " `c` bigint(20) NOT NULL,\n" + 1717 " PRIMARY KEY (`c`)\n" + 1718 ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![placement] PLACEMENT POLICY=`on_ssd` */", 1719 "create table tbl (c bigint primary key) /*T![placement] PLACEMENT POLICY=`on_ssd` */", 1720 }, 1721 } 1722 1723 db, mock, err := sqlmock.New() 1724 require.NoError(t, err) 1725 dbConn, err := db.Conn(ctx) 1726 require.NoError(t, err) 1727 baseConn := conn.NewBaseConnForTest(dbConn, &retry.FiniteRetryStrategy{}) 1728 syncer.ddlDBConn = dbconn.NewDBConn(cfg, baseConn) 1729 syncer.downstreamTrackConn = dbconn.NewDBConn(cfg, conn.NewBaseConnForTest(dbConn, &retry.FiniteRetryStrategy{})) 1730 1731 for _, c := range cases { 1732 syncer.schemaTracker, err = schema.NewTestTracker(ctx, cfg.Name, syncer.downstreamTrackConn, log.L()) 1733 require.NoError(t, err) 1734 1735 mock.ExpectQuery("SHOW VARIABLES LIKE 'sql_mode'").WillReturnRows( 1736 sqlmock.NewRows([]string{"Variable_name", "Value"}).AddRow("sql_mode", "")) 1737 mock.ExpectQuery("SHOW CREATE TABLE.*").WillReturnRows( 1738 sqlmock.NewRows([]string{"Table", "Create Table"}). 1739 AddRow(tableName, c.input)) 1740 require.NoError(t, syncer.schemaTracker.CreateSchemaIfNotExists(schemaName)) 1741 require.NoError(t, syncer.trackTableInfoFromDownstream(tctx, table, table)) 1742 ti, err := syncer.getTableInfo(tctx, table, table) 1743 require.NoError(t, err) 1744 require.NoError(t, mock.ExpectationsWereMet()) 1745 1746 require.NoError(t, syncer.schemaTracker.DropTable(table)) 1747 stmt, err := parseSQL(c.expected) 1748 require.NoError(t, err) 1749 require.NoError(t, syncer.schemaTracker.Exec(ctx, schemaName, stmt)) 1750 ti2, err := syncer.getTableInfo(tctx, table, table) 1751 require.NoError(t, err) 1752 1753 ti.ID = ti2.ID 1754 ti.UpdateTS = ti2.UpdateTS 1755 1756 require.Equal(t, ti, ti2) 1757 } 1758 } 1759 1760 func TestExecuteSQLSWithIgnore(t *testing.T) { 1761 db, mock, err := sqlmock.New() 1762 require.NoError(t, err) 1763 dbConn, err := db.Conn(context.Background()) 1764 require.NoError(t, err) 1765 conn := dbconn.NewDBConn(&config.SubTaskConfig{ 1766 Name: "test", 1767 }, &conn.BaseConn{ 1768 DBConn: dbConn, 1769 RetryStrategy: &retry.FiniteRetryStrategy{}, 1770 }) 1771 1772 sqls := []string{"alter table t1 add column a int", "alter table t1 add column b int"} 1773 1774 // will ignore the first error, and continue execute the second sql 1775 mock.ExpectBegin() 1776 mock.ExpectExec(sqls[0]).WillReturnError(newMysqlErr(uint16(infoschema.ErrColumnExists.Code()), "column a already exists")) 1777 mock.ExpectExec(sqls[1]).WillReturnResult(sqlmock.NewResult(1, 1)) 1778 mock.ExpectCommit() 1779 1780 tctx := tcontext.Background().WithLogger(log.With(zap.String("test", "TestExecuteSQLSWithIgnore"))) 1781 n, err := conn.ExecuteSQLWithIgnore(tctx, nil, errorutil.IsIgnorableMySQLDDLError, sqls) 1782 require.NoError(t, err) 1783 require.Equal(t, 1, n) 1784 1785 // will return error when execute the first sql 1786 mock.ExpectBegin() 1787 mock.ExpectExec(sqls[0]).WillReturnError(newMysqlErr(uint16(infoschema.ErrColumnExists.Code()), "column a already exists")) 1788 mock.ExpectRollback() 1789 1790 n, err = conn.ExecuteSQL(tctx, nil, sqls) 1791 require.ErrorContains(t, err, "column a already exists") 1792 require.Equal(t, 0, n) 1793 1794 require.NoError(t, mock.ExpectationsWereMet()) 1795 } 1796 1797 func genDefaultSubTaskConfig4Test() *config.SubTaskConfig { 1798 loaderDir, err := os.MkdirTemp("", "loader") 1799 if err != nil { 1800 panic(err) // no happen 1801 } 1802 1803 loaderCfg := config.LoaderConfig{ 1804 Dir: loaderDir, 1805 } 1806 cfg := &config.SubTaskConfig{ 1807 From: config.GetDBConfigForTest(), 1808 To: config.GetDBConfigForTest(), 1809 ServerID: 101, 1810 MetaSchema: "test", 1811 Name: "syncer_ut", 1812 ShadowTableRules: []string{config.DefaultShadowTableRules}, 1813 TrashTableRules: []string{config.DefaultTrashTableRules}, 1814 Mode: config.ModeIncrement, 1815 Flavor: "mysql", 1816 LoaderConfig: loaderCfg, 1817 UseRelay: false, 1818 } 1819 cfg.Experimental.AsyncCheckpointFlush = true 1820 cfg.From.Adjust() 1821 cfg.To.Adjust() 1822 return cfg 1823 } 1824 1825 func TestWaitBeforeRunExit(t *testing.T) { 1826 ctx, cancel := context.WithCancel(context.Background()) 1827 cfg := genDefaultSubTaskConfig4Test() 1828 cfg.WorkerCount = 0 1829 cfg.To.Session = map[string]string{ 1830 "sql_mode": "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION", 1831 "tidb_skip_utf8_check": "0", 1832 } 1833 syncer := NewSyncer(cfg, nil, nil) 1834 syncer.metricsProxies = metrics.DefaultMetricsProxies.CacheForOneTask("task", "worker", "source") 1835 1836 db, mock, err := sqlmock.New() 1837 require.NoError(t, err) 1838 mockGetServerUnixTS(mock) 1839 1840 syncer.fromDB = &dbconn.UpStreamConn{BaseDB: conn.NewBaseDBForTest(db)} 1841 syncer.reset() 1842 require.NoError(t, syncer.genRouter()) 1843 1844 mockStreamerProducer := &MockStreamProducer{} 1845 mockStreamer, err := mockStreamerProducer.GenerateStreamFrom(binlog.MustZeroLocation(mysql.MySQLFlavor)) 1846 require.NoError(t, err) 1847 // let getEvent pending until ctx.Done() 1848 mockStreamer.(*MockStreamer).pending = true 1849 syncer.streamerController = binlogstream.NewStreamerController4Test( 1850 mockStreamerProducer, 1851 mockStreamer, 1852 ) 1853 1854 wg := &sync.WaitGroup{} 1855 errCh := make(chan error, 1) 1856 wg.Add(1) 1857 go func() { 1858 defer wg.Done() 1859 errCh <- syncer.Run(ctx) 1860 }() 1861 time.Sleep(time.Second) // wait s.Run start 1862 1863 // test s.Run will not exit unit caller cancel ctx or call s.runCancel 1864 require.Len(t, errCh, 0) 1865 cancel() // this will make s.Run exit 1866 wg.Wait() 1867 <-errCh 1868 1869 // test syncer wait time not more than maxPauseOrStopWaitTime 1870 oldMaxPauseOrStopWaitTime := defaultMaxPauseOrStopWaitTime 1871 defaultMaxPauseOrStopWaitTime = time.Second 1872 ctx2, cancel := context.WithCancel(context.Background()) 1873 cancel() 1874 runCtx, runCancel := context.WithCancel(context.Background()) 1875 syncer.runCtx, syncer.runCancel = tcontext.NewContext(runCtx, syncer.tctx.L()), runCancel 1876 syncer.isTransactionEnd = false 1877 syncer.runWg.Add(1) 1878 syncer.waitBeforeRunExit(ctx2) 1879 require.Equal(t, context.Canceled, syncer.runCtx.Ctx.Err()) 1880 defaultMaxPauseOrStopWaitTime = oldMaxPauseOrStopWaitTime 1881 1882 // test use cliArgs 1883 require.NoError(t, failpoint.Enable("github.com/pingcap/tiflow/dm/syncer/recordAndIgnorePrepareTime", "return()")) 1884 syncer.cliArgs = &config.TaskCliArgs{WaitTimeOnStop: "2s"} 1885 ctx3, cancel := context.WithCancel(context.Background()) 1886 cancel() 1887 runCtx, runCancel = context.WithCancel(context.Background()) 1888 syncer.runCtx, syncer.runCancel = tcontext.NewContext(runCtx, syncer.tctx.L()), runCancel 1889 syncer.runWg.Add(1) 1890 syncer.waitBeforeRunExit(ctx3) 1891 require.Equal(t, context.Canceled, syncer.runCtx.Ctx.Err()) 1892 require.Equal(t, 2*time.Second, waitBeforeRunExitDurationForTest) 1893 require.NoError(t, failpoint.Disable("github.com/pingcap/tiflow/dm/syncer/recordAndIgnorePrepareTime")) 1894 } 1895 1896 func TestSyncerGetTableInfo(t *testing.T) { 1897 cfg := genDefaultSubTaskConfig4Test() 1898 cfg.WorkerCount = 0 1899 syncer := NewSyncer(cfg, nil, nil) 1900 ctx := context.Background() 1901 tctx := tcontext.Background() 1902 1903 db, mock, err := sqlmock.New() 1904 require.NoError(t, err) 1905 dbConn, err := db.Conn(ctx) 1906 require.NoError(t, err) 1907 baseConn := conn.NewBaseConnForTest(dbConn, &retry.FiniteRetryStrategy{}) 1908 syncer.ddlDBConn = dbconn.NewDBConn(cfg, baseConn) 1909 syncer.downstreamTrackConn = dbconn.NewDBConn(cfg, conn.NewBaseConnForTest(dbConn, &retry.FiniteRetryStrategy{})) 1910 syncer.schemaTracker, err = schema.NewTestTracker(ctx, cfg.Name, syncer.downstreamTrackConn, log.L()) 1911 require.NoError(t, err) 1912 defer syncer.schemaTracker.Close() 1913 1914 upTable := &filter.Table{ 1915 Schema: "test", 1916 Name: "up", 1917 } 1918 downTable := &filter.Table{ 1919 Schema: "test", 1920 Name: "down", 1921 } 1922 1923 mock.ExpectQuery("SHOW VARIABLES LIKE .*").WillReturnRows( 1924 sqlmock.NewRows([]string{"Variable_name", "Value"}).AddRow("sql_mode", "")) 1925 mock.ExpectQuery("SHOW CREATE TABLE.*").WillReturnRows( 1926 sqlmock.NewRows([]string{"Table", "Create Table"}). 1927 AddRow(downTable.Name, " CREATE TABLE `"+downTable.Name+"` (c1 int, c2 int)")) 1928 1929 ti, err := syncer.getTableInfo(tctx, upTable, downTable) 1930 require.NoError(t, err) 1931 require.Len(t, ti.Columns, 2) 1932 // get again, since it's cached, should return the same result 1933 ti, err = syncer.getTableInfo(tctx, upTable, downTable) 1934 require.NoError(t, err) 1935 require.Len(t, ti.Columns, 2) 1936 1937 noExistTbl := &filter.Table{ 1938 Schema: "test", 1939 Name: "not-exist", 1940 } 1941 mock.ExpectQuery("SHOW VARIABLES LIKE .*").WillReturnRows( 1942 sqlmock.NewRows([]string{"Variable_name", "Value"}).AddRow("sql_mode", "")) 1943 mock.ExpectQuery("SHOW CREATE TABLE.*").WillReturnRows( 1944 sqlmock.NewRows([]string{"Table", "Create Table"})) 1945 _, err = syncer.getTableInfo(tctx, noExistTbl, noExistTbl) 1946 require.Error(t, err) 1947 } 1948 1949 func TestCheckCanUpdateCfg(t *testing.T) { 1950 cfg := genDefaultSubTaskConfig4Test() 1951 syncer := NewSyncer(cfg, nil, nil) 1952 1953 // update to a not change cfg is ok 1954 require.NoError(t, syncer.CheckCanUpdateCfg(cfg)) 1955 1956 cfg2 := genDefaultSubTaskConfig4Test() 1957 cfg2.Name = "new name" 1958 // updated to a not allowed field 1959 require.True(t, terror.ErrWorkerUpdateSubTaskConfig.Equal(syncer.CheckCanUpdateCfg(cfg2))) 1960 1961 // update ba list or route rules or filter rules is ok or syncerCfg 1962 cfg2.Name = cfg.Name 1963 1964 cfg2.BAList = &filter.Rules{DoDBs: []string{"test"}} 1965 cfg2.RouteRules = []*router.TableRule{{SchemaPattern: "test", TargetSchema: "test1"}} 1966 cfg2.FilterRules = []*bf.BinlogEventRule{{SchemaPattern: "test"}} 1967 cfg2.SyncerConfig.Compact = !cfg.SyncerConfig.Compact 1968 require.NoError(t, syncer.CheckCanUpdateCfg(cfg)) 1969 }