github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/dm/relay/relay_test.go

// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package relay

import (
	"bytes"
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"testing"
	"time"

	"github.com/BurntSushi/toml"
	"github.com/DATA-DOG/go-sqlmock"
	gmysql "github.com/go-mysql-org/go-mysql/mysql"
	"github.com/go-mysql-org/go-mysql/replication"
	. "github.com/pingcap/check"
	"github.com/pingcap/errors"
	"github.com/pingcap/failpoint"
	"github.com/pingcap/tidb/pkg/parser"
	"github.com/pingcap/tiflow/dm/config/dbconfig"
	"github.com/pingcap/tiflow/dm/pkg/binlog/event"
	"github.com/pingcap/tiflow/dm/pkg/conn"
	tcontext "github.com/pingcap/tiflow/dm/pkg/context"
	"github.com/pingcap/tiflow/dm/pkg/gtid"
	"github.com/pingcap/tiflow/dm/pkg/log"
	"github.com/pingcap/tiflow/dm/pkg/utils"
)

var _ = Suite(&testRelaySuite{})

func TestSuite(t *testing.T) {
	TestingT(t)
}

type testRelaySuite struct{}

func newRelayCfg(c *C, flavor string) *Config {
	dbCfg := getDBConfigForTest()
	return &Config{
		EnableGTID: false, // position mode, so auto-positioning can work
		Flavor:     flavor,
		RelayDir:   c.MkDir(),
		ServerID:   12321,
		From: dbconfig.DBConfig{
			Host:     dbCfg.Host,
			Port:     dbCfg.Port,
			User:     dbCfg.User,
			Password: dbCfg.Password,
		},
		ReaderRetry: ReaderRetryConfig{
			BackoffRollback: 200 * time.Millisecond,
			BackoffMax:      1 * time.Second,
			BackoffMin:      1 * time.Millisecond,
			BackoffJitter:   true,
			BackoffFactor:   2,
		},
	}
}

func getDBConfigForTest() *dbconfig.DBConfig {
	host := os.Getenv("MYSQL_HOST")
	if host == "" {
		host = "127.0.0.1"
	}
	port, _ := strconv.Atoi(os.Getenv("MYSQL_PORT"))
	if port == 0 {
		port = 3306
	}
	user := os.Getenv("MYSQL_USER")
	if user == "" {
		user = "root"
	}
	password := os.Getenv("MYSQL_PSWD")
	return &dbconfig.DBConfig{
		Host:     host,
		Port:     port,
		User:     user,
		Password: password,
	}
}

// mockReader is used only for relay testing.
type mockReader struct {
	result RResult
	err    error
}

func (r *mockReader) Start() error {
	return nil
}

func (r *mockReader) Close() error {
	return nil
}

func (r *mockReader) GetEvent(ctx context.Context) (RResult, error) {
	select {
	case <-ctx.Done():
		return RResult{}, ctx.Err()
	default:
	}
	return r.result, r.err
}

// mockWriter is used only for relay testing.
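// It records the latest event passed to WriteEvent so tests can assert what was written.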
type mockWriter struct {
	result      WResult
	err         error
	latestEvent *replication.BinlogEvent
}

func (w *mockWriter) IsActive(uuid, filename string) (bool, int64) {
	return false, 0
}

func (w *mockWriter) Close() error {
	return nil
}

func (w *mockWriter) Init(relayDir, filename string) {
}

func (w *mockWriter) WriteEvent(ev *replication.BinlogEvent) (WResult, error) {
	w.latestEvent = ev // hold it
	return w.result, w.err
}

func (w *mockWriter) Flush() error {
	return nil
}

func (t *testRelaySuite) TestTryRecoverLatestFile(c *C) {
	var (
		uuid               = "24ecd093-8cec-11e9-aa0d-0242ac170002"
		uuidWithSuffix     = fmt.Sprintf("%s.000001", uuid)
		previousGTIDSetStr = "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-14,53bfca22-690d-11e7-8a62-18ded7a37b78:1-495,406a3f61-690d-11e7-87c5-6c92bf46f384:123-456"
		latestGTIDStr1     = "3ccc475b-2343-11e7-be21-6c0b84d59f30:14"
		latestGTIDStr2     = "53bfca22-690d-11e7-8a62-18ded7a37b78:495"
		recoverGTIDSetStr  = "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-18,53bfca22-690d-11e7-8a62-18ded7a37b78:1-505,406a3f61-690d-11e7-87c5-6c92bf46f384:1-456" // 406a3f61-690d-11e7-87c5-6c92bf46f384:123-456 --> 406a3f61-690d-11e7-87c5-6c92bf46f384:1-456
		greaterGTIDSetStr  = "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-20,53bfca22-690d-11e7-8a62-18ded7a37b78:1-510,406a3f61-690d-11e7-87c5-6c92bf46f384:123-456"
		filename           = "mysql-bin.000001"
		startPos           = gmysql.Position{Name: filename, Pos: 123}

		parser2  = parser.New()
		relayCfg = newRelayCfg(c, gmysql.MySQLFlavor)
		r        = NewRelay(relayCfg).(*Relay)
	)
	c.Assert(failpoint.Enable("github.com/pingcap/tiflow/dm/pkg/conn/GetGTIDPurged", `return("406a3f61-690d-11e7-87c5-6c92bf46f384:1-122")`), IsNil)
	//nolint:errcheck
	defer failpoint.Disable("github.com/pingcap/tiflow/dm/pkg/conn/GetGTIDPurged")
	cfg := getDBConfigForTest()
	conn.InitMockDB(c)
	db, err := conn.GetUpstreamDB(cfg)
	c.Assert(err, IsNil)
	r.db = db
	c.Assert(r.Init(context.Background()), IsNil)
	// purge old relay dir
	f, err := os.Create(filepath.Join(r.cfg.RelayDir, "old_relay_log"))
	c.Assert(err, IsNil)
	f.Close()
	c.Assert(r.PurgeRelayDir(), IsNil)
	files, err := os.ReadDir(r.cfg.RelayDir)
	c.Assert(err, IsNil)
	c.Assert(files, HasLen, 0)

	c.Assert(r.meta.Load(), IsNil)

	// no file specified, no need to recover
	c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil)

	// save position into meta
	c.Assert(r.meta.AddDir(uuid, &startPos, nil, 0), IsNil)

	// relay log file does not exist, no need to recover
	c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil)

	// use a generator to generate some binlog events
	previousGTIDSet, err := gtid.ParserGTID(relayCfg.Flavor, previousGTIDSetStr)
	c.Assert(err, IsNil)
	latestGTID1, err := gtid.ParserGTID(relayCfg.Flavor, latestGTIDStr1)
	c.Assert(err, IsNil)
	latestGTID2, err := gtid.ParserGTID(relayCfg.Flavor, latestGTIDStr2)
	c.Assert(err, IsNil)
	g, events, data := genBinlogEventsWithGTIDs(c, relayCfg.Flavor, previousGTIDSet, latestGTID1, latestGTID2)

	// write events into relay log file
	err = os.WriteFile(filepath.Join(r.meta.Dir(), filename), data, 0o600)
	c.Assert(err, IsNil)

	// all events/transactions are complete, no need to recover
	c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil)
	// now the position/GTID set in meta should be updated to the latest location in the relay log
	lastEvent := events[len(events)-1]
	pos := startPos
	pos.Pos = lastEvent.Header.LogPos
	t.verifyMetadata(c, r, uuidWithSuffix, pos, recoverGTIDSetStr, []string{uuidWithSuffix})

	// write some invalid data into the relay log file
	f, err = os.OpenFile(filepath.Join(r.meta.Dir(), filename), os.O_WRONLY|os.O_APPEND, 0o600)
	c.Assert(err, IsNil)
	_, err = f.Write([]byte("invalid event data"))
	c.Assert(err, IsNil)
	f.Close()

	// write a greater GTID set into meta
	greaterGTIDSet, err := gtid.ParserGTID(relayCfg.Flavor, greaterGTIDSetStr)
	c.Assert(err, IsNil)
	c.Assert(r.SaveMeta(startPos, greaterGTIDSet), IsNil)

	// invalid data truncated, meta updated
	c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil)
	_, latestPos := r.meta.Pos()
	c.Assert(latestPos, DeepEquals, gmysql.Position{Name: filename, Pos: g.LatestPos})
	_, latestGTIDs := r.meta.GTID()
	recoverGTIDSet, err := gtid.ParserGTID(relayCfg.Flavor, recoverGTIDSetStr)
	c.Assert(err, IsNil)
	c.Assert(latestGTIDs.Equal(recoverGTIDSet), IsTrue) // verifyMetadata is not enough

	// no relay log file needs to be recovered
	c.Assert(r.SaveMeta(minCheckpoint, latestGTIDs), IsNil)
	c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil)
	_, latestPos = r.meta.Pos()
	c.Assert(latestPos, DeepEquals, minCheckpoint)
	_, latestGTIDs = r.meta.GTID()
	c.Assert(latestGTIDs.Contain(g.LatestGTID), IsTrue)
}

func (t *testRelaySuite) TestTryRecoverMeta(c *C) {
	var (
		uuid               = "24ecd093-8cec-11e9-aa0d-0242ac170002"
		previousGTIDSetStr = "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-14,53bfca22-690d-11e7-8a62-18ded7a37b78:1-495,406a3f61-690d-11e7-87c5-6c92bf46f384:123-456"
		latestGTIDStr1     = "3ccc475b-2343-11e7-be21-6c0b84d59f30:14"
		latestGTIDStr2     = "53bfca22-690d-11e7-8a62-18ded7a37b78:495"
		// if there is no @@gtid_purged, 406a3f61-690d-11e7-87c5-6c92bf46f384:123-456 should not be changed
		recoverGTIDSetStr = "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-18,53bfca22-690d-11e7-8a62-18ded7a37b78:1-505,406a3f61-690d-11e7-87c5-6c92bf46f384:123-456"
		filename          = "mysql-bin.000001"
		startPos          = gmysql.Position{Name: filename, Pos: 123}

		parser2  = parser.New()
		relayCfg = newRelayCfg(c, gmysql.MySQLFlavor)
		r        = NewRelay(relayCfg).(*Relay)
	)
	cfg := getDBConfigForTest()
	conn.InitMockDB(c)
	db, err := conn.GetUpstreamDB(cfg)
	c.Assert(err, IsNil)
	r.db = db
	c.Assert(r.Init(context.Background()), IsNil)
	recoverGTIDSet, err := gtid.ParserGTID(relayCfg.Flavor, recoverGTIDSetStr)
	c.Assert(err, IsNil)

	c.Assert(r.meta.AddDir(uuid, &startPos, nil, 0), IsNil)
	c.Assert(r.meta.Load(), IsNil)

	// use a generator to generate some binlog events
	previousGTIDSet, err := gtid.ParserGTID(relayCfg.Flavor, previousGTIDSetStr)
	c.Assert(err, IsNil)
	latestGTID1, err := gtid.ParserGTID(relayCfg.Flavor, latestGTIDStr1)
	c.Assert(err, IsNil)
	latestGTID2, err := gtid.ParserGTID(relayCfg.Flavor, latestGTIDStr2)
	c.Assert(err, IsNil)
	g, _, data := genBinlogEventsWithGTIDs(c, relayCfg.Flavor, previousGTIDSet, latestGTID1, latestGTID2)

	// write events into relay log file
	err = os.WriteFile(filepath.Join(r.meta.Dir(), filename), data, 0o600)
	c.Assert(err, IsNil)
	// write some invalid data into the relay log file to trigger a recovery.
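	// tryRecoverLatestFile is expected to truncate the trailing garbage and reset the
	// meta position/GTID set to the last complete transaction.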
	f, err := os.OpenFile(filepath.Join(r.meta.Dir(), filename), os.O_WRONLY|os.O_APPEND, 0o600)
	c.Assert(err, IsNil)
	_, err = f.Write([]byte("invalid event data"))
	c.Assert(err, IsNil)
	f.Close()

	// recover with empty GTIDs.
	c.Assert(failpoint.Enable("github.com/pingcap/tiflow/dm/pkg/conn/GetGTIDPurged", `return("")`), IsNil)
	//nolint:errcheck
	defer failpoint.Disable("github.com/pingcap/tiflow/dm/pkg/conn/GetGTIDPurged")
	c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil)
	_, latestPos := r.meta.Pos()
	c.Assert(latestPos, DeepEquals, gmysql.Position{Name: filename, Pos: g.LatestPos})
	_, latestGTIDs := r.meta.GTID()
	c.Assert(latestGTIDs.Equal(recoverGTIDSet), IsTrue)

	// write some invalid data into the relay log file again.
	f, err = os.OpenFile(filepath.Join(r.meta.Dir(), filename), os.O_WRONLY|os.O_APPEND, 0o600)
	c.Assert(err, IsNil)
	_, err = f.Write([]byte("invalid event data"))
	c.Assert(err, IsNil)
	f.Close()

	// recover with a subset of GTIDs (the previous GTID set).
	c.Assert(r.SaveMeta(startPos, previousGTIDSet), IsNil)
	c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil)
	_, latestPos = r.meta.Pos()
	c.Assert(latestPos, DeepEquals, gmysql.Position{Name: filename, Pos: g.LatestPos})
	_, latestGTIDs = r.meta.GTID()
	c.Assert(latestGTIDs.Equal(recoverGTIDSet), IsTrue)
}

type dummyListener bool

func (d *dummyListener) OnEvent(e *replication.BinlogEvent) {
	*d = true
}

func (t *testRelaySuite) TestListener(c *C) {
	relay := NewRelay(&Config{}).(*Relay)
	c.Assert(len(relay.listeners), Equals, 0)

	lis := dummyListener(false)
	relay.RegisterListener(&lis)
	c.Assert(len(relay.listeners), Equals, 1)

	relay.notify(nil)
	c.Assert(bool(lis), Equals, true)

	relay.UnRegisterListener(&lis)
	c.Assert(len(relay.listeners), Equals, 0)
	lis = false
	relay.notify(nil)
	c.Assert(bool(lis), Equals, false)
}

// genBinlogEventsWithGTIDs generates some binlog events used by testFileUtilSuite and testFileWriterSuite.
// Currently the generated events include 3 DDLs and 10 DMLs.
func genBinlogEventsWithGTIDs(c *C, flavor string, previousGTIDSet, latestGTID1, latestGTID2 gmysql.GTIDSet) (*event.Generator, []*replication.BinlogEvent, []byte) {
	var (
		serverID  uint32 = 11
		latestPos uint32
		latestXID uint64 = 10

		allEvents = make([]*replication.BinlogEvent, 0, 50)
		allData   bytes.Buffer
	)

	// use a binlog event generator to generate some binlog events.
	g, err := event.NewGenerator(flavor, serverID, latestPos, latestGTID1, previousGTIDSet, latestXID)
	c.Assert(err, IsNil)

	// file header with FormatDescriptionEvent and PreviousGTIDsEvent
	events, data, err := g.GenFileHeader(0)
	c.Assert(err, IsNil)
	allEvents = append(allEvents, events...)
	allData.Write(data)

	// CREATE DATABASE/TABLE, 3 DDL
	queries := []string{
		"CREATE DATABASE `db`",
		"COMMIT",
		"CREATE TABLE `db`.`tbl1` (c1 INT)",
		"CREATE TABLE `db`.`tbl2` (c1 INT)",
	}
	for _, query := range queries {
		events, data, err = g.GenDDLEvents("db", query, 0)
		c.Assert(err, IsNil)
		allEvents = append(allEvents, events...)
		allData.Write(data)
	}

	// DMLs, 10 DML
	g.LatestGTID = latestGTID2 // use another latest GTID with different SID/DomainID
	var (
		tableID    uint64 = 8
		columnType        = []byte{gmysql.MYSQL_TYPE_LONG}
		eventType         = replication.WRITE_ROWS_EVENTv2
		schema            = "db"
		table             = "tbl1"
	)
	for i := 0; i < 10; i++ {
		insertRows := make([][]interface{}, 0, 1)
		insertRows = append(insertRows, []interface{}{int32(i)})
		dmlData := []*event.DMLData{
			{
				TableID:    tableID,
				Schema:     schema,
				Table:      table,
				ColumnType: columnType,
				Rows:       insertRows,
			},
		}
		events, data, err = g.GenDMLEvents(eventType, dmlData, 0)
		c.Assert(err, IsNil)
		allEvents = append(allEvents, events...)
		allData.Write(data)
	}

	return g, allEvents, allData.Bytes()
}

func (t *testRelaySuite) TestHandleEvent(c *C) {
	// NOTE: we can test metrics later.
	var (
		reader2  = &mockReader{}
		parser2  = parser.New()
		writer2  = &mockWriter{}
		relayCfg = newRelayCfg(c, gmysql.MariaDBFlavor)
		r        = NewRelay(relayCfg).(*Relay)

		eventHeader = &replication.EventHeader{
			Timestamp: uint32(time.Now().Unix()),
			ServerID:  11,
		}
		binlogPos       = gmysql.Position{Name: "mysql-bin.666888", Pos: 4}
		rotateEv, _     = event.GenRotateEvent(eventHeader, 123, []byte(binlogPos.Name), uint64(binlogPos.Pos))
		fakeRotateEv, _ = event.GenRotateEvent(eventHeader, 0, []byte(binlogPos.Name), uint64(1234))
		queryEv, _      = event.GenQueryEvent(eventHeader, 123, 0, 0, 0, nil, nil, []byte("CREATE DATABASE db_relay_test"))
	)

	r.writer = writer2

	cfg := getDBConfigForTest()
	conn.InitMockDB(c)
	db, err := conn.GetUpstreamDB(cfg)
	c.Assert(err, IsNil)
	r.db = db
	c.Assert(r.Init(context.Background()), IsNil)
	// NOTE: we can mock meta later.
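	// Load the on-disk meta and register a relay sub-directory for the source UUID,
	// so handleEvents can flush positions into it.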
	c.Assert(r.meta.Load(), IsNil)
	c.Assert(r.meta.AddDir("24ecd093-8cec-11e9-aa0d-0242ac170002", nil, nil, 0), IsNil)

	// attach GTID sets to QueryEv
	queryEv2 := queryEv.Event.(*replication.QueryEvent)
	queryEv2.GSet, _ = gmysql.ParseGTIDSet(relayCfg.Flavor, "1-2-3")

	// reader returns an error
	for _, reader2.err = range []error{
		errors.New("reader error for testing"),
		replication.ErrChecksumMismatch,
		replication.ErrSyncClosed,
		replication.ErrNeedSyncAgain,
	} {
		handleErr := r.handleEvents(context.Background(), reader2, parser2)
		c.Assert(errors.Cause(handleErr), Equals, reader2.err)
	}

	// reader returns a fake rotate event
	reader2.err = nil
	reader2.result.Event = fakeRotateEv
	// writer returns an error to force handleEvents to return
	writer2.err = errors.New("writer error for testing")
	// return with the annotated writer error
	err = r.handleEvents(context.Background(), reader2, parser2)
	c.Assert(errors.Cause(err), Equals, writer2.err)
	// after handling the rotate event, we save and flush the meta immediately
	c.Assert(r.meta.Dirty(), Equals, false)
	{
		lm := r.meta.(*LocalMeta)
		c.Assert(lm.BinLogName, Equals, "mysql-bin.666888")
		c.Assert(lm.BinLogPos, Equals, uint32(1234))
		filename := filepath.Join(lm.baseDir, lm.currentSubDir, utils.MetaFilename)
		lm2 := &LocalMeta{}
		_, err2 := toml.DecodeFile(filename, lm2)
		c.Assert(err2, IsNil)
		c.Assert(lm2.BinLogName, Equals, "mysql-bin.666888")
		c.Assert(lm2.BinLogPos, Equals, uint32(1234))
	}
	{
		lm := r.meta.(*LocalMeta)
		backupUUID := lm.currentSubDir
		lm.currentSubDir = "not exist"
		err = r.handleEvents(context.Background(), reader2, parser2)
		c.Assert(os.IsNotExist(errors.Cause(err)), Equals, true)
		lm.currentSubDir = backupUUID
	}

	// reader returns a valid event
	reader2.err = nil
	reader2.result.Event = rotateEv

	// writer returns an error
	writer2.err = errors.New("writer error for testing")
	// return with the annotated writer error
	err = r.handleEvents(context.Background(), reader2, parser2)
	c.Assert(errors.Cause(err), Equals, writer2.err)
	c.Assert(r.meta.Dirty(), Equals, false)

	// writer without error
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	writer2.err = nil
	err = r.handleEvents(ctx, reader2, parser2) // returns when ctx times out
	c.Assert(errors.Cause(err), Equals, ctx.Err())
	// check written event
	c.Assert(writer2.latestEvent, Equals, reader2.result.Event)
	// check meta
	_, pos := r.meta.Pos()
	_, gs := r.meta.GTID()
	c.Assert(pos, DeepEquals, binlogPos)
	c.Assert(gs.String(), Equals, "") // no GTID sets in event yet

	ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel2()

	// write a QueryEvent with GTID sets
	reader2.result.Event = queryEv
	err = r.handleEvents(ctx2, reader2, parser2)
	c.Assert(errors.Cause(err), Equals, ctx.Err())
	// check written event
	c.Assert(writer2.latestEvent, Equals, reader2.result.Event)
	// check meta
	_, pos = r.meta.Pos()
	_, gs = r.meta.GTID()
	c.Assert(pos.Name, Equals, binlogPos.Name)
	c.Assert(pos.Pos, Equals, queryEv.Header.LogPos)
	c.Assert(gs, DeepEquals, queryEv2.GSet) // got GTID sets

	// the transformer returns ignorable for the event
	reader2.err = nil
	reader2.result.Event = &replication.BinlogEvent{
		Header: &replication.EventHeader{EventType: replication.HEARTBEAT_EVENT},
		Event:  &replication.GenericEvent{},
	}
	ctx4, cancel4 := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel4()
	err = r.handleEvents(ctx4, reader2, parser2)
	c.Assert(errors.Cause(err), Equals, ctx.Err())
	select {
	case <-ctx4.Done():
	default:
		c.Fatalf("ignorable event for transformer not ignored")
	}

	// the writer returns ignorable for the event
	reader2.result.Event = queryEv
	writer2.result.Ignore = true
	ctx5, cancel5 := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel5()
	err = r.handleEvents(ctx5, reader2, parser2)
	c.Assert(errors.Cause(err), Equals, ctx.Err())
	select {
	case <-ctx5.Done():
	default:
		c.Fatalf("ignorable event for writer not ignored")
	}
}

func (t *testRelaySuite) TestReSetupMeta(c *C) {
	ctx, cancel := context.WithTimeout(context.Background(), conn.DefaultDBTimeout)
	defer cancel()

	var (
		relayCfg = newRelayCfg(c, gmysql.MySQLFlavor)
		r        = NewRelay(relayCfg).(*Relay)
	)
	cfg := getDBConfigForTest()
	mockDB := conn.InitMockDB(c)
	db, err := conn.GetUpstreamDB(cfg)
	c.Assert(err, IsNil)
	r.db = db
	c.Assert(r.Init(context.Background()), IsNil)

	// empty metadata
	c.Assert(r.meta.Load(), IsNil)
	t.verifyMetadata(c, r, "", minCheckpoint, "", nil)

	// open a connected DB and get its UUID
	defer func() {
		r.db.Close()
		r.db = nil
	}()
	mockGetServerUUID(mockDB)
	uuid, err := conn.GetServerUUID(tcontext.NewContext(ctx, log.L()), r.db, r.cfg.Flavor)
	c.Assert(err, IsNil)

	// re-setup meta with the start pos adjusted
	r.cfg.EnableGTID = true
	r.cfg.BinlogGTID = "24ecd093-8cec-11e9-aa0d-0242ac170002:1-23"
	r.cfg.BinLogName = "mysql-bin.000005"

	c.Assert(r.setSyncConfig(), IsNil)
	// all adjusted gsets should be empty since we didn't flush logs
	emptyGTID, err := gtid.ParserGTID(r.cfg.Flavor, "")
	c.Assert(err, IsNil)

	mockGetServerUUID(mockDB)
	mockGetRandomServerID(mockDB)
	// mock AddGSetWithPurged
	mockDB.ExpectQuery("select @@GLOBAL.gtid_purged").WillReturnRows(sqlmock.NewRows([]string{"@@GLOBAL.gtid_purged"}).AddRow(""))
	c.Assert(failpoint.Enable("github.com/pingcap/tiflow/dm/pkg/binlog/reader/MockGetEmptyPreviousGTIDFromGTIDSet", "return()"), IsNil)
	//nolint:errcheck
	defer failpoint.Disable("github.com/pingcap/tiflow/dm/pkg/binlog/reader/MockGetEmptyPreviousGTIDFromGTIDSet")
	c.Assert(r.reSetupMeta(ctx), IsNil)
	uuid001 := fmt.Sprintf("%s.000001", uuid)
	t.verifyMetadata(c, r, uuid001, gmysql.Position{Name: r.cfg.BinLogName, Pos: 4}, emptyGTID.String(), []string{uuid001})

	// re-setup meta again; this often happens when connecting to a server behind a VIP.
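	// The upstream UUID is unchanged, so a new relay sub-directory with the next
	// suffix (.000002) is created and recorded in the UUID index.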
	mockGetServerUUID(mockDB)
	mockGetRandomServerID(mockDB)
	mockDB.ExpectQuery("select @@GLOBAL.gtid_purged").WillReturnRows(sqlmock.NewRows([]string{"@@GLOBAL.gtid_purged"}).AddRow(""))
	c.Assert(r.reSetupMeta(ctx), IsNil)
	uuid002 := fmt.Sprintf("%s.000002", uuid)
	t.verifyMetadata(c, r, uuid002, minCheckpoint, emptyGTID.String(), []string{uuid001, uuid002})

	r.cfg.BinLogName = "mysql-bin.000002"
	r.cfg.BinlogGTID = "24ecd093-8cec-11e9-aa0d-0242ac170002:1-50,24ecd093-8cec-11e9-aa0d-0242ac170003:1-50"
	r.cfg.UUIDSuffix = 2
	mockGetServerUUID(mockDB)
	mockGetRandomServerID(mockDB)
	mockDB.ExpectQuery("select @@GLOBAL.gtid_purged").WillReturnRows(sqlmock.NewRows([]string{"@@GLOBAL.gtid_purged"}).AddRow(""))
	c.Assert(r.reSetupMeta(ctx), IsNil)
	t.verifyMetadata(c, r, uuid002, gmysql.Position{Name: r.cfg.BinLogName, Pos: 4}, emptyGTID.String(), []string{uuid002})

	// re-setup meta again; this often happens when connecting to a server behind a VIP.
	mockGetServerUUID(mockDB)
	mockGetRandomServerID(mockDB)
	mockDB.ExpectQuery("select @@GLOBAL.gtid_purged").WillReturnRows(sqlmock.NewRows([]string{"@@GLOBAL.gtid_purged"}).AddRow(""))
	c.Assert(r.reSetupMeta(ctx), IsNil)
	uuid003 := fmt.Sprintf("%s.000003", uuid)
	t.verifyMetadata(c, r, uuid003, minCheckpoint, emptyGTID.String(), []string{uuid002, uuid003})
	c.Assert(mockDB.ExpectationsWereMet(), IsNil)
}

func (t *testRelaySuite) verifyMetadata(c *C, r *Relay, uuidExpected string,
	posExpected gmysql.Position, gsStrExpected string, uuidsExpected []string,
) {
	uuid, pos := r.meta.Pos()
	_, gs := r.meta.GTID()
	gsExpected, err := gtid.ParserGTID(gmysql.MySQLFlavor, gsStrExpected)
	c.Assert(err, IsNil)
	c.Assert(uuid, Equals, uuidExpected)
	c.Assert(pos, DeepEquals, posExpected)
	c.Assert(gs.Equal(gsExpected), IsTrue)

	indexFile := filepath.Join(r.cfg.RelayDir, utils.UUIDIndexFilename)
	UUIDs, err := utils.ParseUUIDIndex(indexFile)
	c.Assert(err, IsNil)
	c.Assert(UUIDs, DeepEquals, uuidsExpected)
}

func (t *testRelaySuite) TestPreprocessEvent(c *C) {
	type Case struct {
		event  *replication.BinlogEvent
		result preprocessResult
	}
	relay := &Relay{}
	parser2 := parser.New()
	var (
		header = &replication.EventHeader{
			Timestamp: uint32(time.Now().Unix()),
			ServerID:  11,
			Flags:     0x01,
		}
		latestPos uint32 = 456789
		gtidStr          = "9f61c5f9-1eef-11e9-b6cf-0242ac140003:5"
		gtidSet, _       = gtid.ParserGTID(gmysql.MySQLFlavor, gtidStr)
		schema           = []byte("test_schema")
		cases            = make([]Case, 0, 10)
	)

	// RotateEvent
	nextLogName := "mysql-bin.000123"
	position := uint64(4)
	ev, err := event.GenRotateEvent(header, latestPos, []byte(nextLogName), position)
	c.Assert(err, IsNil)
	cases = append(cases, Case{
		event: ev,
		result: preprocessResult{
			LogPos:      uint32(position),
			NextLogName: nextLogName,
		},
	})

	// fake RotateEvent with zero timestamp
	header.Timestamp = 0
	ev, err = event.GenRotateEvent(header, latestPos, []byte(nextLogName), position)
	c.Assert(err, IsNil)
	cases = append(cases, Case{
		event: ev,
		result: preprocessResult{
			LogPos:      uint32(position),
			NextLogName: nextLogName,
		},
	})
	header.Timestamp = uint32(time.Now().Unix()) // set to non-zero

	// fake RotateEvent with zero logPos
	fakeRotateHeader := *header
	ev, err = event.GenRotateEvent(&fakeRotateHeader, latestPos, []byte(nextLogName), position)
	c.Assert(err, IsNil)
	ev.Header.LogPos = 0 // set to zero
	cases = append(cases, Case{
		event: ev,
		result: preprocessResult{
			LogPos:      uint32(position),
			NextLogName: nextLogName,
		},
	})

	// QueryEvent for DDL
	query := []byte("CREATE TABLE test_tbl (c1 INT)")
	ev, err = event.GenQueryEvent(header, latestPos, 0, 0, 0, nil, schema, query)
	c.Assert(err, IsNil)
	ev.Event.(*replication.QueryEvent).GSet = gtidSet // set GTIDs manually
	cases = append(cases, Case{
		event: ev,
		result: preprocessResult{
			LogPos:      ev.Header.LogPos,
			GTIDSet:     gtidSet,
			CanSaveGTID: true,
		},
	})

	// QueryEvent for non-DDL
	query = []byte("BEGIN")
	ev, err = event.GenQueryEvent(header, latestPos, 0, 0, 0, nil, schema, query)
	c.Assert(err, IsNil)
	cases = append(cases, Case{
		event: ev,
		result: preprocessResult{
			LogPos: ev.Header.LogPos,
		},
	})

	// XIDEvent
	xid := uint64(135)
	ev, err = event.GenXIDEvent(header, latestPos, xid)
	c.Assert(err, IsNil)
	ev.Event.(*replication.XIDEvent).GSet = gtidSet // set GTIDs manually
	cases = append(cases, Case{
		event: ev,
		result: preprocessResult{
			LogPos:      ev.Header.LogPos,
			GTIDSet:     gtidSet,
			CanSaveGTID: true,
		},
	})

	// GenericEvent, non-HEARTBEAT_EVENT
	ev = &replication.BinlogEvent{Header: header, Event: &replication.GenericEvent{}}
	cases = append(cases, Case{
		event: ev,
		result: preprocessResult{
			LogPos: ev.Header.LogPos,
		},
	})

	// GenericEvent, HEARTBEAT_EVENT
	genericHeader := *header
	ev = &replication.BinlogEvent{Header: &genericHeader, Event: &replication.GenericEvent{}}
	ev.Header.EventType = replication.HEARTBEAT_EVENT
	cases = append(cases, Case{
		event: ev,
		result: preprocessResult{
			Ignore:       true,
			IgnoreReason: ignoreReasonHeartbeat,
			LogPos:       ev.Header.LogPos,
		},
	})

	// other event type without LOG_EVENT_ARTIFICIAL_F
	ev, err = event.GenCommonGTIDEvent(gmysql.MySQLFlavor, header.ServerID, latestPos, gtidSet, false, 0)
	c.Assert(err, IsNil)
	cases = append(cases, Case{
		event: ev,
		result: preprocessResult{
			LogPos: ev.Header.LogPos,
		},
	})

	// other event type with LOG_EVENT_ARTIFICIAL_F
	ev, err = event.GenTableMapEvent(header, latestPos, 0, []byte("testdb"), []byte("testtbl"), []byte("INT"))
	c.Assert(err, IsNil)
	ev.Header.Flags |= replication.LOG_EVENT_ARTIFICIAL_F
	cases = append(cases, Case{
		event: ev,
		result: preprocessResult{
			Ignore:       true,
			IgnoreReason: ignoreReasonArtificialFlag,
			LogPos:       ev.Header.LogPos,
		},
	})

	for _, cs := range cases {
		c.Assert(relay.preprocessEvent(cs.event, parser2), DeepEquals, cs.result)
	}
}

func (t *testRelaySuite) TestRecoverMySQL(c *C) {
	var (
		relayDir           = c.MkDir()
		filename           = "test-mysql-bin.000001"
		parser2            = parser.New()
		flavor             = gmysql.MySQLFlavor
		previousGTIDSetStr = "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-14,406a3f61-690d-11e7-87c5-6c92bf46f384:123-456,53bfca22-690d-11e7-8a62-18ded7a37b78:1-495,686e1ab6-c47e-11e7-a42c-6c92bf46f384:234-567"
		latestGTIDStr1     = "3ccc475b-2343-11e7-be21-6c0b84d59f30:14"
		latestGTIDStr2     = "53bfca22-690d-11e7-8a62-18ded7a37b78:495"
	)

	r := NewRelay(&Config{Flavor: flavor}).(*Relay)

	// different SIDs in GTID set
	previousGTIDSet, err := gtid.ParserGTID(flavor, previousGTIDSetStr)
	c.Assert(err, IsNil)
	latestGTID1, err := gtid.ParserGTID(flavor, latestGTIDStr1)
	c.Assert(err, IsNil)
	latestGTID2, err := gtid.ParserGTID(flavor, latestGTIDStr2)
	c.Assert(err, IsNil)

	// generate binlog events
	g, _, baseData := genBinlogEventsWithGTIDs(c, flavor, previousGTIDSet, latestGTID1, latestGTID2)

	// expected latest pos/GTID set
	expectedPos := gmysql.Position{Name: filename, Pos: uint32(len(baseData))}
	// 3 DDL + 10 DML
	expectedGTIDsStr := "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-18,53bfca22-690d-11e7-8a62-18ded7a37b78:1-505,406a3f61-690d-11e7-87c5-6c92bf46f384:123-456,686e1ab6-c47e-11e7-a42c-6c92bf46f384:234-567"
	expectedGTIDs, err := gtid.ParserGTID(flavor, expectedGTIDsStr)
	c.Assert(err, IsNil)

	// write the events to a file
	fullName := filepath.Join(relayDir, filename)
	err = os.WriteFile(fullName, baseData, 0o644)
	c.Assert(err, IsNil)

	// try to recover; in fact nothing needs to be done
	result, err := r.doRecovering(context.Background(), relayDir, filename, parser2)
	c.Assert(err, IsNil)
	c.Assert(result.Truncated, IsFalse)
	c.Assert(result.LatestPos, DeepEquals, expectedPos)
	c.Assert(result.LatestGTIDs, DeepEquals, expectedGTIDs)

	// check file size; no recovering operation should have been applied
	fs, err := os.Stat(fullName)
	c.Assert(err, IsNil)
	c.Assert(fs.Size(), Equals, int64(len(baseData)))

	// generate another transaction, DDL
	extraEvents, extraData, err := g.GenDDLEvents("db2", "CREATE DATABASE db2", 0)
	c.Assert(err, IsNil)
	c.Assert(extraEvents, HasLen, 2) // [GTID, Query]

	// write an incomplete event to the file
	corruptData := extraEvents[0].RawData[:len(extraEvents[0].RawData)-2]
	f, err := os.OpenFile(fullName, os.O_WRONLY|os.O_APPEND, 0o644)
	c.Assert(err, IsNil)
	_, err = f.Write(corruptData)
	c.Assert(err, IsNil)
	c.Assert(f.Close(), IsNil)

	// check file size, increased
	fs, err = os.Stat(fullName)
	c.Assert(err, IsNil)
	c.Assert(fs.Size(), Equals, int64(len(baseData)+len(corruptData)))

	// try to recover, truncate the incomplete event
	result, err = r.doRecovering(context.Background(), relayDir, filename, parser2)
	c.Assert(err, IsNil)
	c.Assert(result.Truncated, IsTrue)
	c.Assert(result.LatestPos, DeepEquals, expectedPos)
	c.Assert(result.LatestGTIDs, DeepEquals, expectedGTIDs)

	// check file size, truncated
	fs, err = os.Stat(fullName)
	c.Assert(err, IsNil)
	c.Assert(fs.Size(), Equals, int64(len(baseData)))

	// write an incomplete transaction
	f, err = os.OpenFile(fullName, os.O_WRONLY|os.O_APPEND, 0o644)
	c.Assert(err, IsNil)
	var extraLen int64
	for i := 0; i < len(extraEvents)-1; i++ {
		_, err = f.Write(extraEvents[i].RawData)
		c.Assert(err, IsNil)
		extraLen += int64(len(extraEvents[i].RawData))
	}
	c.Assert(f.Close(), IsNil)

	// check file size, increased
	fs, err = os.Stat(fullName)
	c.Assert(err, IsNil)
	c.Assert(fs.Size(), Equals, int64(len(baseData))+extraLen)

	// try to recover, truncate the incomplete transaction
	result, err = r.doRecovering(context.Background(), relayDir, filename, parser2)
	c.Assert(err, IsNil)
	c.Assert(result.Truncated, IsTrue)
	c.Assert(result.LatestPos, DeepEquals, expectedPos)
	c.Assert(result.LatestGTIDs, DeepEquals, expectedGTIDs)

	// check file size, truncated
	fs, err = os.Stat(fullName)
	c.Assert(err, IsNil)
	c.Assert(fs.Size(), Equals, int64(len(baseData)))

	// write a complete transaction
	f, err = os.OpenFile(fullName, os.O_WRONLY|os.O_APPEND, 0o644)
	c.Assert(err, IsNil)
	for i := 0; i < len(extraEvents); i++ {
		_, err = f.Write(extraEvents[i].RawData)
		c.Assert(err, IsNil)
	}
	c.Assert(f.Close(), IsNil)

	// check file size, increased
	fs, err = os.Stat(fullName)
	c.Assert(err, IsNil)
	c.Assert(fs.Size(), Equals, int64(len(baseData)+len(extraData)))

	// try to recover, no operation applied
	expectedPos.Pos += uint32(len(extraData))
	// 4 DDL + 10 DML
	expectedGTIDsStr = "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-18,53bfca22-690d-11e7-8a62-18ded7a37b78:1-506,406a3f61-690d-11e7-87c5-6c92bf46f384:123-456,686e1ab6-c47e-11e7-a42c-6c92bf46f384:234-567"
	expectedGTIDs, err = gtid.ParserGTID(flavor, expectedGTIDsStr)
	c.Assert(err, IsNil)
	result, err = r.doRecovering(context.Background(), relayDir, filename, parser2)
	c.Assert(err, IsNil)
	c.Assert(result.Truncated, IsFalse)
	c.Assert(result.LatestPos, DeepEquals, expectedPos)
	c.Assert(result.LatestGTIDs, DeepEquals, expectedGTIDs)

	// compare file data
	var allData bytes.Buffer
	allData.Write(baseData)
	allData.Write(extraData)
	fileData, err := os.ReadFile(fullName)
	c.Assert(err, IsNil)
	c.Assert(fileData, DeepEquals, allData.Bytes())
}

func (t *testRelaySuite) TestRecoverMySQLNone(c *C) {
	relayDir := c.MkDir()
	parser2 := parser.New()

	r := NewRelay(&Config{Flavor: gmysql.MySQLFlavor}).(*Relay)

	// no file specified to recover
	result, err := r.doRecovering(context.Background(), relayDir, "", parser2)
	c.Assert(err, IsNil)
	c.Assert(result.Truncated, IsFalse)

	filename := "mysql-bin.000001"

	// the file does not exist, no need to recover
	result, err = r.doRecovering(context.Background(), relayDir, filename, parser2)
	c.Assert(err, IsNil)
	c.Assert(result.Truncated, IsFalse)
}