github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/dm/config/task_test.go

// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import (
	"crypto/rand"
	"os"
	"path"
	"reflect"
	"sort"
	"strings"
	"testing"

	"github.com/coreos/go-semver/semver"
	"github.com/pingcap/tidb/pkg/util/filter"
	router "github.com/pingcap/tidb/pkg/util/table-router"
	"github.com/pingcap/tiflow/dm/config/dbconfig"
	"github.com/pingcap/tiflow/dm/config/security"
	"github.com/pingcap/tiflow/dm/pkg/encrypt"
	"github.com/pingcap/tiflow/dm/pkg/terror"
	"github.com/pingcap/tiflow/dm/pkg/utils"
	bf "github.com/pingcap/tiflow/pkg/binlog-filter"
	"github.com/stretchr/testify/require"
)

var correctTaskConfig = `---
name: test
task-mode: all
shard-mode: "pessimistic"
meta-schema: "dm_meta"
case-sensitive: false
online-ddl: true
clean-dump-file: true

target-database:
  host: "127.0.0.1"
  port: 4000
  user: "root"
  password: ""

routes:
  route-rule-1:
    schema-pattern: "test_*"
    target-schema: "test"
  route-rule-2:
    schema-pattern: "test_*"
    target-schema: "test"

filters:
  filter-rule-1:
    schema-pattern: "test_*"
    events: ["truncate table", "drop table"]
    action: Ignore
  filter-rule-2:
    schema-pattern: "test_*"
    events: ["all dml"]
    action: Do

mydumpers:
  global1:
    threads: 4
    chunk-filesize: 64
    skip-tz-utc: true
    extra-args: "--consistency none"
  global2:
    threads: 8
    chunk-filesize: 128
    skip-tz-utc: true
    extra-args: "--consistency none"

loaders:
  global1:
    pool-size: 16
    dir: "./dumped_data1"
  global2:
    pool-size: 8
    dir: "./dumped_data2"

syncers:
  global1:
    worker-count: 16
    batch: 100
    enable-ansi-quotes: true
    safe-mode: false
  global2:
    worker-count: 32
    batch: 100
    enable-ansi-quotes: true
    safe-mode: false

expression-filter:
  expr-1:
    schema: "db"
    table: "tbl"
    insert-value-expr: "a > 1"

mysql-instances:
  - source-id: "mysql-replica-01"
    route-rules: ["route-rule-2"]
    filter-rules: ["filter-rule-2"]
    mydumper-config-name: "global1"
    loader-config-name: "global1"
    syncer-config-name: "global1"
    expression-filters: ["expr-1"]

  - source-id: "mysql-replica-02"
    route-rules: ["route-rule-1"]
    filter-rules: ["filter-rule-1"]
    mydumper-config-name: "global2"
    loader-config-name: "global2"
    syncer-config-name: "global2"
`

func TestUnusedTaskConfig(t *testing.T) {
	t.Parallel()

	taskConfig := NewTaskConfig()
	err := taskConfig.FromYaml(correctTaskConfig)
	require.NoError(t, err)
	errorTaskConfig := `---
name: test
task-mode: all
shard-mode: "pessimistic"
meta-schema: "dm_meta"
case-sensitive: false
online-ddl: true
clean-dump-file: true

target-database:
  host: "127.0.0.1"
  port: 4000
  user: "root"
  password: ""

routes:
  route-rule-1:
    schema-pattern: "test_*"
    target-schema: "test"
  route-rule-2:
    schema-pattern: "test_*"
    target-schema: "test"

filters:
  filter-rule-1:
    schema-pattern: "test_*"
    table-pattern: "t_*"
    events: ["truncate table", "drop table"]
    action: Ignore
  filter-rule-2:
    schema-pattern: "test_*"
    events: ["all dml"]
    action: Do

mydumpers:
  global1:
    threads: 4
    chunk-filesize: 64
    skip-tz-utc: true
    extra-args: "--consistency none"
  global2:
    threads: 8
    chunk-filesize: 128
    skip-tz-utc: true
    extra-args: "--consistency none"

loaders:
  global1:
    pool-size: 16
    dir: "./dumped_data1"
  global2:
    pool-size: 8
    dir: "./dumped_data2"

syncers:
  global1:
    worker-count: 16
    batch: 100
    enable-ansi-quotes: true
    safe-mode: false
  global2:
    worker-count: 32
    batch: 100
    enable-ansi-quotes: true
    safe-mode: false

expression-filter:
  expr-1:
    schema: "db"
    table: "tbl"
    insert-value-expr: "a > 1"

mysql-instances:
  - source-id: "mysql-replica-01"
    route-rules: ["route-rule-1"]
    filter-rules: ["filter-rule-1"]
    mydumper-config-name: "global1"
    loader-config-name: "global1"
    syncer-config-name: "global1"

  - source-id: "mysql-replica-02"
    route-rules: ["route-rule-1"]
    filter-rules: ["filter-rule-1"]
    mydumper-config-name: "global2"
    loader-config-name: "global2"
    syncer-config-name: "global2"
`
	taskConfig = NewTaskConfig()
	err = taskConfig.FromYaml(errorTaskConfig)
	require.ErrorContains(t, err, "The configurations as following [expr-1 filter-rule-2 route-rule-2] are set in global configuration")
}

func TestName(t *testing.T) {
	t.Parallel()

	errorTaskConfig1 := `---
name: test
task-mode: all
is-sharding: true
meta-schema: "dm_meta"
timezone: "Asia/Shanghai"
enable-heartbeat: true
ignore-checking-items: ["all"]

target-database:
  host: "127.0.0.1"
  port: 4000
  user: "root"
  password: ""

mysql-instances:
  - source-id: "mysql-replica-01"
    server-id: 101
    block-allow-list: "instance"
    route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"]
    mydumper-config-name: "global"
    loader-config-name: "global"
    syncer-config-name: "global"
`
	errorTaskConfig2 := `---
name: test
name: test1
task-mode: all
is-sharding: true
meta-schema: "dm_meta"
enable-heartbeat: true
timezone: "Asia/Shanghai"
ignore-checking-items: ["all"]

target-database:
  host: "127.0.0.1"
  port: 4000
  user: "root"
  password: ""

mysql-instances:
  - source-id: "mysql-replica-01"
    block-allow-list: "instance"
    route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"]
    mydumper-config-name: "global"
    loader-config-name: "global"
    syncer-config-name: "global"
`
	taskConfig := NewTaskConfig()
	err := taskConfig.FromYaml(errorTaskConfig1)
	// field server-id is not a member of TaskConfig
	require.ErrorContains(t, err, "line 18: field server-id not found in type config.MySQLInstance")

	err = taskConfig.FromYaml(errorTaskConfig2)
	// field name duplicate
	require.ErrorContains(t, err, "line 3: field name already set in type config.TaskConfig")

	filepath := path.Join(t.TempDir(), "test_invalid_task.yaml")
	configContent := []byte(`---
aaa: xxx
name: test
task-mode: all
is-sharding: true
meta-schema: "dm_meta"
enable-heartbeat: true
ignore-checking-items: ["all"]
`)
	err = os.WriteFile(filepath, configContent, 0o644)
	require.NoError(t, err)
	taskConfig = NewTaskConfig()
	err = taskConfig.DecodeFile(filepath)
	require.ErrorContains(t, err, "line 2: field aaa not found in type config.TaskConfig")

	configContent = []byte(`---
name: test
task-mode: all
task-mode: all
is-sharding: true
meta-schema: "dm_meta"
enable-heartbeat: true
ignore-checking-items: ["all"]
`)
	err = os.WriteFile(filepath, configContent, 0o644)
	require.NoError(t, err)
	taskConfig = NewTaskConfig()
	err = taskConfig.DecodeFile(filepath)
	require.ErrorContains(t, err, "line 4: field task-mode already set in type config.TaskConfig")

	configContent = []byte(`---
name: test
is-sharding: true
meta-schema: "dm_meta"
enable-heartbeat: true
ignore-checking-items: ["all"]
`)
	err = os.WriteFile(filepath, configContent, 0o644)
	require.NoError(t, err)
	taskConfig = NewTaskConfig()
	err = taskConfig.DecodeFile(filepath)
	require.True(t, terror.ErrConfigInvalidTaskMode.Equal(err))

	// test valid task config
	configContent = []byte(`---
name: test
task-mode: all
is-sharding: true
meta-schema: "dm_meta"
enable-heartbeat: true
heartbeat-update-interval: 1
heartbeat-report-interval: 1

target-database:
  host: "127.0.0.1"
  port: 4000
  user: "root"
  password: ""

mysql-instances:
  - source-id: "mysql-replica-01"
    block-allow-list: "instance"
    mydumper-thread: 11
    mydumper-config-name: "global"
    loader-thread: 22
    loader-config-name: "global"
    syncer-thread: 33
    syncer-config-name: "global"

  - source-id: "mysql-replica-02"
    block-allow-list: "instance"
    mydumper-config-name: "global"
    loader-config-name: "global"
    syncer-config-name: "global"

  - source-id: "mysql-replica-03"
    block-allow-list: "instance"
    mydumper-thread: 44
    loader-thread: 55
    syncer-thread: 66

block-allow-list:
  instance:
    do-dbs: ["test"]

mydumpers:
  global:
    threads: 4
    chunk-filesize: 64
    skip-tz-utc: true
    extra-args: "-B test"

loaders:
  global:
    pool-size: 16
    dir: "./dumped_data"

syncers:
  global:
    worker-count: 16
    batch: 100
`)

	err = os.WriteFile(filepath, configContent, 0o644)
	require.NoError(t, err)
	taskConfig = NewTaskConfig()
	err = taskConfig.DecodeFile(filepath)
	require.NoError(t, err)
	require.True(t, taskConfig.IsSharding)
	require.Equal(t, ShardPessimistic, taskConfig.ShardMode)
	require.Equal(t, 11, taskConfig.MySQLInstances[0].Mydumper.Threads)
	require.Equal(t, 22, taskConfig.MySQLInstances[0].Loader.PoolSize)
	require.Equal(t, 33, taskConfig.MySQLInstances[0].Syncer.WorkerCount)
	require.Equal(t, 4, taskConfig.MySQLInstances[1].Mydumper.Threads)
	require.Equal(t, 16, taskConfig.MySQLInstances[1].Loader.PoolSize)
	require.Equal(t, 16, taskConfig.MySQLInstances[1].Syncer.WorkerCount)
	require.Equal(t, 44, taskConfig.MySQLInstances[2].Mydumper.Threads)
	require.Equal(t, 55, taskConfig.MySQLInstances[2].Loader.PoolSize)
	require.Equal(t, 66, taskConfig.MySQLInstances[2].Syncer.WorkerCount)

	configContent = []byte(`---
name: test
task-mode: all
is-sharding: true
shard-mode: "optimistic"
meta-schema: "dm_meta"
enable-heartbeat: true
heartbeat-update-interval: 1
heartbeat-report-interval: 1

target-database:
  host: "127.0.0.1"
  port: 4000
  user: "root"
  password: ""

mysql-instances:
  - source-id: "mysql-replica-01"
  - source-id: "mysql-replica-02"
  - source-id: "mysql-replica-03"

block-allow-list:
  instance:
    do-dbs: ["test"]

routes:
  route-rule-1:
  route-rule-2:
  route-rule-3:
  route-rule-4:

filters:
  filter-rule-1:
  filter-rule-2:
  filter-rule-3:
  filter-rule-4:
`)

	err = os.WriteFile(filepath, configContent, 0o644)
	require.NoError(t, err)
	taskConfig = NewTaskConfig()
	err = taskConfig.DecodeFile(filepath)
	require.Error(t, err)
	require.True(t, taskConfig.IsSharding)
	require.Equal(t, ShardOptimistic, taskConfig.ShardMode)
	taskConfig.MySQLInstances[0].RouteRules = []string{"route-rule-1", "route-rule-2", "route-rule-1", "route-rule-2"}
	taskConfig.MySQLInstances[1].FilterRules = []string{"filter-rule-1", "filter-rule-2", "filter-rule-3", "filter-rule-2"}
	err = taskConfig.adjust()
	require.True(t, terror.ErrConfigDuplicateCfgItem.Equal(err))
	require.ErrorContains(t, err, "mysql-instance(0)'s route-rules: route-rule-1, route-rule-2")
	require.ErrorContains(t, err, "mysql-instance(1)'s filter-rules: filter-rule-2")
}

func TestCheckDuplicateString(t *testing.T) {
	t.Parallel()

	a := []string{"a", "b", "c", "d"}
	dupeStrings := checkDuplicateString(a)
	require.Len(t, dupeStrings, 0)
	a = []string{"a", "a", "b", "b", "c", "c"}
	dupeStrings = checkDuplicateString(a)
	require.Len(t, dupeStrings, 3)
	sort.Strings(dupeStrings)
	require.Equal(t, []string{"a", "b", "c"}, dupeStrings)
}

func TestTaskBlockAllowList(t *testing.T) {
	t.Parallel()

	filterRules1 := &filter.Rules{
		DoDBs: []string{"s1"},
	}

	filterRules2 := &filter.Rules{
		DoDBs: []string{"s2"},
	}

	cfg := &TaskConfig{
		Name:           "test",
		TaskMode:       ModeFull,
		TargetDB:       &dbconfig.DBConfig{},
		MySQLInstances: []*MySQLInstance{{SourceID: "source-1"}},
		BWList:         map[string]*filter.Rules{"source-1": filterRules1},
	}

	// BAList is nil, will set BAList = BWList
	err := cfg.adjust()
	require.NoError(t, err)
	require.Equal(t, filterRules1, cfg.BAList["source-1"])

	// BAList is not nil, will not update it
	cfg.BAList = map[string]*filter.Rules{"source-1": filterRules2}
	err = cfg.adjust()
	require.NoError(t, err)
	require.Equal(t, filterRules2, cfg.BAList["source-1"])
}

func wordCount(s string) map[string]int {
	words := strings.Fields(s)
	wordCount := make(map[string]int)
	for i := range words {
		wordCount[words[i]]++
	}

	return wordCount
}

func TestGenAndFromSubTaskConfigs(t *testing.T) {
	t.Parallel()

	var (
		shardMode           = ShardOptimistic
		onlineDDL           = true
		name                = "from-sub-tasks"
		taskMode            = "incremental"
		ignoreCheckingItems = []string{VersionChecking, BinlogRowImageChecking}
		source1             = "mysql-replica-01"
		source2             = "mysql-replica-02"
		metaSchema          = "meta-sub-tasks"
		heartbeatUI         = 12
		heartbeatRI         = 21
		maxAllowedPacket    = 10244201
		fromSession         = map[string]string{
			"sql_mode":  " NO_AUTO_VALUE_ON_ZERO,ANSI_QUOTES",
			"time_zone": "+00:00",
		}
		toSession = map[string]string{
			"sql_mode":  " NO_AUTO_VALUE_ON_ZERO,ANSI_QUOTES",
			"time_zone": "+00:00",
		}
		security = security.Security{
			SSLCA:         "/path/to/ca",
			SSLCert:       "/path/to/cert",
			SSLKey:        "/path/to/key",
			CertAllowedCN: []string{"allowed-cn"},
		}
		rawDBCfg = dbconfig.RawDBConfig{
			MaxIdleConns: 333,
			ReadTimeout:  "2m",
			WriteTimeout: "1m",
		}
		routeRule1 = router.TableRule{
			SchemaPattern: "db*",
			TargetSchema:  "db",
		}
		routeRule2 = router.TableRule{
			SchemaPattern: "db*",
			TablePattern:  "tbl*",
			TargetSchema:  "db",
			TargetTable:   "tbl",
		}
		routeRule3 = router.TableRule{
			SchemaPattern: "schema*",
			TargetSchema:  "schema",
		}
		routeRule4 = router.TableRule{
			SchemaPattern: "schema*",
			TablePattern:  "tbs*",
			TargetSchema:  "schema",
			TargetTable:   "tbs",
		}

		filterRule1 = bf.BinlogEventRule{
			SchemaPattern: "db*",
			TablePattern:  "tbl1*",
			Events:        []bf.EventType{bf.CreateIndex, bf.AlterTable},
			Action:        bf.Do,
		}
		filterRule2 = bf.BinlogEventRule{
			SchemaPattern: "db*",
			TablePattern:  "tbl2",
			SQLPattern:    []string{"^DROP\\s+PROCEDURE", "^CREATE\\s+PROCEDURE"},
			Action:        bf.Ignore,
		}
		baList1 = filter.Rules{
			DoDBs: []string{"db1", "db2"},
			DoTables: []*filter.Table{
				{Schema: "db1", Name: "tbl1"},
				{Schema: "db2", Name: "tbl2"},
			},
		}
		baList2 = filter.Rules{
			IgnoreDBs: []string{"bd1", "bd2"},
			IgnoreTables: []*filter.Table{
				{Schema: "bd1", Name: "lbt1"},
				{Schema: "bd2", Name: "lbt2"},
			},
		}
		exprFilter1 = ExpressionFilter{
			Schema:          "db",
			Table:           "tbl",
			DeleteValueExpr: "state = 1",
		}
		validatorCfg = ValidatorConfig{Mode: ValidationNone}
		source1DBCfg = dbconfig.DBConfig{
			Host:             "127.0.0.1",
			Port:             3306,
			User:             "user_from_1",
			Password:         "123",
			MaxAllowedPacket: &maxAllowedPacket,
			Session:          fromSession,
			Security:         &security,
			RawDBCfg:         &rawDBCfg,
		}
		source2DBCfg = dbconfig.DBConfig{
			Host:             "127.0.0.1",
			Port:             3307,
			User:             "user_from_2",
			Password:         "abc",
			MaxAllowedPacket: &maxAllowedPacket,
			Session:          fromSession,
			Security:         &security,
			RawDBCfg:         &rawDBCfg,
		}

		stCfg1 = &SubTaskConfig{
			IsSharding:              true,
			ShardMode:               shardMode,
			OnlineDDL:               onlineDDL,
			ShadowTableRules:        []string{DefaultShadowTableRules},
			TrashTableRules:         []string{DefaultTrashTableRules},
			CaseSensitive:           true,
			Name:                    name,
			Mode:                    taskMode,
			IgnoreCheckingItems:     ignoreCheckingItems,
			SourceID:                source1,
			MetaSchema:              metaSchema,
			HeartbeatUpdateInterval: heartbeatUI,
			HeartbeatReportInterval: heartbeatRI,
			EnableHeartbeat:         true,
			CollationCompatible:     LooseCollationCompatible,
			Meta: &Meta{
				BinLogName: "mysql-bin.000123",
				BinLogPos:  456,
				BinLogGTID: "1-1-12,4-4-4",
			},
			From: source1DBCfg,
			To: dbconfig.DBConfig{
				Host:             "127.0.0.1",
				Port:             4000,
				User:             "user_to",
				Password:         "abc",
				MaxAllowedPacket: &maxAllowedPacket,
				Session:          toSession,
				Security:         &security,
				RawDBCfg:         &rawDBCfg,
			},
			RouteRules:  []*router.TableRule{&routeRule2, &routeRule1, &routeRule3},
			FilterRules: []*bf.BinlogEventRule{&filterRule1, &filterRule2},
			BAList:      &baList1,
			MydumperConfig: MydumperConfig{
				MydumperPath:  "",
				Threads:       16,
				ChunkFilesize: "64",
				StatementSize: 1000000,
				Rows:          1024,
				Where:         "",
				SkipTzUTC:     true,
				ExtraArgs:     "--escape-backslash",
			},
			LoaderConfig: LoaderConfig{
				PoolSize:           32,
				Dir:                "./dumped_data",
				SortingDirPhysical: "./dumped_data",
				ImportMode:         LoadModePhysical,
				OnDuplicateLogical:  OnDuplicateReplace,
				OnDuplicatePhysical: OnDuplicateNone,
				ChecksumPhysical:    OpLevelRequired,
				Analyze:             OpLevelOptional,
				PDAddr:              "http://test:2379",
				RangeConcurrency:    32,
				CompressKVPairs:     "gzip",
			},
			SyncerConfig: SyncerConfig{
				WorkerCount:             32,
				Batch:                   100,
				QueueSize:               512,
				CheckpointFlushInterval: 15,
				MaxRetry:                10,
				AutoFixGTID:             true,
				EnableGTID:              true,
				SafeMode:                true,
				SafeModeDuration:        "60s",
			},
			ValidatorCfg:     validatorCfg,
			CleanDumpFile:    true,
			EnableANSIQuotes: true,
		}
	)

	stCfg1.Experimental.AsyncCheckpointFlush = true
	stCfg2, err := stCfg1.Clone()
	require.NoError(t, err)
	stCfg2.SourceID = source2
	stCfg2.Meta = &Meta{
		BinLogName: "mysql-bin.000321",
		BinLogPos:  123,
		BinLogGTID: "1-1-21,2-2-2",
	}
	stCfg2.From = source2DBCfg
	stCfg2.BAList = &baList2
	stCfg2.RouteRules = []*router.TableRule{&routeRule4, &routeRule1, &routeRule2}
	stCfg2.ExprFilter = []*ExpressionFilter{&exprFilter1}
	stCfg2.ValidatorCfg.Mode = ValidationFast

	cfg := SubTaskConfigsToTaskConfig(stCfg1, stCfg2)

	cfg2 := TaskConfig{
		Name:                    name,
		TaskMode:                taskMode,
		IsSharding:              stCfg1.IsSharding,
		ShardMode:               shardMode,
		IgnoreCheckingItems:     ignoreCheckingItems,
		MetaSchema:              metaSchema,
		EnableHeartbeat:         stCfg1.EnableHeartbeat,
		HeartbeatUpdateInterval: heartbeatUI,
		HeartbeatReportInterval: heartbeatRI,
		CaseSensitive:           stCfg1.CaseSensitive,
		TargetDB:                &stCfg1.To,
		CollationCompatible:     LooseCollationCompatible,
		MySQLInstances: []*MySQLInstance{
			{
				SourceID:           source1,
				Meta:               stCfg1.Meta,
				FilterRules:        []string{"filter-01", "filter-02"},
				ColumnMappingRules: []string{},
				RouteRules:         []string{"route-01", "route-02", "route-03"},
				BWListName:         "",
				BAListName:         "balist-01",
				MydumperConfigName: "dump-01",
				Mydumper:           nil,
				MydumperThread:     0,
				LoaderConfigName:   "load-01",
				Loader:             nil,
				LoaderThread:       0,
				SyncerConfigName:   "sync-01",
				Syncer:             nil,
				SyncerThread:       0,

				ContinuousValidatorConfigName: "validator-01",
			},
			{
				SourceID:           source2,
				Meta:               stCfg2.Meta,
				FilterRules:        []string{"filter-01", "filter-02"},
				ColumnMappingRules: []string{},
				RouteRules:         []string{"route-01", "route-02", "route-04"},
				BWListName:         "",
				BAListName:         "balist-02",
				MydumperConfigName: "dump-01",
				Mydumper:           nil,
				MydumperThread:     0,
				LoaderConfigName:   "load-01",
				Loader:             nil,
				LoaderThread:       0,
				SyncerConfigName:   "sync-01",
				Syncer:             nil,
				SyncerThread:       0,
				ExpressionFilters:  []string{"expr-filter-01"},

				ContinuousValidatorConfigName: "validator-02",
			},
		},
		OnlineDDL: onlineDDL,
		Routes: map[string]*router.TableRule{
			"route-01": &routeRule1,
			"route-02": &routeRule2,
			"route-03": &routeRule3,
			"route-04": &routeRule4,
		},
		Filters: map[string]*bf.BinlogEventRule{
			"filter-01": &filterRule1,
			"filter-02": &filterRule2,
		},
		ColumnMappings: nil,
		BWList:         nil,
		BAList: map[string]*filter.Rules{
			"balist-01": &baList1,
			"balist-02": &baList2,
		},
		Mydumpers: map[string]*MydumperConfig{
			"dump-01": &stCfg1.MydumperConfig,
		},
		Loaders: map[string]*LoaderConfig{
			"load-01": &stCfg1.LoaderConfig,
		},
		Syncers: map[string]*SyncerConfig{
			"sync-01": &stCfg1.SyncerConfig,
		},
		ExprFilter: map[string]*ExpressionFilter{
"expr-filter-01": &exprFilter1, 795 }, 796 Validators: map[string]*ValidatorConfig{ 797 "validator-01": &validatorCfg, 798 "validator-02": { 799 Mode: ValidationFast, 800 }, 801 }, 802 CleanDumpFile: stCfg1.CleanDumpFile, 803 } 804 cfg2.Experimental.AsyncCheckpointFlush = true 805 806 require.Equal(t, wordCount(cfg.String()), wordCount(cfg2.String())) // since rules are unordered, so use wordCount to compare 807 808 require.NoError(t, cfg.adjust()) 809 stCfgs, err := TaskConfigToSubTaskConfigs(cfg, map[string]dbconfig.DBConfig{source1: source1DBCfg, source2: source2DBCfg}) 810 require.NoError(t, err) 811 // revert ./dumpped_data.from-sub-tasks 812 stCfgs[0].LoaderConfig.Dir = stCfg1.LoaderConfig.Dir 813 stCfgs[1].LoaderConfig.Dir = stCfg2.LoaderConfig.Dir 814 // fix empty list and nil 815 require.Len(t, stCfgs[0].ColumnMappingRules, 0) 816 require.Len(t, stCfg1.ColumnMappingRules, 0) 817 require.Len(t, stCfgs[1].ColumnMappingRules, 0) 818 require.Len(t, stCfg2.ColumnMappingRules, 0) 819 require.Len(t, stCfgs[0].ExprFilter, 0) 820 require.Len(t, stCfg1.ExprFilter, 0) 821 stCfgs[0].ColumnMappingRules = stCfg1.ColumnMappingRules 822 stCfgs[1].ColumnMappingRules = stCfg2.ColumnMappingRules 823 stCfgs[0].ExprFilter = stCfg1.ExprFilter 824 // deprecated config will not recover 825 stCfgs[0].EnableANSIQuotes = stCfg1.EnableANSIQuotes 826 stCfgs[1].EnableANSIQuotes = stCfg2.EnableANSIQuotes 827 // some features are disabled 828 require.True(t, stCfg1.EnableHeartbeat) 829 require.True(t, stCfg2.EnableHeartbeat) 830 stCfg1.EnableHeartbeat = false 831 stCfg2.EnableHeartbeat = false 832 require.Equal(t, stCfg1.String(), stCfgs[0].String()) 833 require.Equal(t, stCfg2.String(), stCfgs[1].String()) 834 // adjust loader config 835 stCfg1.Mode = "full" 836 require.NoError(t, stCfg1.Adjust(false)) 837 require.Equal(t, stCfgs[0].SortingDirPhysical, stCfg1.SortingDirPhysical) 838 } 839 840 func TestMetaVerify(t *testing.T) { 841 t.Parallel() 842 843 var m *Meta 844 require.NoError(t, m.Verify()) // nil meta is fine (for not incremental task mode) 845 846 // none 847 m = &Meta{} 848 require.True(t, terror.ErrConfigMetaInvalid.Equal(m.Verify())) 849 850 // only `binlog-name`. 851 m = &Meta{ 852 BinLogName: "mysql-bin.000123", 853 } 854 require.NoError(t, m.Verify()) 855 856 // only `binlog-pos`. 857 m = &Meta{ 858 BinLogPos: 456, 859 } 860 require.True(t, terror.ErrConfigMetaInvalid.Equal(m.Verify())) 861 862 // only `binlog-gtid`. 
	m = &Meta{
		BinLogGTID: "1-1-12,4-4-4",
	}
	require.NoError(t, m.Verify())

	// all
	m = &Meta{
		BinLogName: "mysql-bin.000123",
		BinLogPos:  456,
		BinLogGTID: "1-1-12,4-4-4",
	}
	require.NoError(t, m.Verify())
}

func TestMySQLInstance(t *testing.T) {
	t.Parallel()

	var m *MySQLInstance
	cfgName := "test"
	err := m.VerifyAndAdjust()
	require.True(t, terror.ErrConfigMySQLInstNotFound.Equal(err))

	m = &MySQLInstance{}
	err = m.VerifyAndAdjust()
	require.True(t, terror.ErrConfigEmptySourceID.Equal(err))
	m.SourceID = "123"

	m.Mydumper = &MydumperConfig{}
	m.MydumperConfigName = cfgName
	err = m.VerifyAndAdjust()
	require.True(t, terror.ErrConfigMydumperCfgConflict.Equal(err))
	m.MydumperConfigName = ""

	m.Loader = &LoaderConfig{}
	m.LoaderConfigName = cfgName
	err = m.VerifyAndAdjust()
	require.True(t, terror.ErrConfigLoaderCfgConflict.Equal(err))
	m.Loader = nil

	m.Syncer = &SyncerConfig{}
	m.SyncerConfigName = cfgName
	err = m.VerifyAndAdjust()
	require.True(t, terror.ErrConfigSyncerCfgConflict.Equal(err))
	m.SyncerConfigName = ""

	require.NoError(t, m.VerifyAndAdjust())
}

func TestAdjustTargetDBConfig(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		dbConfig dbconfig.DBConfig
		result   dbconfig.DBConfig
		version  *semver.Version
	}{
		{
			dbconfig.DBConfig{},
			dbconfig.DBConfig{Session: map[string]string{}},
			semver.New("0.0.0"),
		},
		{
			dbconfig.DBConfig{Session: map[string]string{"SQL_MODE": "ANSI_QUOTES"}},
			dbconfig.DBConfig{Session: map[string]string{"sql_mode": "ANSI_QUOTES"}},
			semver.New("2.0.7"),
		},
		{
			dbconfig.DBConfig{},
			dbconfig.DBConfig{Session: map[string]string{tidbTxnMode: tidbTxnOptimistic}},
			semver.New("3.0.1"),
		},
		{
			dbconfig.DBConfig{Session: map[string]string{"SQL_MODE": "", tidbTxnMode: "pessimistic"}},
			dbconfig.DBConfig{Session: map[string]string{"sql_mode": "", tidbTxnMode: "pessimistic"}},
			semver.New("4.0.0-beta.2"),
		},
	}

	for _, tc := range testCases {
		AdjustTargetDBSessionCfg(&tc.dbConfig, tc.version)
		require.Equal(t, tc.result, tc.dbConfig)
	}
}

func TestDefaultConfig(t *testing.T) {
	t.Parallel()

	cfg := NewTaskConfig()
	cfg.Name = "test"
	cfg.TaskMode = ModeAll
	cfg.TargetDB = &dbconfig.DBConfig{}
	cfg.MySQLInstances = append(cfg.MySQLInstances, &MySQLInstance{SourceID: "source1"})
	require.NoError(t, cfg.adjust())
	require.Equal(t, DefaultMydumperConfig(), *cfg.MySQLInstances[0].Mydumper)

	cfg.MySQLInstances[0].Mydumper = &MydumperConfig{MydumperPath: "test"}
	require.NoError(t, cfg.adjust())
	require.Equal(t, defaultChunkFilesize, cfg.MySQLInstances[0].Mydumper.ChunkFilesize)
}

func TestExclusiveAndWrongExprFilterFields(t *testing.T) {
	t.Parallel()

	cfg := NewTaskConfig()
	cfg.Name = "test"
	cfg.TaskMode = ModeAll
	cfg.TargetDB = &dbconfig.DBConfig{}
	cfg.MySQLInstances = append(cfg.MySQLInstances, &MySQLInstance{SourceID: "source1"})
	require.NoError(t, cfg.adjust())

	cfg.ExprFilter["test-insert"] = &ExpressionFilter{
		Schema:          "db",
		Table:           "tbl",
		InsertValueExpr: "a > 1",
	}
	cfg.ExprFilter["test-update-only-old"] = &ExpressionFilter{
		Schema:             "db",
		Table:              "tbl",
		UpdateOldValueExpr: "a > 1",
	}
	cfg.ExprFilter["test-update-only-new"] = &ExpressionFilter{
		Schema:             "db",
		Table:              "tbl",
		UpdateNewValueExpr: "a > 1",
	}
	cfg.ExprFilter["test-update"] = &ExpressionFilter{
		Schema:             "db",
		Table:              "tbl",
		UpdateOldValueExpr: "a > 1",
		UpdateNewValueExpr: "a > 1",
	}
	cfg.ExprFilter["test-delete"] = &ExpressionFilter{
		Schema:          "db",
		Table:           "tbl",
		DeleteValueExpr: "a > 1",
	}
	cfg.MySQLInstances[0].ExpressionFilters = []string{
		"test-insert",
		"test-update-only-old",
		"test-update-only-new",
		"test-update",
		"test-delete",
	}
	require.NoError(t, cfg.adjust())

	cfg.ExprFilter["both-field"] = &ExpressionFilter{
		Schema:          "db",
		Table:           "tbl",
		InsertValueExpr: "a > 1",
		DeleteValueExpr: "a > 1",
	}
	cfg.MySQLInstances[0].ExpressionFilters = append(cfg.MySQLInstances[0].ExpressionFilters, "both-field")
	err := cfg.adjust()
	require.True(t, terror.ErrConfigExprFilterManyExpr.Equal(err))

	delete(cfg.ExprFilter, "both-field")
	cfg.ExprFilter["wrong"] = &ExpressionFilter{
		Schema:          "db",
		Table:           "tbl",
		DeleteValueExpr: "a >",
	}
	length := len(cfg.MySQLInstances[0].ExpressionFilters)
	cfg.MySQLInstances[0].ExpressionFilters[length-1] = "wrong"
	err = cfg.adjust()
	require.True(t, terror.ErrConfigExprFilterWrongGrammar.Equal(err))
}

func TestTaskConfigForDowngrade(t *testing.T) {
	t.Parallel()

	cfg := NewTaskConfig()
	err := cfg.FromYaml(correctTaskConfig)
	require.NoError(t, err)

	cfgForDowngrade := NewTaskConfigForDowngrade(cfg)

	// make sure all new fields were added
	cfgReflect := reflect.Indirect(reflect.ValueOf(cfg))
	cfgForDowngradeReflect := reflect.Indirect(reflect.ValueOf(cfgForDowngrade))
	// the downgrade config has no flag, collation_compatible, experimental or validator fields
	require.Equal(t, cfgForDowngradeReflect.NumField()+4, cfgReflect.NumField())

	// make sure all fields were copied
	cfgForClone := &TaskConfigForDowngrade{}
	Clone(cfgForClone, cfg)
	require.Equal(t, cfgForClone, cfgForDowngrade)
}

// Clone clones src to dest.
func Clone(dest, src interface{}) {
	cloneValues(reflect.ValueOf(dest), reflect.ValueOf(src))
}

// cloneValues clones src to dest recursively.
// Note: pointers are still shallow-copied.
func cloneValues(dest, src reflect.Value) {
	destType := dest.Type()
	srcType := src.Type()
	if destType.Kind() == reflect.Ptr {
		destType = destType.Elem()
	}
	if srcType.Kind() == reflect.Ptr {
		srcType = srcType.Elem()
	}

	if destType.Kind() == reflect.Map {
		destMap := reflect.MakeMap(destType)
		for _, k := range src.MapKeys() {
			if src.MapIndex(k).Type().Kind() == reflect.Ptr {
				newVal := reflect.New(destType.Elem().Elem())
				cloneValues(newVal, src.MapIndex(k))
				destMap.SetMapIndex(k, newVal)
			} else {
				cloneValues(destMap.MapIndex(k).Addr(), src.MapIndex(k).Addr())
			}
		}
		dest.Set(destMap)
		return
	}

	if destType.Kind() == reflect.Slice {
		slice := reflect.MakeSlice(destType, src.Len(), src.Cap())
		for i := 0; i < src.Len(); i++ {
			if slice.Index(i).Type().Kind() == reflect.Ptr {
				newVal := reflect.New(slice.Index(i).Type().Elem())
				cloneValues(newVal, src.Index(i))
				slice.Index(i).Set(newVal)
			} else {
				cloneValues(slice.Index(i).Addr(), src.Index(i).Addr())
			}
		}
		dest.Set(slice)
		return
	}

	destFieldsMap := map[string]int{}
	for i := 0; i < destType.NumField(); i++ {
		destFieldsMap[destType.Field(i).Name] = i
	}
	for i := 0; i < srcType.NumField(); i++ {
		if j, ok := destFieldsMap[srcType.Field(i).Name]; ok {
			destField := dest.Elem().Field(j)
			srcField := src.Elem().Field(i)
			destFieldType := destField.Type()
			srcFieldType := srcField.Type()
			if destFieldType.Kind() == reflect.Ptr {
				destFieldType = destFieldType.Elem()
			}
			if srcFieldType.Kind() == reflect.Ptr {
				srcFieldType = srcFieldType.Elem()
			}
			if destFieldType != srcFieldType {
				cloneValues(destField, srcField)
			} else {
				destField.Set(srcField)
			}
		}
	}
}

func TestLoadConfigAdjust(t *testing.T) {
	t.Parallel()

	cfg := &LoaderConfig{}
	require.NoError(t, cfg.adjust())
	require.Equal(t, &LoaderConfig{
		PoolSize:            16,
		Dir:                 "",
		SQLMode:             "",
		ImportMode:          "logical",
		OnDuplicate:         "",
		OnDuplicateLogical:  "replace",
		OnDuplicatePhysical: "none",
		ChecksumPhysical:    "required",
		Analyze:             "optional",
	}, cfg)

	// test that the deprecated OnDuplicate value is written to OnDuplicateLogical
	cfg.OnDuplicate = "replace"
	cfg.OnDuplicateLogical = ""
	require.NoError(t, cfg.adjust())
	require.Equal(t, OnDuplicateReplace, cfg.OnDuplicateLogical)

	// test wrong value
	cfg.OnDuplicatePhysical = "wrong"
	err := cfg.adjust()
	require.True(t, terror.ErrConfigInvalidPhysicalDuplicateResolution.Equal(err))
}

func TestTaskYamlForDowngrade(t *testing.T) {
	originCfg := TaskConfig{
		Name:     "test",
		TaskMode: ModeFull,
		MySQLInstances: []*MySQLInstance{
			{
				SourceID: "mysql-3306",
			},
		},
		TargetDB: &dbconfig.DBConfig{
			Password: "123456",
		},
	}
	// when the secret key is empty, the password should be kept as-is
	content, err := originCfg.YamlForDowngrade()
	require.NoError(t, err)
	newCfg := &TaskConfig{}
	require.NoError(t, newCfg.FromYaml(content))
	require.Equal(t, originCfg.TargetDB.Password, newCfg.TargetDB.Password)

	// when the secret key is not empty, the password should be encrypted
	key := make([]byte, 32)
	_, err = rand.Read(key)
	require.NoError(t, err)
	t.Cleanup(func() {
		encrypt.InitCipher(nil)
	})
	encrypt.InitCipher(key)
	content, err = originCfg.YamlForDowngrade()
	require.NoError(t, err)
	newCfg = &TaskConfig{}
	require.NoError(t, newCfg.FromYaml(content))
	require.NotEqual(t, originCfg.TargetDB.Password, newCfg.TargetDB.Password)
	decryptedPass, err := utils.Decrypt(newCfg.TargetDB.Password)
	require.NoError(t, err)
	require.Equal(t, originCfg.TargetDB.Password, decryptedPass)
}