github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/dm/tests/shardddl1/run.sh (about)

     1  #!/bin/bash
     2  
     3  set -eu
     4  
     5  cur=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
     6  source $cur/../_utils/test_prepare
     7  WORK_DIR=$TEST_DIR/$TEST_NAME
     8  source $cur/../_utils/shardddl_lib.sh
     9  
# Case 001: add the same column to two tables on source1 (no sharding);
# the task must stay synced while the duplicate-column error is only
# visible downstream and ignored by DM.
function DM_001_CASE() {
	run_sql_source1 "alter table ${shardddl1}.${tb1} add column new_col1 int;"
	run_sql_source1 "alter table ${shardddl1}.${tb2} add column new_col1 int;"
	# schema tracker could track per table without error
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"query-status test" \
		"\"result\": true" 2 \
		"\"synced\": true" 1
	# only downstream sees a duplicate error, but currently ignored by DM
	check_log_contain_with_retry "Duplicate column name 'new_col1'" $WORK_DIR/worker1/log/dm-worker.log $WORK_DIR/worker2/log/dm-worker.log
}
    21  
# Run case 001 with a single non-sharding source and no shard mode.
function DM_001() {
	run_case 001 "single-source-no-sharding" "init_table 111 112" "clean_table" ""
}
    25  
# Case 002: identical add-column on both shard tables; data should sync
# cleanly afterwards.
function DM_002_CASE() {
	run_sql_source1 "alter table ${shardddl1}.${tb1} add column new_col1 int;"
	run_sql_source1 "alter table ${shardddl1}.${tb2} add column new_col1 int;"
	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
}
    31  
# Run case 002 with a single-source pessimistic config (no explicit shard mode).
function DM_002() {
	run_case 002 "single-source-pessimistic" "init_table 111 112" "clean_table" ""
}
    35  
# Case 003 (pessimistic): after only one shard applies the DDL, sync diff is
# expected to fail (lock not resolved); once the second shard applies the
# same DDL the diff passes.
function DM_003_CASE() {
	run_sql_source1 "alter table ${shardddl1}.${tb1} add column new_col1 int;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values (1,1)"
	# diff must fail while the shard DDL lock is still pending
	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml 3 "fail"
	run_sql_source1 "alter table ${shardddl1}.${tb2} add column new_col1 int;"
	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
}
    43  
# Run case 003 in pessimistic shard mode.
function DM_003() {
	run_case 003 "single-source-pessimistic" "init_table 111 112" "clean_table" "pessimistic"
}
    47  
# Case 004 (optimistic): a row inserted after the first shard's DDL should
# replicate immediately (no blocking); the second shard then catches up.
function DM_004_CASE() {
	run_sql_source1 "alter table ${shardddl1}.${tb1} add column new_col1 int;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values (1,1)"
	# optimistic mode does not block: the row is already downstream
	run_sql_tidb_with_retry "select count(1) from ${shardddl}.${tb};" "count(1): 1"
	run_sql_source1 "alter table ${shardddl1}.${tb2} add column new_col1 int;"
	run_sql_source1 "insert into ${shardddl1}.${tb2} values (2,2)"
	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
}
    56  
# Run case 004 in optimistic shard mode.
function DM_004() {
	run_case 004 "single-source-optimistic" "init_table 111 112" "clean_table" "optimistic"
}
    60  
# Case 005: interleave DDL and DML on both shard tables, then verify the
# final data is consistent.
function DM_005_CASE() {
	run_sql_source1 "alter table ${shardddl1}.${tb1} add column new_col1 int;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values (1,1)"
	run_sql_source1 "alter table ${shardddl1}.${tb2} add column new_col1 int;"
	run_sql_source1 "insert into ${shardddl1}.${tb2} values (2,2)"
	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
}
    68  
# Run case 005 with a single-source pessimistic config (no explicit shard mode).
function DM_005() {
	run_case 005 "single-source-pessimistic" "init_table 111 112" "clean_table" ""
}
    72  
# RENAME TABLE across all three shard tables, mixed with DML and further
# DDL on the renamed tables.  $1 is the shard mode: pessimistic replicates
# fully; any other mode must report RENAME TABLE as unsupported.
function DM_RENAME_TABLE_CASE() {
	# baseline rows before any DDL
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(1);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(2);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(3);"

	# same add-column on every shard
	run_sql_source1 "alter table ${shardddl1}.${tb1} add column a int;"
	run_sql_source2 "alter table ${shardddl1}.${tb1} add column a int;"
	run_sql_source2 "alter table ${shardddl1}.${tb2} add column a int;"

	run_sql_source1 "insert into ${shardddl1}.${tb1} values(4,4);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(5,5);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(6,6);"

	# rename every shard table (tb1 -> tb3, tb2 -> tb4)
	run_sql_source1 "rename table ${shardddl1}.${tb1} to ${shardddl1}.${tb3};"
	run_sql_source2 "rename table ${shardddl1}.${tb1} to ${shardddl1}.${tb3};"
	run_sql_source2 "rename table ${shardddl1}.${tb2} to ${shardddl1}.${tb4};"

	run_sql_source1 "insert into ${shardddl1}.${tb3} values(7,7)"
	run_sql_source2 "insert into ${shardddl1}.${tb3} values(8,8);"
	run_sql_source2 "insert into ${shardddl1}.${tb4} values(9,9);"

	# further DDL on the renamed tables
	run_sql_source1 "alter table ${shardddl1}.${tb3} add column b int;"
	run_sql_source2 "alter table ${shardddl1}.${tb3} add column b int;"
	run_sql_source2 "alter table ${shardddl1}.${tb4} add column b int;"

	run_sql_source1 "insert into ${shardddl1}.${tb3} values(10,10,10)"
	run_sql_source2 "insert into ${shardddl1}.${tb3} values(11,11,11);"
	run_sql_source2 "insert into ${shardddl1}.${tb4} values(12,12,12);"

	if [[ "$1" = "pessimistic" ]]; then
		check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
	else
		# non-pessimistic modes reject RENAME TABLE
		run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
			"query-status test" \
			"\`RENAME TABLE\` statement not supported in $1 mode" 2
	fi
}
   110  
# Run the RENAME TABLE case under both shard modes.
function DM_RENAME_TABLE() {
	run_case RENAME_TABLE "double-source-pessimistic" "init_table 111 211 212" "clean_table" "pessimistic"
	run_case RENAME_TABLE "double-source-optimistic" "init_table 111 211 212" "clean_table" "optimistic"
}
   115  
# Rename a column (`change a c int`) shard by shard in optimistic mode:
# the task stays Running throughout, and once every shard has applied the
# same rename (and a later add-column), replication converges with no
# remaining DDL lock.
function DM_RENAME_COLUMN_OPTIMISTIC_CASE() {
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(1,'aaa');"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(2,'bbb');"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(3,'ccc');"

	# rename on the first shard only; the others keep writing old schema
	run_sql_source1 "alter table ${shardddl1}.${tb1} change a c int;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(4,'ddd');"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(5,'eee');"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(6,'fff');"

	run_sql_source2 "alter table ${shardddl1}.${tb1} change a c int;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(7,'ggg');"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(8,'hhh');"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(9,'iii');"

	run_sql_source2 "alter table ${shardddl1}.${tb2} change a c int;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(10,'jjj');"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(11,'kkk');"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(12,'lll');"

	# all three subtasks must still be Running after the renames
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"query-status test" \
		"Running" 3

	# now, it works as normal
	run_sql_source1 "alter table ${shardddl1}.${tb1} add column d int;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(13,'mmm',13);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(14,'nnn');"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(15,'ooo');"

	run_sql_source2 "alter table ${shardddl1}.${tb1} add column d int;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(16,'ppp',16);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(17,'qqq',17);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(18,'rrr');"

	run_sql_source2 "alter table ${shardddl1}.${tb2} add column d int;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(19,'sss',19);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(20,'ttt',20);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(21,'uuu',21);"

	# insert 3 records to make sure the optimistic-mode shard DDL resolve can finish fast
	sleep 3
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(22,'vvv',22);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(23,'www',23);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(24,'xxx',24);"

	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"query-status test" \
		"\"result\": true" 3
	# the shard DDL lock must be fully resolved at the end
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"shard-ddl-lock" \
		"no DDL lock exists" 1
}
   170  
# Workaround test for `rename column` in optimistic mode until it is
# properly supported; the same flow may apply to some other DDLs that are
# unsupported in optimistic mode.
function DM_RENAME_COLUMN_OPTIMISTIC() {
	run_case RENAME_COLUMN_OPTIMISTIC "double-source-optimistic" \
		"run_sql_source1 \"create table ${shardddl1}.${tb1} (a int primary key, b varchar(10)) DEFAULT CHARSET=latin1 COLLATE=latin1_bin;\"; \
     run_sql_source2 \"create table ${shardddl1}.${tb1} (a int primary key, b varchar(10)) DEFAULT CHARSET=latin1 COLLATE=latin1_bin;\"; \
     run_sql_source2 \"create table ${shardddl1}.${tb2} (a int primary key, b varchar(10)) DEFAULT CHARSET=latin1 COLLATE=latin1_bin;\"" \
		"clean_table" "optimistic"
}
   180  
# Verify lock removal while the master is slowed down by the
# SleepWhenRemoveLock failpoint (set up in DM_RemoveLock).  $1 is the shard
# mode and is also interpolated into the expected master log line.
function DM_RemoveLock_CASE() {
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(1,'aaa');"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(2,'bbb');"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(3,'ccc');"

	run_sql_source1 "alter table ${shardddl1}.${tb1} add column c double;"
	run_sql_source2 "alter table ${shardddl1}.${tb1} add column c double;"
	run_sql_source2 "alter table ${shardddl1}.${tb2} add column c double;"
	# master is now sleeping inside lock removal (failpoint active)
	check_log_contain_with_retry "wait new ddl info putted into etcd in ${1}" $WORK_DIR/master/log/dm-master.log
	check_metric_not_contains $MASTER_PORT "dm_master_shard_ddl_error" 3
	# a new DDL arrives while the previous lock is still being removed
	run_sql_source1 "alter table ${shardddl1}.${tb1} drop column b;"

	if [[ "$1" = "pessimistic" ]]; then
		check_log_contain_with_retry "found new DDL info" $WORK_DIR/master/log/dm-master.log
	else
		check_log_contain_with_retry "fail to delete shard DDL infos and lock operations" $WORK_DIR/master/log/dm-master.log
	fi

	# finish the same DDL sequence on every shard so the locks resolve
	run_sql_source1 "alter table ${shardddl1}.${tb1} change a a bigint default 10;"
	run_sql_source2 "alter table ${shardddl1}.${tb1} drop column b;"
	run_sql_source2 "alter table ${shardddl1}.${tb1} change a a bigint default 10;"
	run_sql_source2 "alter table ${shardddl1}.${tb2} drop column b;"
	run_sql_source2 "alter table ${shardddl1}.${tb2} change a a bigint default 10;"

	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
}
   207  
# Restart the master with the SleepWhenRemoveLock failpoint (30s sleep in
# lock removal), run the RemoveLock case in both shard modes, then restart
# the master again with the failpoint cleared.
function DM_RemoveLock() {
	kill_process dm-master
	check_master_port_offline 1
	export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/master/shardddl/SleepWhenRemoveLock=return(30)"
	run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml
	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT
	# wait until both workers are bound again before running the case
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"list-member -w" \
		"bound" 2

	run_case RemoveLock "double-source-pessimistic" \
		"run_sql_source1 \"create table ${shardddl1}.${tb1} (a int primary key, b varchar(10));\"; \
     run_sql_source2 \"create table ${shardddl1}.${tb1} (a int primary key, b varchar(10));\"; \
     run_sql_source2 \"create table ${shardddl1}.${tb2} (a int primary key, b varchar(10));\"" \
		"clean_table" "pessimistic"
	run_case RemoveLock "double-source-optimistic" \
		"run_sql_source1 \"create table ${shardddl1}.${tb1} (a int primary key, b varchar(10));\"; \
     run_sql_source2 \"create table ${shardddl1}.${tb1} (a int primary key, b varchar(10));\"; \
     run_sql_source2 \"create table ${shardddl1}.${tb2} (a int primary key, b varchar(10));\"" \
		"clean_table" "optimistic"

	# restore a failpoint-free master for the following tests
	export GO_FAILPOINTS=""
	kill_process dm-master
	check_master_port_offline 1
	run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml
	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"list-member -w" \
		"bound" 2
}
   238  
# Create a shard DDL conflict (ADD COLUMN c DOUBLE vs ADD COLUMN c TEXT),
# then restart the master and assert that query-status / shard-ddl-lock
# report the same state before and after the restart.  $1 is the shard mode.
function DM_RestartMaster_CASE() {
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(1,'aaa');"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(2,'bbb');"

	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml

	run_sql_source1 "alter table ${shardddl1}.${tb1} add column c double;"
	if [[ "$1" = "pessimistic" ]]; then
		check_log_contain_with_retry 'putted shard DDL info.*ADD COLUMN' \
			$WORK_DIR/worker1/log/dm-worker.log $WORK_DIR/worker2/log/dm-worker.log
	else
		check_log_contain_with_retry 'finish to handle ddls in optimistic shard mode.*add column' \
			$WORK_DIR/worker1/log/dm-worker.log $WORK_DIR/worker2/log/dm-worker.log
	fi

	# the second source adds the same column with a conflicting type
	run_sql_source2 "alter table ${shardddl1}.${tb1} add column c text;"

	if [[ "$1" = "pessimistic" ]]; then
		# count of 2: `blockingDDLs` and `unresolvedGroups`
		run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
			"query-status test" \
			'ALTER TABLE `shardddl`.`tb` ADD COLUMN `c` DOUBLE' 2 \
			'ALTER TABLE `shardddl`.`tb` ADD COLUMN `c` TEXT' 2
		run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
			"shard-ddl-lock" \
			'ALTER TABLE `shardddl`.`tb` ADD COLUMN `c`' 1
	else
		run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
			"query-status test" \
			'ALTER TABLE `shardddl`.`tb` ADD COLUMN `c` TEXT' 1 \
			"\"${SOURCE_ID2}-\`${shardddl1}\`.\`${tb1}\`\"" 1
		run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
			"shard-ddl-lock" \
			'mysql-replica-01-`shardddl1`.`tb1`' 1 \
			'mysql-replica-02-`shardddl1`.`tb1`' 2 \
			'ALTER TABLE `shardddl`.`tb` ADD COLUMN `c` TEXT' 1
	fi

	restart_master

	# after the restart the lock state must be recovered identically
	if [[ "$1" = "pessimistic" ]]; then
		run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
			"query-status test" \
			'ALTER TABLE `shardddl`.`tb` ADD COLUMN `c` DOUBLE' 2 \
			'ALTER TABLE `shardddl`.`tb` ADD COLUMN `c` TEXT' 2
		run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
			"shard-ddl-lock" \
			'ALTER TABLE `shardddl`.`tb` ADD COLUMN `c`' 1
	else
		run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
			"query-status test" \
			'ALTER TABLE `shardddl`.`tb` ADD COLUMN `c` TEXT' 1 \
			"\"${SOURCE_ID2}-\`${shardddl1}\`.\`${tb1}\`\"" 1
		run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
			"shard-ddl-lock" \
			'mysql-replica-01-`shardddl1`.`tb1`' 1 \
			'mysql-replica-02-`shardddl1`.`tb1`' 2 \
			'ALTER TABLE `shardddl`.`tb` ADD COLUMN `c` TEXT' 1
	fi
}
   299  
# Run the RestartMaster case under both shard modes.
function DM_RestartMaster() {
	run_case RestartMaster "double-source-pessimistic" \
		"run_sql_source1 \"create table ${shardddl1}.${tb1} (a int primary key, b varchar(10));\"; \
     run_sql_source2 \"create table ${shardddl1}.${tb1} (a int primary key, b varchar(10));\"" \
		"clean_table" "pessimistic"

	run_case RestartMaster "double-source-optimistic" \
		"run_sql_source1 \"create table ${shardddl1}.${tb1} (a int primary key, b varchar(10));\"; \
     run_sql_source2 \"create table ${shardddl1}.${tb1} (a int primary key, b varchar(10));\"" \
		"clean_table" "optimistic"
}
   311  
# Provoke an optimistic shard DDL conflict, then recover by shrinking the
# block-allow-list (dropping shardddl2) and restarting the task; finally
# verify no stale DDL lock survives a master restart.
function DM_UpdateBARule_CASE() {
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(1);"
	run_sql_source1 "insert into ${shardddl2}.${tb1} values(2);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(3);"
	run_sql_source2 "insert into ${shardddl2}.${tb1} values(4);"

	run_sql_source1 "alter table ${shardddl1}.${tb1} add column new_col1 int"
	run_sql_source1 "alter table ${shardddl2}.${tb1} add column new_col1 int"
	run_sql_source2 "alter table ${shardddl1}.${tb1} add column new_col1 int"
	run_sql_source2 "alter table ${shardddl2}.${tb1} add column new_col1 int"

	run_sql_source1 "insert into ${shardddl1}.${tb1} values(5,5);"
	run_sql_source1 "insert into ${shardddl2}.${tb1} values(6,6);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(7,7);"
	run_sql_source2 "insert into ${shardddl2}.${tb1} values(8,8);"

	# source1 db2.tb1 add column and then drop column
	run_sql_source1 "alter table ${shardddl2}.${tb1} add column new_col2 int"
	run_sql_source1 "insert into ${shardddl2}.${tb1} values(9,9,9);"
	run_sql_source1 "alter table ${shardddl2}.${tb1} drop column new_col2"
	run_sql_source1 "insert into ${shardddl2}.${tb1} values(10,10);"

	# source1 db1.tb1 add column
	run_sql_source1 "alter table ${shardddl1}.${tb1} add column new_col3 int"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(11,11,11);"

	# source2 db1.tb1 drop column
	run_sql_source2 "alter table ${shardddl1}.${tb1} drop column new_col1"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(12);"

	# source2 db2.tb1, source1 db2.tb1 do unsupported DDLs and provoke a conflict
	# source2 db2.tb1 do a rename ddl, should not provoke a conflict
	run_sql_source2 "alter table ${shardddl2}.${tb1} change id new_id int;"
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"query-status test" \
		"Running" 3
	# source1 db2.tb1 do a different add column not null ddl, should provoke a conflict
	run_sql_source1 "alter table ${shardddl2}.${tb1} add column new_col4 int not null;"
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"query-status test" \
		"fail to try sync the optimistic shard ddl lock" 1

	# user found error and then change block-allow-list, restart task
	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"stop-task test" \
		"\"result\": true" 3

	# rewrite the task config to only replicate shardddl1
	cp $cur/conf/double-source-optimistic.yaml $WORK_DIR/task.yaml
	sed -i 's/do-dbs: \["shardddl1","shardddl2"\]/do-dbs: \["shardddl1"\]/g' $WORK_DIR/task.yaml
	echo 'ignore-checking-items: ["schema_of_shard_tables"]' >>$WORK_DIR/task.yaml

	# source1: db1.tb1(id,new_col1,new_col3)
	# source2: db1.tb1(id)
	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"start-task $WORK_DIR/task.yaml" \
		"\"result\": true" 3

	# no lock exist when task begin
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"show-ddl-locks" \
		"no DDL lock exists" 1

	run_sql_source1 "insert into ${shardddl1}.${tb1} values(13,13,13);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(14);"
	run_sql_tidb_with_retry "select count(1) from ${shardddl}.${tb};" "count(1): 14"

	# the restarted master must not resurrect any DDL lock
	restart_master

	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"show-ddl-locks" \
		"no DDL lock exists" 1

	# converge the two shards on the same schema again
	run_sql_source1 "alter table ${shardddl1}.${tb1} drop column new_col1"
	run_sql_source2 "alter table ${shardddl1}.${tb1} add column new_col3 int"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(15,15);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(16,16);"
	run_sql_tidb_with_retry "select count(1) from ${shardddl}.${tb};" "count(1): 16"

	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"query-status test" \
		"\"result\": true" 3
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"show-ddl-locks" \
		"no DDL lock exists" 1
}
   397  
# Run the UpdateBARule case with two sources and four shard tables, optimistic mode.
function DM_UpdateBARule() {
	run_case UpdateBARule "double-source-optimistic" "init_table 111 121 211 221" "clean_table" "optimistic"
}
   401  
# Multi-column ALTERs applied shard by shard in four phases (add-only,
# drop-only, add+drop, drop+add), with DML interleaved so rows are written
# against every intermediate schema; each phase ends with a sync-diff check.
function DM_ADD_DROP_COLUMNS_CASE() {
	# add cols
	run_sql_source1 "alter table ${shardddl1}.${tb1} add column col1 int, add column col2 int, add column col3 int;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(1,now(),1,1,1);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(2,now());"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(3,now());"
	run_sql_source2 "alter table ${shardddl1}.${tb1} add column col1 int, add column col2 int, add column col3 int;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(4,now(),4,4,4);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(5,now(),5,5,5);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(6,now());"
	run_sql_source2 "alter table ${shardddl1}.${tb2} add column col1 int, add column col2 int, add column col3 int;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(7,now(),7,7,7);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(8,now(),8,8,8);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(9,now(),9,9,9);"
	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml

	# drop cols
	run_sql_source1 "alter table ${shardddl1}.${tb1} drop column col1, drop column col2;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(11,now(),11);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(12,now(),12,12,12);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(13,now(),13,13,13);"
	run_sql_source2 "alter table ${shardddl1}.${tb1} drop column col1, drop column col2;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(14,now(),14);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(15,now(),15);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(16,now(),16,16,16);"
	run_sql_source2 "alter table ${shardddl1}.${tb2} drop column col1, drop column col2;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(17,now(),17);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(18,now(),18);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(19,now(),19);"
	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml

	# add and drop
	run_sql_source1 "alter table ${shardddl1}.${tb1} add column col4 int, drop column col3;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(21,now(),21);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(22,now(),22);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(23,now(),23);"
	run_sql_source2 "alter table ${shardddl1}.${tb1} add column col4 int, drop column col3;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(24,now(),24);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(25,now(),25);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(26,now(),26);"
	run_sql_source2 "alter table ${shardddl1}.${tb2} add column col4 int, drop column col3;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(27,now(),27);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(28,now(),28);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(29,now(),29);"
	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml

	# drop and add
	run_sql_source1 "alter table ${shardddl1}.${tb1} drop column col4, add column col5 int;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(31,now(),31);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(32,now(),32);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(33,now(),33);"
	run_sql_source2 "alter table ${shardddl1}.${tb1} drop column col4, add column col5 int;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(34,now(),34);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(35,now(),35);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(36,now(),36);"
	run_sql_source2 "alter table ${shardddl1}.${tb2} drop column col4, add column col5 int;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(37,now(),37);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(38,now(),38);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(39,now(),39);"

	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
}
   464  
# Run the ADD_DROP_COLUMNS case under both shard modes.
function DM_ADD_DROP_COLUMNS() {
	run_case ADD_DROP_COLUMNS "double-source-pessimistic" \
		"run_sql_source1 \"create table ${shardddl1}.${tb1} (a int primary key, id datetime);\"; \
     run_sql_source2 \"create table ${shardddl1}.${tb1} (a int primary key, id datetime);\"; \
     run_sql_source2 \"create table ${shardddl1}.${tb2} (a int primary key, id datetime);\"" \
		"clean_table" "pessimistic"
	run_case ADD_DROP_COLUMNS "double-source-optimistic" \
		"run_sql_source1 \"create table ${shardddl1}.${tb1} (a int primary key, id datetime);\"; \
     run_sql_source2 \"create table ${shardddl1}.${tb1} (a int primary key, id datetime);\"; \
     run_sql_source2 \"create table ${shardddl1}.${tb2} (a int primary key, id datetime);\"" \
		"clean_table" "optimistic"
}
   477  
# Mixed column/index ALTERs applied shard by shard in four phases
# (add col+index, drop col+index, drop col/add index, add col/drop index),
# with DML interleaved against each intermediate schema.
function DM_COLUMN_INDEX_CASE() {
	# add col and index
	run_sql_source1 "alter table ${shardddl1}.${tb1} add column col3 int, add index idx_col1(col1);"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(1,1,1,1);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(2,2,2);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(3,3,3);"
	run_sql_source2 "alter table ${shardddl1}.${tb1} add column col3 int, add index idx_col1(col1);"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(4,4,4,4);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(5,5,5,5);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(6,6,6);"
	run_sql_source2 "alter table ${shardddl1}.${tb2} add column col3 int, add index idx_col1(col1);"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(7,7,7,7);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(8,8,8,8);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(9,9,9,9);"

	# drop col and index
	run_sql_source1 "alter table ${shardddl1}.${tb1} drop column col2, drop index idx_col1;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(11,11,11);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(12,12,12,12);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(13,13,13,13);"
	run_sql_source2 "alter table ${shardddl1}.${tb1} drop column col2, drop index idx_col1;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(14,14,14);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(15,15,15);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(16,16,16,16);"
	run_sql_source2 "alter table ${shardddl1}.${tb2} drop column col2, drop index idx_col1;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(17,17,17);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(18,18,18);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(19,19,19);"

	# drop col, add index
	run_sql_source1 "alter table ${shardddl1}.${tb1} drop column col1, add index idx_col3(col3);"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(21,21);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(22,22,22);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(23,23,23);"
	run_sql_source2 "alter table ${shardddl1}.${tb1} drop column col1, add index idx_col3(col3);"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(24,24);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(25,25);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(26,26,26);"
	run_sql_source2 "alter table ${shardddl1}.${tb2} drop column col1, add index idx_col3(col3);"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(27,27);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(28,28);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(29,29);"

	# add col, drop index
	run_sql_source1 "alter table ${shardddl1}.${tb1} add column col4 int, drop index idx_col3;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(31,31,31);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(32,32);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(33,33);"
	run_sql_source2 "alter table ${shardddl1}.${tb1} add column col4 int, drop index idx_col3;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(34,34,34);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(35,35,35);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(36,36);"
	run_sql_source2 "alter table ${shardddl1}.${tb2} add column col4 int, drop index idx_col3;"
	run_sql_source1 "insert into ${shardddl1}.${tb1} values(37,37,37);"
	run_sql_source2 "insert into ${shardddl1}.${tb1} values(38,38,38);"
	run_sql_source2 "insert into ${shardddl1}.${tb2} values(39,39,39);"

	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
}
   537  
# Run the COLUMN_INDEX case under both shard modes.
function DM_COLUMN_INDEX() {
	run_case COLUMN_INDEX "double-source-pessimistic" \
		"run_sql_source1 \"create table ${shardddl1}.${tb1} (a int primary key, col1 int, col2 int);\"; \
     run_sql_source2 \"create table ${shardddl1}.${tb1} (a int primary key, col1 int, col2 int);\"; \
     run_sql_source2 \"create table ${shardddl1}.${tb2} (a int primary key, col1 int, col2 int);\"" \
		"clean_table" "pessimistic"
	run_case COLUMN_INDEX "double-source-optimistic" \
		"run_sql_source1 \"create table ${shardddl1}.${tb1} (a int primary key, col1 int, col2 int);\"; \
     run_sql_source2 \"create table ${shardddl1}.${tb1} (a int primary key, col1 int, col2 int);\"; \
     run_sql_source2 \"create table ${shardddl1}.${tb2} (a int primary key, col1 int, col2 int);\"" \
		"clean_table" "optimistic"
}
   550  
   551  function DM_COMPACT_CASE() {
   552  	END=100
   553  	for i in $(seq 1 $END); do
   554  		run_sql_source1 "insert into ${shardddl1}.${tb1}(a,b) values($i,$i)"
   555  		run_sql_source1 "update ${shardddl1}.${tb1} set c=1 where a=$i"
   556  		run_sql_source1 "update ${shardddl1}.${tb1} set c=c+1 where a=$i"
   557  		run_sql_source1 "update ${shardddl1}.${tb1} set b=b+1 where a=$i"
   558  		run_sql_source1 "update ${shardddl1}.${tb1} set a=a+100 where a=$i"
   559  		run_sql_source1 "delete from ${shardddl1}.${tb1} where a=$((i + 100))"
   560  		run_sql_source1 "insert into ${shardddl1}.${tb1}(a,b) values($i,$i)"
   561  	done
   562  	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml 30
   563  	compactCnt=$(cat $WORK_DIR/worker1/log/dm-worker.log $WORK_DIR/worker2/log/dm-worker.log | grep "finish to compact" | wc -l)
   564  	if [[ "$compactCnt" -le 100 ]]; then
   565  		echo "compact $compactCnt dmls which is less than 100"
   566  		exit 1
   567  	fi
   568  }
   569  
   570  function DM_COMPACT() {
   571  	# mock downstream has a high latency and upstream has a high workload
   572  	kill_process dm-worker
   573  	check_process_exit worker1 20
   574  	check_process_exit worker2 20
   575  	export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/syncer/BlockExecuteSQLs=return(1);github.com/pingcap/tiflow/dm/syncer/SafeModeInitPhaseSeconds=return(\"5s\")"
   576  	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
   577  	run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml
   578  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
   579  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT
   580  
   581  	run_case COMPACT "single-source-no-sharding" \
   582  		"run_sql_source1 \"create table ${shardddl1}.${tb1} (a int primary key, b int unique, c int);\"" \
   583  		"clean_table" ""
   584  }
   585  
   586  function DM_COMPACT_USE_DOWNSTREAM_SCHEMA_CASE() {
   587  	END=10
   588  	# As this kind of sql is no use, like "update tb1 set c=1 where a=100" which is behind of "insert into tb1(a,b,c) values(100,1,1)"
   589  	# We should avoid this kind of sql to make sure the count of dmls
   590  	for i in $(seq 0 $END); do
   591  		run_sql_source1 "insert into ${shardddl1}.${tb1}(a,b,c) values($((i + 100)),$i,$i)"
   592  		run_sql_source1 "update ${shardddl1}.${tb1} set c=20 where a=$((i + 100))"
   593  		run_sql_source1 "update ${shardddl1}.${tb1} set c=c+1 where a=$((i + 100))"
   594  		# Use downstream uk 'b' as key and this sql which modifiies 'b' will be splited to two job(delete+insert)
   595  		run_sql_source1 "update ${shardddl1}.${tb1} set b=b+1 where a=$((i + 100))"
   596  		run_sql_source1 "update ${shardddl1}.${tb1} set a=a+100 where a=$((i + 100))"
   597  		run_sql_source1 "delete from ${shardddl1}.${tb1} where a=$((i + 200))"
   598  		run_sql_source1 "insert into ${shardddl1}.${tb1}(a,b,c) values($((i + 100)),$i,$i)"
   599  	done
   600  	run_sql_tidb_with_retry_times "select count(1) from ${shardddl}.${tb};" "count(1): 11" 30
   601  	run_sql_tidb "create table ${shardddl}.${tb}_temp (a int primary key auto_increment, b int unique not null, c int) auto_increment = 100; 
   602  		insert into ${shardddl}.${tb}_temp (a, b, c) select a, b, c from ${shardddl}.${tb}; 
   603  		drop table ${shardddl}.${tb}; rename table ${shardddl}.${tb}_temp to ${shardddl}.${tb};"
   604  	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml 30
   605  	compactCnt=$(cat $WORK_DIR/worker1/log/dm-worker.log $WORK_DIR/worker2/log/dm-worker.log | grep "finish to compact" | wc -l)
   606  	# As compact is affected by "j.tp == flush", the check count of compact use "-le 50"
   607  	if [[ "$compactCnt" -le 50 ]]; then
   608  		echo "compact $compactCnt dmls which is less than 50"
   609  		exit 1
   610  	fi
   611  }
   612  
function DM_COMPACT_USE_DOWNSTREAM_SCHEMA() {
	# Downstream pk/uk/columns are different from upstream; the compactor must
	# use the downstream schema when building identify keys.
	kill_process dm-worker
	check_process_exit worker1 20
	check_process_exit worker2 20
	# DownstreamIdentifyKeyCheckInCompact=return(20) checks whether the key value
	# seen by the compactor is less than 20; if not, the worker panics.
	# The goal is to verify that the compactor uses the downstream schema:
	# - if it uses the downstream schema, the key is 'b' with values < 20;
	# - if it uses the upstream schema, the key is 'a' with values > 100.
	# SkipFlushCompactor keeps jobs in the compactor so the check can observe them.
	export GO_FAILPOINTS='github.com/pingcap/tiflow/dm/syncer/SkipFlushCompactor=return();github.com/pingcap/tiflow/dm/syncer/DownstreamIdentifyKeyCheckInCompact=return(20)'
	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
	run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT

	# Upstream table keyed by pk 'a'; downstream table keyed by uk 'b' plus a
	# surrogate auto-increment pk 'd' starting at 100.
	run_case COMPACT_USE_DOWNSTREAM_SCHEMA "single-source-no-sharding" \
		"run_sql_source1 \"create table ${shardddl1}.${tb1} (a int primary key, b int unique not null, c int);\"; 
		run_sql_tidb \"drop database if exists ${shardddl}; create database ${shardddl}; create table ${shardddl}.${tb} (a int, b int unique not null, c int, d int primary key auto_increment) auto_increment = 100;\"" \
		"clean_table" ""
}
   633  
   634  function DM_MULTIPLE_ROWS_CASE() {
   635  	END=100
   636  	for i in $(seq 1 10 $END); do
   637  		run_sql_source1 "insert into ${shardddl1}.${tb1}(a,b) values($i,$i),($((i + 1)),$((i + 1))),($((i + 2)),$((i + 2))),($((i + 3)),$((i + 3))),($((i + 4)),$((i + 4))),\
   638  		($((i + 5)),$((i + 5))),($((i + 6)),$((i + 6))),($((i + 7)),$((i + 7))),($((i + 8)),$((i + 8))),($((i + 9)),$((i + 9)))"
   639  	done
   640  	for i in $(seq 1 10 $END); do
   641  		run_sql_source1 "update ${shardddl1}.${tb1} set c=1 where a>=$i and a<$((i + 10))"
   642  	done
   643  	for i in $(seq 1 10 $END); do
   644  		run_sql_source1 "update ${shardddl1}.${tb1} set b = 0 - b where a>=$i and a<$((i + 10))"
   645  	done
   646  	for i in $(seq 1 10 $END); do
   647  		run_sql_source1 "update ${shardddl1}.${tb1} set a = 0 - a where a>=$i and a<$((i + 10))"
   648  	done
   649  	for i in $(seq 1 10 $END); do
   650  		run_sql_source1 "delete from ${shardddl1}.${tb1} where a<=$((0 - i)) and a>$((-10 - i))"
   651  	done
   652  
   653  	# wait safemode exit
   654  	check_log_contain_with_retry "disable safe-mode after task initialization finished" $WORK_DIR/worker1/log/dm-worker.log $WORK_DIR/worker2/log/dm-worker.log
   655  
   656  	# insert again without safmode
   657  	for i in $(seq 1 10 $END); do
   658  		run_sql_source1 "insert into ${shardddl1}.${tb1}(a,b) values($i,$i),($((i + 1)),$((i + 1))),($((i + 2)),$((i + 2))),($((i + 3)),$((i + 3))),($((i + 4)),$((i + 4))),\
   659  		($((i + 5)),$((i + 5))),($((i + 6)),$((i + 6))),($((i + 7)),$((i + 7))),($((i + 8)),$((i + 8))),($((i + 9)),$((i + 9)))"
   660  	done
   661  	for i in $(seq 1 10 $END); do
   662  		run_sql_source1 "update ${shardddl1}.${tb1} set c=1 where a>=$i and a<$((i + 10))"
   663  	done
   664  	for i in $(seq 1 10 $END); do
   665  		run_sql_source1 "update ${shardddl1}.${tb1} set b = 0 - b where a>=$i and a<$((i + 10))"
   666  	done
   667  	for i in $(seq 1 10 $END); do
   668  		run_sql_source1 "update ${shardddl1}.${tb1} set a = 0 - a where a>=$i and a<$((i + 10))"
   669  	done
   670  	for i in $(seq 1 10 $END); do
   671  		run_sql_source1 "delete from ${shardddl1}.${tb1} where a<=$((0 - i)) and a>$((-10 - i))"
   672  	done
   673  
   674  	# insert new values, otherwise there may not be any data in downstream in middle stage and check_sync_diff return true immediately
   675  	for i in $(seq 101 10 200); do
   676  		run_sql_source1 "insert into ${shardddl1}.${tb1}(a,b) values($i,$i),($((i + 1)),$((i + 1))),($((i + 2)),$((i + 2))),($((i + 3)),$((i + 3))),($((i + 4)),$((i + 4))),\
   677  		($((i + 5)),$((i + 5))),($((i + 6)),$((i + 6))),($((i + 7)),$((i + 7))),($((i + 8)),$((i + 8))),($((i + 9)),$((i + 9)))"
   678  	done
   679  
   680  	run_sql_tidb_with_retry "select count(1) from ${shardddl}.${tb} where a>100 and a<=200;" "count(1): 100"
   681  	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml 30
   682  	insertMergeCnt=$(cat $WORK_DIR/worker1/log/dm-worker.log $WORK_DIR/worker2/log/dm-worker.log | grep '\[op=DMLInsert\]' | wc -l)
   683  	replaceMergeCnt=$(cat $WORK_DIR/worker1/log/dm-worker.log $WORK_DIR/worker2/log/dm-worker.log | grep '\[op=DMLReplace\]' | wc -l)
   684  	updateMergeCnt=$(cat $WORK_DIR/worker1/log/dm-worker.log $WORK_DIR/worker2/log/dm-worker.log | grep '\[op=DMLInsertOnDuplicateUpdate\]' | wc -l)
   685  	deleteMergeCnt=$(cat $WORK_DIR/worker1/log/dm-worker.log $WORK_DIR/worker2/log/dm-worker.log | grep '\[op=DMLDelete\]' | wc -l)
   686  	echo $insertMergeCnt $replaceMergeCnt $updateMergeCnt $deleteMergeCnt
   687  	if [[ "$insertMergeCnt" -le 5 || "$updateMergeCnt" -le 5 || "$deleteMergeCnt" -le 5 || "$replaceMergeCnt" -le 5 ]]; then
   688  		echo "merge dmls less than 5, insertMergeCnt: $insertMergeCnt, replaceMergeCnt: $replaceMergeCnt, updateMergeCnt: $updateMergeCnt, deleteMergeCnt: $deleteMergeCnt"
   689  		exit 1
   690  	fi
   691  }
   692  
   693  function DM_MULTIPLE_ROWS() {
   694  	kill_process dm-worker
   695  	check_process_exit worker1 20
   696  	check_process_exit worker2 20
   697  	export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/syncer/BlockExecuteSQLs=return(1);github.com/pingcap/tiflow/dm/syncer/SafeModeInitPhaseSeconds=return(\"5s\")"
   698  	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
   699  	run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml
   700  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
   701  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT
   702  
   703  	run_case MULTIPLE_ROWS "single-source-no-sharding" \
   704  		"run_sql_source1 \"create table ${shardddl1}.${tb1} (a int primary key, b int unique, c int);\"" \
   705  		"clean_table" ""
   706  
   707  	kill_process dm-worker
   708  	check_process_exit worker1 20
   709  	check_process_exit worker2 20
   710  	export GO_FAILPOINTS=''
   711  	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
   712  	run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml
   713  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
   714  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT
   715  }
   716  
function DM_KEY_NOT_FOUND_CASE() {
	# Precondition: upstream and downstream are in sync.
	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml

	# Remove the rows directly in the downstream so the replicated
	# update/delete events below find no matching record there.
	run_sql_tidb "delete from ${shardddl}.${tb} where id=1;"
	run_sql_tidb "delete from ${shardddl}.${tb} where id=2;"
	run_sql_source1 "update ${shardddl1}.${tb1} set id=3 where id=1;"
	run_sql_source2 "update ${shardddl1}.${tb1} set id=4 where id=2;"
	run_sql_source1 "delete from ${shardddl1}.${tb1} where id=3;"
	run_sql_source2 "delete from ${shardddl1}.${tb1} where id=4;"
	# Each worker log is checked separately so BOTH must contain the warning
	# (source1 events on worker1, source2 events on worker2 — presumably; the
	# per-file checks make the assertion strict either way).
	check_log_contain_with_retry "no matching record is found to update/delete, ER_KEY_NOT_FOUND" $WORK_DIR/worker1/log/dm-worker.log
	check_log_contain_with_retry "no matching record is found to update/delete, ER_KEY_NOT_FOUND" $WORK_DIR/worker2/log/dm-worker.log
	# The warning is non-fatal: replication continues and both sides converge.
	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml 30
}
   730  
   731  function DM_KEY_NOT_FOUND() {
   732  	kill_process dm-worker
   733  	check_process_exit worker1 20
   734  	check_process_exit worker2 20
   735  	export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/syncer/SafeModeInitPhaseSeconds=return(\"0s\")"
   736  	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
   737  	run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml
   738  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
   739  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT
   740  
   741  	run_case KEY_NOT_FOUND "double-source-optimistic" \
   742  		"init_table 111 211; \
   743  	 run_sql_source1 \"insert into ${shardddl1}.${tb1} values(1);\"; \
   744       run_sql_source2 \"insert into ${shardddl1}.${tb1} values(2);\"" \
   745  		"clean_table" ""
   746  
   747  	kill_process dm-worker
   748  	check_process_exit worker1 20
   749  	check_process_exit worker2 20
   750  	export GO_FAILPOINTS=''
   751  	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
   752  	run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml
   753  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
   754  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT
   755  }
   756  
   757  function DM_CAUSALITY_CASE() {
   758  	run_sql_source1 "insert into ${shardddl1}.${tb1} values(1,2)"
   759  	run_sql_source1 "insert into ${shardddl1}.${tb1} values(2,3)"
   760  	run_sql_source1 "update ${shardddl1}.${tb1} set a=3, b=4 where b=3"
   761  	run_sql_source1 "delete from ${shardddl1}.${tb1} where a=1"
   762  	run_sql_source1 "insert into ${shardddl1}.${tb1} values(1,3)"
   763  
   764  	check_log_contain_with_retry "meet causality key, will generate a conflict job to flush all sqls" $WORK_DIR/worker1/log/dm-worker.log $WORK_DIR/worker2/log/dm-worker.log
   765  	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
   766  }
   767  
function DM_CAUSALITY() {
	# Entry point for DM_CAUSALITY_CASE: one upstream table with pk 'a' and
	# uk 'b', no sharding.
	run_case CAUSALITY "single-source-no-sharding" \
		"run_sql_source1 \"create table ${shardddl1}.${tb1} (a int primary key, b int unique);\"" \
		"clean_table" ""
}
   773  
   774  function DM_CAUSALITY_USE_DOWNSTREAM_SCHEMA_CASE() {
   775  	run_sql_source1 "insert into ${shardddl1}.${tb1} values(1,2)"
   776  	run_sql_source1 "insert into ${shardddl1}.${tb1} values(2,3)"
   777  	run_sql_source1 "update ${shardddl1}.${tb1} set a=3, b=4 where b=3"
   778  	run_sql_source1 "delete from ${shardddl1}.${tb1} where a=1"
   779  	run_sql_source1 "insert into ${shardddl1}.${tb1} values(1,3)"
   780  
   781  	run_sql_tidb_with_retry_times "select count(1) from ${shardddl}.${tb} where a =1 and b=3;" "count(1): 1" 30
   782  	run_sql_tidb "create table ${shardddl}.${tb}_temp (a int primary key, b int unique); 
   783  		insert into ${shardddl}.${tb}_temp (a, b) select a, b from ${shardddl}.${tb};
   784  		drop table ${shardddl}.${tb}; rename table ${shardddl}.${tb}_temp to ${shardddl}.${tb};"
   785  	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
   786  
   787  	causalityCnt=$(cat $WORK_DIR/worker1/log/dm-worker.log $WORK_DIR/worker2/log/dm-worker.log | grep "meet causality key, will generate a conflict job to flush all sqls" | wc -l)
   788  	if [[ "$causalityCnt" -ne 0 ]]; then
   789  		echo "causalityCnt is $causalityCnt, but it should be 0"
   790  		exit 1
   791  	fi
   792  }
   793  
function DM_CAUSALITY_USE_DOWNSTREAM_SCHEMA() {
	# Downstream pk/uk/columns are different from upstream; causality detection
	# must use the downstream schema. Downstream keys rows by uk 'b' plus a
	# surrogate auto-increment pk 'c' starting at 100.
	run_case CAUSALITY_USE_DOWNSTREAM_SCHEMA "single-source-no-sharding" \
		"run_sql_source1 \"create table ${shardddl1}.${tb1} (a int primary key, b int unique);\"; 
		run_sql_tidb \"drop database if exists ${shardddl}; create database ${shardddl}; create table ${shardddl}.${tb} (a int, b int unique, c int primary key auto_increment) auto_increment = 100;\"" \
		"clean_table" ""
}
   801  
function DM_DML_EXECUTE_ERROR_CASE() {
	# Trigger two DML jobs; the ErrorOnLastDML failpoint (exported by the
	# caller) makes the last DML of the batch fail during execution.
	run_sql_source1 "insert into ${shardddl1}.${tb1}(a,b) values(1,1)"
	run_sql_source1 "update ${shardddl1}.${tb1} set b=b+1 where a=1"

	# The failed batch is reported as a queries/jobs length mismatch.
	check_log_contain_with_retry "length of queries not equals length of jobs" $WORK_DIR/worker1/log/dm-worker.log $WORK_DIR/worker2/log/dm-worker.log
	# The task must end up Paused with ErrorOnLastDML as the raw cause.
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"query-status test" \
		"\"RawCause\": \"ErrorOnLastDML\"" 1 \
		"Paused" 1
}
   812  
   813  function DM_DML_EXECUTE_ERROR() {
   814  	kill_process dm-worker
   815  	check_process_exit worker1 20
   816  	check_process_exit worker2 20
   817  	export GO_FAILPOINTS='github.com/pingcap/tiflow/dm/syncer/ErrorOnLastDML=return()'
   818  	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
   819  	run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml
   820  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
   821  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT
   822  
   823  	run_case DML_EXECUTE_ERROR "single-source-no-sharding" \
   824  		"run_sql_source1 \"create table ${shardddl1}.${tb1} (a int primary key, b int);\"" \
   825  		"clean_table" ""
   826  }
   827  
   828  function run() {
   829  	init_cluster
   830  	init_database
   831  
   832  	DM_COMPACT
   833  	DM_COMPACT_USE_DOWNSTREAM_SCHEMA
   834  	DM_MULTIPLE_ROWS
   835  	DM_CAUSALITY
   836  	DM_CAUSALITY_USE_DOWNSTREAM_SCHEMA
   837  	DM_UpdateBARule
   838  	DM_RENAME_TABLE
   839  	DM_RENAME_COLUMN_OPTIMISTIC
   840  	DM_RemoveLock
   841  	DM_RestartMaster
   842  	DM_ADD_DROP_COLUMNS
   843  	DM_COLUMN_INDEX
   844  	DM_DML_EXECUTE_ERROR
   845  	DM_KEY_NOT_FOUND
   846  	start=1
   847  	end=5
   848  	for i in $(seq -f "%03g" ${start} ${end}); do
   849  		DM_${i}
   850  		sleep 1
   851  	done
   852  
   853  }
   854  
   855  cleanup_data $shardddl
   856  cleanup_data $shardddl1
   857  cleanup_data $shardddl2
   858  # also cleanup dm processes in case of last run failed
   859  cleanup_process $*
   860  run $*
   861  cleanup_process $*
   862  
   863  echo "[$(date)] <<<<<< test case $TEST_NAME success! >>>>>>"