github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/dm/tests/many_tables/run.sh

#!/bin/bash

set -eu

cur=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
source $cur/../_utils/test_prepare

WORK_DIR=$TEST_DIR/$TEST_NAME
TABLE_NUM=500

function restore_timezone() {
	run_sql_source1 "SET GLOBAL TIME_ZONE = SYSTEM"
	run_sql_tidb "SET GLOBAL TIME_ZONE = SYSTEM"
}

function prepare_data() {
	run_sql 'DROP DATABASE if exists many_tables_db;' $MYSQL_PORT1 $MYSQL_PASSWORD1
	run_sql 'CREATE DATABASE many_tables_db;' $MYSQL_PORT1 $MYSQL_PASSWORD1
	for i in $(seq $TABLE_NUM); do
		run_sql "CREATE TABLE many_tables_db.t$i(i INT, j INT UNIQUE KEY, c1 VARCHAR(20), c2 VARCHAR(20), c3 VARCHAR(20), c4 VARCHAR(20), c5 VARCHAR(20), c6 VARCHAR(20), c7 VARCHAR(20), c8 VARCHAR(20), c9 VARCHAR(20), c10 VARCHAR(20), c11 VARCHAR(20), c12 VARCHAR(20), c13 VARCHAR(20));" $MYSQL_PORT1 $MYSQL_PASSWORD1
		for j in $(seq 2); do
			run_sql "INSERT INTO many_tables_db.t$i(i,j) VALUES ($i,${j}000$j),($i,${j}001$j);" $MYSQL_PORT1 $MYSQL_PASSWORD1
		done
		# give each table an odd number of rows before the 'ALTER TABLE' command, so check_sync_diff works correctly
		run_sql "INSERT INTO many_tables_db.t$i(i,j) VALUES ($i, 99999);" $MYSQL_PORT1 $MYSQL_PASSWORD1
	done
}

function incremental_data() {
	for j in $(seq 3 5); do
		for i in $(seq $TABLE_NUM); do
			run_sql "INSERT INTO many_tables_db.t$i(i,j) VALUES ($i,${j}000$j),($i,${j}001$j);" $MYSQL_PORT1 $MYSQL_PASSWORD1
		done
	done

	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"pause-task test" \
		"\"result\": true" 2

	run_sql "ALTER TABLE many_tables_db.t1 ADD x datetime DEFAULT current_timestamp;" $MYSQL_PORT1 $MYSQL_PASSWORD1
	run_sql "ALTER TABLE many_tables_db.t2 ADD x timestamp DEFAULT current_timestamp;" $MYSQL_PORT1 $MYSQL_PASSWORD1
	sleep 1

	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"resume-task test" \
		"\"result\": true" 2
}

function incremental_data_2() {
	j=6
	for i in $(seq $TABLE_NUM); do
		run_sql "INSERT INTO many_tables_db.t$i (i, j) VALUES ($i,${j}000$j);" $MYSQL_PORT1 $MYSQL_PASSWORD1
	done
}

function run() {
	pkill -hup tidb-server 2>/dev/null || true
	wait_process_exit tidb-server

	# clean unistore data
	rm -rf /tmp/tidb

	# start a TiDB with a small txn-total-size-limit
	run_tidb_server 4000 $TIDB_PASSWORD $cur/conf/tidb-config-small-txn.toml
	sleep 2

	# use different time zones upstream and downstream; a mishandled timestamp column
	# should then show up as a data mismatch in the later check_sync_diff
	run_sql_source1 "SET GLOBAL TIME_ZONE = '+02:00'"
	run_sql_source1 "SELECT cast(TIMEDIFF(NOW(6), UTC_TIMESTAMP(6)) as time) time"
	check_contains "time: 02:00:00"
	run_sql_tidb "SET GLOBAL TIME_ZONE = '+06:00'"
	run_sql_tidb "SELECT cast(TIMEDIFF(NOW(6), UTC_TIMESTAMP(6)) as time) time"
	check_contains "time: 06:00:00"
	trap restore_timezone EXIT

	echo "start prepare_data"
	prepare_data
	echo "finish prepare_data"

	# we will check metrics later, so don't clean them up
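	# Note on the two failpoints below (inferred from the checks later in this test):
	# - loader/DontUnregister should keep the lightning metrics registered after the load
	#   unit finishes, so the check_metric call on lightning_tables can still read them.
	# - syncer/IOTotalBytes should make the syncer log IOTotalBytes values, which are
	#   grepped for after the incremental phase.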
	export GO_FAILPOINTS='github.com/pingcap/tiflow/dm/loader/DontUnregister=return();github.com/pingcap/tiflow/dm/syncer/IOTotalBytes=return("uuid")'

	run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml
	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT
	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
	# apply the MySQL source config to the worker
	cp $cur/conf/source1.yaml $WORK_DIR/source1.yaml
	sed -i "/relay-binlog-name/i\relay-dir: $WORK_DIR/worker1/relay_log" $WORK_DIR/source1.yaml
	dmctl_operate_source create $WORK_DIR/source1.yaml $SOURCE_ID1

	dmctl_start_task_standalone
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"query-status test" \
		"\"totalTables\": \"500\"" 1 \
		"\"completedTables\"" 1 \
		"\"finishedBytes\"" 1 \
		"\"finishedRows\"" 1 \
		"\"estimateTotalRows\"" 1
	wait_until_sync $WORK_DIR "127.0.0.1:$MASTER_PORT"
	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
	check_metric $WORKER1_PORT 'lightning_tables{result="success",source_id="mysql-replica-01",state="completed",task="test"}' 1 $(($TABLE_NUM - 1)) $(($TABLE_NUM + 1))

	run_sql_tidb_with_retry "select count(*) from dm_meta.test_syncer_checkpoint" "count(*): $(($TABLE_NUM + 1))"

	check_log_contains $WORK_DIR/worker1/log/dm-worker.log 'Error 8004 (HY000): Transaction is too large'

	# check https://github.com/pingcap/tiflow/issues/5063
	check_time=100
	sleep 5
	while [ $check_time -gt 0 ]; do
		syncer_recv_event_num=$(grep '"receive binlog event"' $WORK_DIR/worker1/log/dm-worker.log | wc -l)
		if [ $syncer_recv_event_num -eq 3 ]; then
			break
		fi
		echo "syncer_recv_event_num: $syncer_recv_event_num, will retry later"
		sleep 1
		((check_time--))
	done

	if [ $syncer_recv_event_num -ne 3 ]; then
		exit 1
	fi

	echo "start incremental_data"
	incremental_data
	echo "finish incremental_data"
	echo "check diff 1" # check that data are synchronized after the 'ALTER TABLE' command
	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
	# the log should contain some lines with non-zero IOTotalBytes
	grep 'IOTotal' $WORK_DIR/worker1/log/dm-worker.log | grep -v 'IOTotalBytes=0'

	run_sql "INSERT INTO many_tables_db.t1 (i, j) VALUES (1, 1001);" $MYSQL_PORT1 $MYSQL_PASSWORD1
	run_sql "INSERT INTO many_tables_db.t2 (i, j) VALUES (2, 2002);" $MYSQL_PORT1 $MYSQL_PASSWORD1
	echo "check diff 2" # check that timezone and timestamp have been set back to the default
	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
	restore_timezone
	trap - EXIT

	# test https://github.com/pingcap/tiflow/issues/5344
	kill_dm_worker
	# let some binlog events save table checkpoints before hitting the downstream error
	export GO_FAILPOINTS='github.com/pingcap/tiflow/dm/syncer/BlockExecuteSQLs=return(1)'
	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
	run_sql_source1 "CREATE TABLE many_tables_db.flush (c INT PRIMARY KEY);"
	sleep 5
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"query-status test" \
		'"synced": true' 1

	pkill -hup tidb-server 2>/dev/null || true
	wait_process_exit tidb-server
	# now the worker will process some binlog events, save table checkpoints and hit the downstream error
	incremental_data_2
	sleep 30

	resume_num=$(grep 'unit process error' $WORK_DIR/worker1/log/dm-worker.log | wc -l)
	echo "resume_num: $resume_num"
	# because we check auto resume every 5 seconds...
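	# and the downstream stays down for the whole 30-second sleep above, we expect at
	# least 4 resume attempts, each logging 'unit process error'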
	[ $resume_num -ge 4 ]
	folder_size=$(du -d0 $WORK_DIR/worker1/ --exclude="$WORK_DIR/worker1/log" | cut -f1)
	echo "folder_size: $folder_size"
	# less than 10MB (du reports sizes in KB)
	[ $folder_size -lt 10000 ]

	export GO_FAILPOINTS=''

	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" "stop-task test"

	killall tidb-server 2>/dev/null || true
	killall tikv-server 2>/dev/null || true
	killall pd-server 2>/dev/null || true

	run_downstream_cluster $WORK_DIR

	run_sql_source1 "ALTER TABLE many_tables_db.t1 DROP x;"
	run_sql_source1 "ALTER TABLE many_tables_db.t2 DROP x;"
	run_sql_tidb "CREATE DATABASE merge_many_tables_db;"
	# check merging shard tables from one source while changing the UK
	run_sql_tidb "CREATE TABLE merge_many_tables_db.t(i INT, j INT, UNIQUE KEY(i,j), c1 VARCHAR(20), c2 VARCHAR(20), c3 VARCHAR(20), c4 VARCHAR(20), c5 VARCHAR(20), c6 VARCHAR(20), c7 VARCHAR(20), c8 VARCHAR(20), c9 VARCHAR(20), c10 VARCHAR(20), c11 VARCHAR(20), c12 VARCHAR(20), c13 VARCHAR(20));"

	dmctl_start_task_standalone $cur/conf/dm-task-2.yaml
	run_sql_tidb_with_retry_times "select count(*) from merge_many_tables_db.t;" "count(*): 6002" 60

	killall -9 tidb-server 2>/dev/null || true
	killall -9 tikv-server 2>/dev/null || true
	killall -9 pd-server 2>/dev/null || true
	rm -rf /tmp/tidb || true
	run_tidb_server 4000 $TIDB_PASSWORD
}

cleanup_data many_tables_db merge_many_tables_db
cleanup_process

run $*

cleanup_process

echo "[$(date)] <<<<<< test case $TEST_NAME success! >>>>>>"