#!/bin/bash
# Integration test for DM GTID adjustment ("adjust_gtid"):
# verifies that a task can recover after its GTID metadata is wiped, both
# (a) resuming from a checkpoint whose binlog_gtid was cleared, and
# (b) starting fresh from task-yaml incremental meta with no checkpoint row.
# Uses the AdjustGTIDExit failpoint so each worker exits right after it has
# adjusted the GTID, letting us inspect the recomputed checkpoint in TiDB.
# Relies on the shared helpers sourced from ../_utils/test_prepare
# (run_sql*, run_dm_*, check_*, dmctl_*); they define TEST_DIR, TEST_NAME,
# SOURCE_ID1/2, ports, passwords, etc.

set -eu

cur=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
source $cur/../_utils/test_prepare
TASK_NAME="test"
WORK_DIR=$TEST_DIR/$TEST_NAME
# SQL_RESULT_FILE="$TEST_DIR/sql_res.$TEST_NAME.txt"

# clean_gtid will
# 1. delete source1's gtid info, but keep the binlog pos info (simulate switch gtid and resume from checkpoint)
# 2. delete source2's checkpoint info, set only binlog pos info in the task.yaml (simulate switch gtid and start for meta)
# Depends on globals set by run(): $name1, $name2, $pos2 (parsed from the
# dump metadata) and $SOURCE_ID1/$SOURCE_ID2/$TIDB_PORT/$TIDB_PASSWORD.
function clean_gtid() {
	# delete SOURCE1 checkpoint's gtid info
	run_sql "update dm_meta.${TASK_NAME}_syncer_checkpoint set binlog_gtid=\"\" where id=\"$SOURCE_ID1\" and is_global=1" $TIDB_PORT $TIDB_PASSWORD
	# set SOURCE2 incremental metadata without checkpoint
	run_sql "delete from dm_meta.${TASK_NAME}_syncer_checkpoint where id=\"$SOURCE_ID2\"" $TIDB_PORT $TIDB_PASSWORD

	# Regenerate the task file in incremental mode, filling in the
	# binlog-name/pos placeholders but leaving binlog-gtid empty so the
	# worker must adjust (recompute) the GTID set itself.
	cp $cur/conf/dm-task.yaml $WORK_DIR/dm-task.yaml
	sed -i "s/task-mode-placeholder/incremental/g" $WORK_DIR/dm-task.yaml
	# avoid cannot unmarshal !!str `binlog-...` into uint32 error
	sed -i "s/binlog-name-placeholder-1/$name1/g" $WORK_DIR/dm-task.yaml
	sed -i "s/binlog-pos-placeholder-1/4/g" $WORK_DIR/dm-task.yaml
	sed -i "s/binlog-gtid-placeholder-1/\"\"/g" $WORK_DIR/dm-task.yaml
	sed -i "s/binlog-name-placeholder-2/$name2/g" $WORK_DIR/dm-task.yaml
	sed -i "s/binlog-pos-placeholder-2/$pos2/g" $WORK_DIR/dm-task.yaml
	sed -i "s/binlog-gtid-placeholder-2/\"\"/g" $WORK_DIR/dm-task.yaml
}

# check_checkpoint checks checkpoint data from the database
# Arguments: $1 source id, $2 expected binlog name, $3 expected binlog pos,
#            $4 expected gtid (may be empty: then the gtid check is skipped).
# Uses check_contains, which presumably matches against the output of the
# preceding run_sql call — confirm against the _utils helpers.
function check_checkpoint() {
	source_id=$1
	expected_name=$2
	expected_pos=$3
	expected_gtid=$4

	run_sql "select binlog_name,binlog_pos,binlog_gtid from dm_meta.${TASK_NAME}_syncer_checkpoint where id=\"$source_id\" and is_global=1" $TIDB_PORT $TIDB_PASSWORD
	check_contains $expected_name
	check_contains $expected_pos
	if [[ -n $expected_gtid ]]; then
		check_contains $expected_gtid
	fi
}

function run() {
	# Non-default SQL mode / per-source time zones exercise the metadata
	# handling under uncommon server settings; restored at the end of run().
	run_sql_both_source "SET @@GLOBAL.SQL_MODE='ANSI_QUOTES,NO_AUTO_VALUE_ON_ZERO'"
	run_sql_source1 "SET @@global.time_zone = '+01:00';"
	run_sql_source2 "SET @@global.time_zone = '+02:00';"

	run_sql_file $cur/data/db1.prepare.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1
	check_contains 'Query OK, 2 rows affected'
	run_sql_file $cur/data/db2.prepare.sql $MYSQL_HOST2 $MYSQL_PORT2 $MYSQL_PASSWORD2
	check_contains 'Query OK, 3 rows affected'

	# start DM worker and master
	run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml
	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT
	check_metric $MASTER_PORT 'start_leader_counter' 3 0 2

	# Failpoint: each worker exits immediately after adjusting the GTID,
	# so we can examine the recomputed checkpoint before any syncing.
	export GO_FAILPOINTS='github.com/pingcap/tiflow/dm/syncer/AdjustGTIDExit=return(true)'
	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT

	# operate mysql config to worker
	cp $cur/conf/source1.yaml $WORK_DIR/source1.yaml
	cp $cur/conf/source2.yaml $WORK_DIR/source2.yaml
	sed -i "/relay-binlog-name/i\relay-dir: $WORK_DIR/worker1/relay_log" $WORK_DIR/source1.yaml
	sed -i "/relay-binlog-name/i\relay-dir: $WORK_DIR/worker2/relay_log" $WORK_DIR/source2.yaml
	# make sure source1 is bound to worker1
	# (worker1 is the only live worker at this point, so source1 binds to it)
	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"operate-source create $WORK_DIR/source1.yaml" \
		"\"result\": true" 2 \
		"\"source\": \"$SOURCE_ID1\"" 1 \
		"will be deprecated soon" 1

	run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT
	dmctl_operate_source create $WORK_DIR/source2.yaml $SOURCE_ID2

	# start DM task only
	cp $cur/conf/dm-task.yaml $WORK_DIR/dm-task.yaml
	sed -i "s/task-mode-placeholder/all/g" $WORK_DIR/dm-task.yaml
	# avoid cannot unmarshal !!str `binlog-...` into uint32 error
	sed -i "s/binlog-pos-placeholder-1/4/g" $WORK_DIR/dm-task.yaml
	sed -i "s/binlog-pos-placeholder-2/4/g" $WORK_DIR/dm-task.yaml
	# start DM task. don't check error because it will meet injected error soon
	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"start-task $WORK_DIR/dm-task.yaml --remove-meta"

	# use sync_diff_inspector to check full dump loader
	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml

	# Parse the real binlog coordinates from each worker's dump metadata;
	# these become the expected checkpoint values and feed clean_gtid.
	# NOTE(review): the gtid parse assumes the GTID value itself contains
	# exactly one ':' (uuid:interval) — confirm for multi-source GTID sets.
	name1=$(grep "Log: " $WORK_DIR/worker1/dumped_data.$TASK_NAME/metadata | awk -F: '{print $2}' | tr -d ' ')
	pos1=$(grep "Pos: " $WORK_DIR/worker1/dumped_data.$TASK_NAME/metadata | awk -F: '{print $2}' | tr -d ' ')
	gtid1=$(grep "GTID:" $WORK_DIR/worker1/dumped_data.$TASK_NAME/metadata | awk -F: '{print $2,":",$3}' | tr -d ' ')
	name2=$(grep "Log: " $WORK_DIR/worker2/dumped_data.$TASK_NAME/metadata | awk -F: '{print $2}' | tr -d ' ')
	pos2=$(grep "Pos: " $WORK_DIR/worker2/dumped_data.$TASK_NAME/metadata | awk -F: '{print $2}' | tr -d ' ')
	gtid2=$(grep "GTID:" $WORK_DIR/worker2/dumped_data.$TASK_NAME/metadata | awk -F: '{print $2,":",$3}' | tr -d ' ')

	run_sql_tidb_with_retry "select count(1) from dm_meta.${TASK_NAME}_syncer_checkpoint where is_global=1" "count(1): 2"
	check_checkpoint $SOURCE_ID1 $name1 $pos1 $gtid1
	check_checkpoint $SOURCE_ID2 $name2 $pos2 $gtid2
	dmctl_stop_task_with_retry $TASK_NAME $MASTER_PORT
	# Workers should be down already: the AdjustGTIDExit failpoint makes
	# them exit after adjusting GTID.
	check_port_offline $WORKER1_PORT 20
	check_port_offline $WORKER2_PORT 20
	clean_gtid

	# start two workers again
	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
	run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT

	# start task without checking, worker may exit before we get success result
	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" "start-task $WORK_DIR/dm-task.yaml"

	# After restart + adjustment, the checkpoints must again match the
	# original dump coordinates (GTID recomputed from empty).
	run_sql_tidb_with_retry "select count(1) from dm_meta.${TASK_NAME}_syncer_checkpoint where is_global=1" "count(1): 2"
	check_checkpoint $SOURCE_ID1 $name1 $pos1 $gtid1
	check_checkpoint $SOURCE_ID2 $name2 $pos2 $gtid2
	check_port_offline $WORKER1_PORT 20
	check_port_offline $WORKER2_PORT 20
	clean_gtid

	run_sql_file $cur/data/db1.increment.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1
	run_sql_file $cur/data/db2.increment.sql $MYSQL_HOST2 $MYSQL_PORT2 $MYSQL_PASSWORD2

	# Disable the failpoint for the final round so syncing can proceed
	# past GTID adjustment and replicate the incremental data.
	export GO_FAILPOINTS=''
	# start two workers again
	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
	run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT

	# use sync_diff_inspector to check incremental dump loader
	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml

	# Restore the default SQL mode and time zone touched at the top of run().
	run_sql_both_source "SET @@GLOBAL.SQL_MODE='ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'"
	run_sql_both_source "SET @@global.time_zone = 'SYSTEM';"
}

cleanup_data adjust_gtid
# also cleanup dm processes in case of last run failed
cleanup_process $*
run $*
cleanup_process $*

echo "[$(date)] <<<<<< test case $TEST_NAME success! >>>>>>"