#!/bin/bash

set -eu

ha_test="ha_test"
ha_test2="ha_test2"
master_ports=($MASTER_PORT1 $MASTER_PORT2 $MASTER_PORT3)
worker_ports=($WORKER1_PORT $WORKER2_PORT $WORKER3_PORT $WORKER4_PORT $WORKER5_PORT)

# Recreate table ${db}.t${i} upstream and slowly insert 1600 rows
# (two rows every 0.1s) so replication has a steady stream to follow.
# Arguments:
#   $1 - MySQL port
#   $2 - MySQL password
#   $3 - table index suffix (operates on table t$3)
#   $4 - (optional) database name; defaults to $ha_test when omitted
function load_data() {
	# locals: this library is sourced, so unscoped names would leak into
	# (and clobber) the caller's shell
	local port=$1
	local pswd=$2
	local i=$3
	# ${4-...}: default only when $4 is *unset*, matching the original
	# "$# -ge 4" check (an explicitly empty $4 is kept as-is)
	local db=${4-$ha_test}

	run_sql "CREATE DATABASE if not exists ${db};" $port $pswd
	run_sql "DROP TABLE if exists ${db}.t${i};" $port $pswd
	run_sql "CREATE TABLE ${db}.t${i}(i SMALLINT, j INT UNIQUE KEY);" $port $pswd
	local j
	for j in $(seq 800); do
		run_sql "INSERT INTO ${db}.t${i} VALUES ($j,${j}00$j),($j,${j}01$j);" $port $pswd
		sleep 0.1
	done
}

# Execute SQL file $1 against host $2 / port $3 / password $4, after
# substituting every "database-placeholder" token with database $5.
# The file is copied to $WORK_DIR/data.sql so the template stays intact.
function run_sql_file_withdb() {
	local sql=$1
	local host=$2
	local port=$3
	local pswd=$4
	local db=$5
	cp "$sql" $WORK_DIR/data.sql
	sed -i "s/database-placeholder/$db/g" $WORK_DIR/data.sql
	run_sql_file $WORK_DIR/data.sql $host $port $pswd
}

# build tables etc.
function prepare_sql() {
	echo "import prepare data"
	run_sql_file_withdb $cur/data/db1.prepare.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1 $ha_test
	check_contains 'Query OK, 2 rows affected'
	run_sql_file_withdb $cur/data/db2.prepare.sql $MYSQL_HOST2 $MYSQL_PORT2 $MYSQL_PASSWORD2 $ha_test
	check_contains 'Query OK, 3 rows affected'
}
# build tables etc. for multi tasks
function prepare_sql_multi_task() {
	echo "import prepare data"
	run_sql_file_withdb $cur/data/db1.prepare.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1 $ha_test
	check_contains 'Query OK, 2 rows affected'
	run_sql_file_withdb $cur/data/db2.prepare.sql $MYSQL_HOST2 $MYSQL_PORT2 $MYSQL_PASSWORD2 $ha_test
	check_contains 'Query OK, 3 rows affected'
	run_sql_file_withdb $cur/data/db1.prepare.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1 $ha_test2
	check_contains 'Query OK, 2 rows affected'
	run_sql_file_withdb $cur/data/db2.prepare.sql $MYSQL_HOST2 $MYSQL_PORT2 $MYSQL_PASSWORD2 $ha_test2
	check_contains 'Query OK, 3 rows affected'
}

# Bring up the full HA cluster: 3 masters, 2 workers, both sources, one task.
# Workers are started one at a time and each source is created right after its
# worker comes online, so source_i ends up bound to worker_i.
function start_cluster() {
	echo "start DM worker and master cluster"
	run_dm_master $WORK_DIR/master1 $MASTER_PORT1 $cur/conf/dm-master1.toml
	run_dm_master $WORK_DIR/master2 $MASTER_PORT2 $cur/conf/dm-master2.toml
	run_dm_master $WORK_DIR/master3 $MASTER_PORT3 $cur/conf/dm-master3.toml
	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT1
	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT2
	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT3

	echo "start worker and operate mysql config to worker"
	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
	cp $cur/conf/source1.yaml $WORK_DIR/source1.yaml
	cp $cur/conf/source2.yaml $WORK_DIR/source2.yaml
	# inject per-worker relay dirs into the source configs
	sed -i "/relay-binlog-name/i\relay-dir: $WORK_DIR/worker1/relay_log" $WORK_DIR/source1.yaml
	sed -i "/relay-binlog-name/i\relay-dir: $WORK_DIR/worker2/relay_log" $WORK_DIR/source2.yaml
	dmctl_operate_source create $WORK_DIR/source1.yaml $SOURCE_ID1
	run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT
	dmctl_operate_source create $WORK_DIR/source2.yaml $SOURCE_ID2

	echo "start DM task"
	dmctl_start_task
}

# Bring up 3 masters but only one worker/source and start the standalone task.
function start_standalone_cluster() {
	echo "start DM worker and master standalone cluster"
	run_dm_master $WORK_DIR/master1 $MASTER_PORT1 $cur/conf/dm-master1.toml
	run_dm_master $WORK_DIR/master2 $MASTER_PORT2 $cur/conf/dm-master2.toml
	run_dm_master $WORK_DIR/master3 $MASTER_PORT3 $cur/conf/dm-master3.toml
	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT1
	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT2
	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT3

	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
	echo "operate mysql config to worker"
	cp $cur/conf/source1.yaml $WORK_DIR/source1.yaml
	sed -i "/relay-binlog-name/i\relay-dir: $WORK_DIR/worker1/relay_log" $WORK_DIR/source1.yaml
	dmctl_operate_source create $WORK_DIR/source1.yaml $SOURCE_ID1

	echo "start DM task"
	dmctl_start_task_standalone $cur/conf/standalone-task.yaml
}

# Bring up 3 masters and 5 workers, bind both sources, then start two tasks
# concurrently and wait for both dmctl invocations to finish.
function start_multi_tasks_cluster() {
	echo "start DM worker and master"
	run_dm_master $WORK_DIR/master1 $MASTER_PORT1 $cur/conf/dm-master1.toml
	run_dm_master $WORK_DIR/master2 $MASTER_PORT2 $cur/conf/dm-master2.toml
	run_dm_master $WORK_DIR/master3 $MASTER_PORT3 $cur/conf/dm-master3.toml
	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT1
	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT2
	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT3

	echo "operate mysql config to worker"
	cp $cur/conf/source1.yaml $WORK_DIR/source1.yaml
	cp $cur/conf/source2.yaml $WORK_DIR/source2.yaml
	sed -i "/relay-binlog-name/i\relay-dir: $WORK_DIR/worker1/relay_log" $WORK_DIR/source1.yaml
	sed -i "/relay-binlog-name/i\relay-dir: $WORK_DIR/worker2/relay_log" $WORK_DIR/source2.yaml

	# make sure source_i bound to worker_i
	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
	dmctl_operate_source create $WORK_DIR/source1.yaml $SOURCE_ID1
	run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT
	dmctl_operate_source create $WORK_DIR/source2.yaml $SOURCE_ID2

	run_dm_worker $WORK_DIR/worker3 $WORKER3_PORT $cur/conf/dm-worker3.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER3_PORT
	run_dm_worker $WORK_DIR/worker4 $WORKER4_PORT $cur/conf/dm-worker4.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER4_PORT
	run_dm_worker $WORK_DIR/worker5 $WORKER5_PORT $cur/conf/dm-worker5.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER5_PORT

	echo "start DM task"

	# start both tasks in parallel; wait propagates a failing exit status
	local pid1 pid2
	dmctl_start_task &
	pid1=$!
	dmctl_start_task "$cur/conf/dm-task2.yaml" &
	pid2=$!

	wait "$pid1" "$pid2"
}

# Stop DM processes, wipe downstream test data, and drop the upstream test
# databases on both MySQL instances.
function cleanup() {
	cleanup_process $*
	cleanup_data $ha_test
	cleanup_data $ha_test2
	echo "clean source table"
	local mysql_ports=($MYSQL_PORT1 $MYSQL_PORT2)
	local i
	for i in "${mysql_ports[@]}"; do
		# run mysql directly; the original wrapped these calls in $(...),
		# which pointlessly re-executed the (empty) command output.
		# Use $ha_test/$ha_test2 instead of hardcoded names so the drop
		# always matches the databases the rest of this library creates.
		mysql -h127.0.0.1 -p123456 -P${i} -uroot -e "drop database if exists ${ha_test};"
		mysql -h127.0.0.1 -p123456 -P${i} -uroot -e "drop database if exists ${ha_test2};"
		sleep 1
	done
}

# Restart dm-master$1; when $2 is "isolate", inject the FailToElect failpoint
# so the restarted master cannot win an election.
function isolate_master() {
	local port=${master_ports[$(($1 - 1))]}
	if [ "$2" = "isolate" ]; then
		export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/master/FailToElect=return(\"master$1\")"
	fi
	echo "kill dm-master$1"
	kill_process dm-master$1
	check_master_port_offline $1
	run_dm_master $WORK_DIR/master$1 $port $cur/conf/dm-master$1.toml
	export GO_FAILPOINTS=''
}

# Restart dm-worker$1; when $2 is "isolate", inject the FailToKeepAlive
# failpoint so the restarted worker drops its keepalive with the master.
function isolate_worker() {
	local port=${worker_ports[$(($1 - 1))]}
	if [ "$2" = "isolate" ]; then
		export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/worker/FailToKeepAlive=return(\"worker$1\")"
	fi
	echo "kill dm-worker$1"
	kill_process dm-worker$1
	check_port_offline $port 20
	run_dm_worker $WORK_DIR/worker$1 $port $cur/conf/dm-worker$1.toml
	export GO_FAILPOINTS=''
}

# Verify worker1/worker2 are still bound to the sources recorded earlier by
# the caller in $worker1bound / $worker2bound; exit 1 on any mismatch.
function check_bound() {
	local bound1 bound2
	bound1=$($PWD/bin/dmctl.test DEVEL --master-addr "127.0.0.1:$MASTER_PORT1" list-member --name worker1 |
		grep 'source' | awk -F: '{print $2}')
	bound2=$($PWD/bin/dmctl.test DEVEL --master-addr "127.0.0.1:$MASTER_PORT1" list-member --name worker2 |
		grep 'source' | awk -F: '{print $2}')
	# quote the RHS so the comparison is a literal string match, not a glob
	if [[ "$worker1bound" != "$bound1" || "$worker2bound" != "$bound2" ]]; then
		echo "worker1bound $worker1bound bound1 $bound1"
		echo "worker2bound $worker2bound bound2 $bound2"
		exit 1
	fi
}

# Start workers $1 and $2 and wait (with retry) until one is bound to
# mysql-replica-01 and the other to mysql-replica-02.
function start_2_worker_ensure_bound() {
	# index 0 is a placeholder so worker N maps to index N
	local worker_ports_2=(0 $WORKER1_PORT $WORKER2_PORT $WORKER3_PORT $WORKER4_PORT $WORKER5_PORT)

	echo "start worker$1"
	run_dm_worker $WORK_DIR/worker$1 ${worker_ports_2[$1]} $cur/conf/dm-worker$1.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:${worker_ports_2[$1]}
	echo "start worker$2"
	run_dm_worker $WORK_DIR/worker$2 ${worker_ports_2[$2]} $cur/conf/dm-worker$2.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:${worker_ports_2[$2]}
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT1" \
		"list-member --name worker$1 --name worker$2" \
		"\"source\": \"mysql-replica-01\"" 1 \
		"\"source\": \"mysql-replica-02\"" 1
}

# Kill workers $1 and $2 and wait (with retry) until the master reports both
# as unbound (empty source).
function kill_2_worker_ensure_unbound() {
	local worker_ports_2=(0 $WORKER1_PORT $WORKER2_PORT $WORKER3_PORT $WORKER4_PORT $WORKER5_PORT)

	echo "kill dm-worker$1"
	kill_process dm-worker$1
	echo "kill dm-worker$2"
	kill_process dm-worker$2

	check_port_offline ${worker_ports_2[$1]} 20
	check_port_offline ${worker_ports_2[$2]} 20

	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT1" \
		"list-member --name worker$1 --name worker$2" \
		"\"source\": \"\"" 2
}