github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/dm/tests/only_dml/run.sh

#!/bin/bash

set -eu

cur=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
source $cur/../_utils/test_prepare
WORK_DIR=$TEST_DIR/$TEST_NAME
TASK_NAME="test"
SQL_RESULT_FILE="$TEST_DIR/sql_res.$TEST_NAME.txt"

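# purge relay log files older than $binlog_file for the given source
# and expect dmctl to report success.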
function purge_relay_success() {
	binlog_file=$1
	source_id=$2
	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"purge-relay --filename $binlog_file -s $source_id" \
		"\"result\": true" 2
}

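# run one SQL statement against 127.0.0.1:$2 with password $3 and discard the output,
# using user "test" for TiDB (port 4000) and "root" for the upstream MySQL instances.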
function run_sql_silent() {
	TIDB_PORT=4000
	user="root"
	if [[ "$2" = $TIDB_PORT ]]; then
		user="test"
	fi
	mysql -u$user -h127.0.0.1 -P$2 -p$3 --default-character-set utf8 -E -e "$1" >>/dev/null
}

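# keep inserting rows into only_dml.t1/t2 on both upstream sources and flush the
# binary logs every two rounds so the upstream binlogs (and the relay logs) keep rotating.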
function insert_data() {
	i=1

	while true; do
		sleep 1
		run_sql_silent "insert into only_dml.t1 values ($(($i * 2 + 1)));" $MYSQL_PORT1 $MYSQL_PASSWORD1
		run_sql_silent "insert into only_dml.t2 values ($(($i * 2 + 2)));" $MYSQL_PORT2 $MYSQL_PASSWORD2
		((i++))
		run_sql_silent "insert into only_dml.t1 values ($(($i * 2 + 1)));" $MYSQL_PORT1 $MYSQL_PASSWORD1
		run_sql_silent "insert into only_dml.t2 values ($(($i * 2 + 2)));" $MYSQL_PORT2 $MYSQL_PASSWORD2
		((i++))
		run_sql_silent "flush logs;" $MYSQL_PORT1 $MYSQL_PASSWORD1
		run_sql_silent "flush logs;" $MYSQL_PORT2 $MYSQL_PASSWORD2
	done
}

function run() {
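	# SetHeartbeatInterval=return(1) shortens the relay heartbeat interval to 1 second;
	# syncDMLBatchNotFull=return(true) makes the syncer execute DML jobs without waiting
	# for a full batch, which is checked later via "execute not full job queue" in the worker logs.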
	export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/relay/SetHeartbeatInterval=return(1);github.com/pingcap/tiflow/dm/syncer/syncDMLBatchNotFull=return(true)"

	run_sql_file $cur/data/db1.prepare.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1
	check_contains 'Query OK, 1 row affected'
	run_sql_file $cur/data/db2.prepare.sql $MYSQL_HOST2 $MYSQL_PORT2 $MYSQL_PASSWORD2
	check_contains 'Query OK, 1 row affected'

	# run dm master
	run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml
	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT
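	# check_metric <port> <metric> <retry> <min> <max> waits until the metric value
	# lies strictly between <min> and <max>, so bounds (0, 2) mean the value must be 1.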
	check_metric $MASTER_PORT 'start_leader_counter' 3 0 2

	# copy config files
	cp $cur/conf/source1.yaml $WORK_DIR/source1.yaml
	cp $cur/conf/source2.yaml $WORK_DIR/source2.yaml
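	# insert a relay-dir entry before the relay-binlog-name line so each source
	# keeps its relay log under the corresponding worker's directory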
	sed -i "/relay-binlog-name/i\relay-dir: $WORK_DIR/worker1/relay_log" $WORK_DIR/source1.yaml
	sed -i "/relay-binlog-name/i\relay-dir: $WORK_DIR/worker2/relay_log" $WORK_DIR/source2.yaml

	# bind source1 to worker1, source2 to worker2
	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
	dmctl_operate_source create $WORK_DIR/source1.yaml $SOURCE_ID1

	run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT
	dmctl_operate_source create $WORK_DIR/source2.yaml $SOURCE_ID2

	# check dm-worker metrics: the relay binlog file index must be 1
	check_metric $WORKER1_PORT "dm_relay_binlog_file" 3 0 2
	check_metric $WORKER2_PORT "dm_relay_binlog_file" 3 0 2

	# start a task in all mode; once it enters incremental mode, only DML is executed
	dmctl_start_task $cur/conf/dm-task.yaml

	# check that the task has started (state=2 means Running)
	check_metric $WORKER1_PORT "dm_worker_task_state{source_id=\"mysql-replica-01\",task=\"$TASK_NAME\",worker=\"worker1\"}" 10 1 3
	check_metric $WORKER2_PORT "dm_worker_task_state{source_id=\"mysql-replica-02\",task=\"$TASK_NAME\",worker=\"worker2\"}" 10 1 3

	# check diff
	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml

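	# generate DML and binlog rotations in the background while relay logs are purged below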
	insert_data &
	pid=$!
	echo "PID of insert_data is $pid"

	# check twice to make sure updating the active relay log works both the first time and on later runs
	for i in {1..2}; do
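		# purge relay logs up to the newest upstream binlog and verify that the earliest
		# local relay log file advances; the purge may not take effect immediately,
		# so retry up to 9 times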
		for ((k = 1; k < 10; k++)); do
			server_uuid1=$(tail -n 1 $WORK_DIR/worker1/relay-dir/server-uuid.index)
			run_sql_source1 "show binary logs\G"
			max_binlog_name=$(grep Log_name "$SQL_RESULT_FILE" | tail -n 1 | awk -F":" '{print $NF}')
			earliest_relay_log1=$(ls $WORK_DIR/worker1/relay-dir/$server_uuid1 | grep -v 'relay.meta' | sort | head -n 1)
			purge_relay_success $max_binlog_name $SOURCE_ID1
			earliest_relay_log2=$(ls $WORK_DIR/worker1/relay-dir/$server_uuid1 | grep -v 'relay.meta' | sort | head -n 1)
			echo "earliest_relay_log1: $earliest_relay_log1 earliest_relay_log2: $earliest_relay_log2"
			if [ "$earliest_relay_log1" != "$earliest_relay_log2" ]; then
				break
			fi
			echo "purge relay log failed $k-th time, retry later"
			sleep 1
		done

		for ((k = 1; k < 10; k++)); do
			server_uuid2=$(tail -n 1 $WORK_DIR/worker2/relay-dir/server-uuid.index)
			run_sql_source2 "show binary logs\G"
			max_binlog_name=$(grep Log_name "$SQL_RESULT_FILE" | tail -n 1 | awk -F":" '{print $NF}')
			earliest_relay_log1=$(ls $WORK_DIR/worker2/relay-dir/$server_uuid2 | grep -v 'relay.meta' | sort | head -n 1)
			purge_relay_success $max_binlog_name $SOURCE_ID2
			earliest_relay_log2=$(ls $WORK_DIR/worker2/relay-dir/$server_uuid2 | grep -v 'relay.meta' | sort | head -n 1)
			echo "earliest_relay_log1: $earliest_relay_log1 earliest_relay_log2: $earliest_relay_log2"
			if [ "$earliest_relay_log1" != "$earliest_relay_log2" ]; then
				break
			fi
			echo "purge relay log failed $k-th time, retry later"
			sleep 1
		done
	done

	kill $pid
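	# the syncDMLBatchNotFull failpoint causes the syncer to execute jobs before a batch
	# is full, which is reported as "execute not full job queue" in the worker logs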
	check_log_contain_with_retry 'execute not full job queue' $WORK_DIR/worker1/log/dm-worker.log
	check_log_contain_with_retry 'execute not full job queue' $WORK_DIR/worker2/log/dm-worker.log
	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
	export GO_FAILPOINTS=""
}

cleanup_data $TEST_NAME
# also clean up dm processes in case the last run failed
cleanup_process $*
run $*
cleanup_process $*

echo "[$(date)] <<<<<< test case $TEST_NAME success! >>>>>>"