github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/dm/tests/new_relay/run.sh (about)

     1  #!/bin/bash
     2  
     3  set -eu
     4  
     5  cur=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
     6  source $cur/../_utils/test_prepare
     7  WORK_DIR=$TEST_DIR/$TEST_NAME
     8  TASK_NAME="test"
     9  SQL_RESULT_FILE="$TEST_DIR/sql_res.$TEST_NAME.txt"
    10  
    11  API_VERSION="v1alpha1"
    12  
    13  function cleanup_data_and_init_key() {
    14  	cleanup_data $TEST_NAME
    15  	mkdir -p $WORK_DIR/master
    16  	cp $cur/conf/key.txt $WORK_DIR/master/
    17  }
    18  
# Verify that relay bindings survive a full cluster restart:
#   - relay for source1 runs on worker1; relay for source2 runs on worker2 and
#     worker3 (two relay workers for one source),
#   - the whole cluster (master + workers) is killed and restarted,
#   - afterwards query-status / list-member must still report the same
#     relay-worker assignments.
function test_restart_relay_status() {
	cleanup_process
	cleanup_data_and_init_key
	export GO_FAILPOINTS=""

	run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml
	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT
	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT

	dmctl_operate_source create $cur/conf/source1.yaml $SOURCE_ID1

	# start-relay with an explicit worker prints a deprecation warning.
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"start-relay -s $SOURCE_ID1 worker1" \
		"will be deprecated soon" 1
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"query-status -s $SOURCE_ID1" \
		"\"result\": true" 2 \
		"\"worker\": \"worker1\"" 1

	run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT

	dmctl_operate_source create $cur/conf/source2.yaml $SOURCE_ID2

	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"start-relay -s $SOURCE_ID2 worker2"
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"query-status -s $SOURCE_ID2" \
		"\"result\": true" 2 \
		"\"worker\": \"worker2\"" 1

	run_dm_worker $WORK_DIR/worker3 $WORKER3_PORT $cur/conf/dm-worker3.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER3_PORT

	# Add a second relay worker for source2; status should now list both.
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"start-relay -s $SOURCE_ID2 worker3"
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"query-status -s $SOURCE_ID2" \
		"\"result\": true" 3 \
		"\"worker\": \"worker2\"" 1 \
		"\"worker\": \"worker3\"" 1

	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"list-member -n worker3" \
		"relay" 1

	# Restart the whole cluster and check the relay assignments are recovered.
	kill_dm_worker
	kill_dm_master

	run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml
	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT

	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
	run_dm_worker $WORK_DIR/worker3 $WORKER3_PORT $cur/conf/dm-worker3.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER3_PORT

	run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT

	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"query-status -s $SOURCE_ID1" \
		"\"result\": true" 2 \
		"\"worker\": \"worker1\"" 1

	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"query-status -s $SOURCE_ID2" \
		"\"result\": true" 3 \
		"\"worker\": \"worker2\"" 1 \
		"\"worker\": \"worker3\"" 1

	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"list-member --worker" \
		"relay" 1 \
		"bound" 2

	echo ">>>>>>>>>>>>>>>>>>>>>>>>>>test test_restart_relay_status passed"
}
    98  
    99  function test_relay_leak() {
   100  	cleanup_process
   101  	cleanup_data_and_init_key
   102  	export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/relay/RelayGetEventFailed=return()"
   103  
   104  	run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml
   105  	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT
   106  	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
   107  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
   108  
   109  	cp $cur/conf/source1.yaml $WORK_DIR/source1.yaml
   110  	sed -i "/check-enable: false/d" $WORK_DIR/source1.yaml
   111  	sed -i "/checker:/d" $WORK_DIR/source1.yaml
   112  	dmctl_operate_source create $WORK_DIR/source1.yaml $SOURCE_ID1
   113  
   114  	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
   115  		"start-relay -s $SOURCE_ID1 worker1"
   116  
   117  	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
   118  		"query-status -s $SOURCE_ID1" \
   119  		"RelayGetEventFailed" 1
   120  
   121  	check_log_contain_with_retry 'dispatch auto resume relay' $WORK_DIR/worker1/log/dm-worker.log
   122  
   123  	count=$(curl "http://127.0.0.1:8262/debug/pprof/goroutine?debug=2" 2>/dev/null | grep -c doIntervalOps || true)
   124  	if [ $count -gt 1 ]; then
   125  		echo "relay goroutine leak detected, count expect 1 but got $count"
   126  		exit 1
   127  	fi
   128  	echo ">>>>>>>>>>>>>>>>>>>>>>>>>>test test_relay_leak passed"
   129  }
   130  
   131  function test_cant_dail_upstream() {
   132  	cleanup_process
   133  	cleanup_data_and_init_key
   134  	export GO_FAILPOINTS=""
   135  
   136  	run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml
   137  	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT
   138  	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
   139  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
   140  
   141  	cp $cur/conf/source1.yaml $WORK_DIR/source1.yaml
   142  	dmctl_operate_source create $WORK_DIR/source1.yaml $SOURCE_ID1
   143  
   144  	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
   145  		"start-relay -s $SOURCE_ID1 worker1" \
   146  		"\"result\": true" 2
   147  
   148  	echo "kill dm-worker1"
   149  	kill_process dm-worker1
   150  	check_port_offline $WORKER1_PORT 20
   151  
   152  	export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/pkg/conn/failDBPing=return()"
   153  	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
   154  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
   155  
   156  	# make sure DM-worker doesn't exit
   157  	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
   158  		"query-status -s $SOURCE_ID1" \
   159  		"injected error" 1
   160  	echo ">>>>>>>>>>>>>>>>>>>>>>>>>>test test_cant_dail_upstream passed"
   161  }
   162  
# Restart the relay worker while the downstream TiDB is down: relay itself
# must catch up with the upstream master while the sync unit reports the
# connection-refused error; TiDB is restarted at the end for later tests.
function test_cant_dail_downstream() {
	cleanup_process
	cleanup_data_and_init_key
	export GO_FAILPOINTS=""

	run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml
	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT
	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT

	cp $cur/conf/source1.yaml $WORK_DIR/source1.yaml
	dmctl_operate_source create $WORK_DIR/source1.yaml $SOURCE_ID1

	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"start-relay -s $SOURCE_ID1 worker1" \
		"\"result\": true" 2
	dmctl_start_task_standalone $cur/conf/dm-task.yaml "--remove-meta"

	echo "kill dm-worker1"
	kill_process dm-worker1
	check_port_offline $WORKER1_PORT 20
	# kill tidb
	# NOTE(review): '-hup' relies on pkill accepting a lowercase signal name —
	# confirm on target platforms; a pkill failure is swallowed by '|| true'.
	pkill -hup tidb-server 2>/dev/null || true
	wait_process_exit tidb-server

	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT

	# Relay should still catch up; only the downstream write path errors out.
	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
		"query-status -s $SOURCE_ID1" \
		"\"relayCatchUpMaster\": true" 1 \
		"dial tcp 127.0.0.1:4000: connect: connection refused" 1

	# restart tidb
	run_tidb_server 4000 $TIDB_PASSWORD
	echo ">>>>>>>>>>>>>>>>>>>>>>>>>>test test_cant_dail_downstream passed"
}
   200  
   201  function test_kill_dump_connection() {
   202  	cleanup_process
   203  	cleanup_data_and_init_key
   204  
   205  	run_sql_file $cur/data/db1.prepare.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1
   206  	check_contains 'Query OK, 2 rows affected'
   207  
   208  	run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml
   209  	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT
   210  	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
   211  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
   212  
   213  	cp $cur/conf/source1.yaml $WORK_DIR/source1.yaml
   214  	dmctl_operate_source create $WORK_DIR/source1.yaml $SOURCE_ID1
   215  
   216  	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
   217  		"start-relay -s $SOURCE_ID1 worker1" \
   218  		"\"result\": true" 2
   219  
   220  	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
   221  		"query-status -s $SOURCE_ID1" \
   222  		"\"result\": true" 2 \
   223  		"\"worker\": \"worker1\"" 1
   224  	run_sql_source1 "show processlist"
   225  
   226  	# kill dump connection to test whether relay will auto reconnect db
   227  	dump_conn_id=$(cat $TEST_DIR/sql_res.$TEST_NAME.txt | grep Binlog -B 4 | grep Id | cut -d : -f2)
   228  	run_sql_source1 "kill ${dump_conn_id}"
   229  
   230  	run_sql_file $cur/data/db1.increment.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1
   231  
   232  	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
   233  		"query-status -s $SOURCE_ID1" \
   234  		"\"relayCatchUpMaster\": true" 1
   235  	echo ">>>>>>>>>>>>>>>>>>>>>>>>>>test test_kill_dump_connection passed"
   236  }
   237  
   238  function test_relay_operations() {
   239  	cleanup_process
   240  	cleanup_data_and_init_key
   241  
   242  	export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/relay/ReportRelayLogSpaceInBackground=return(1)"
   243  
   244  	run_sql_file $cur/data/db1.prepare.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1
   245  	check_contains 'Query OK, 2 rows affected'
   246  
   247  	# set log level of DM-master to info, because debug level will let etcd print KV, thus expose the password in task config
   248  	run_dm_master_info_log $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml
   249  	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT
   250  	check_metric $MASTER_PORT 'start_leader_counter' 3 0 2
   251  	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
   252  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
   253  	run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml
   254  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT
   255  	run_dm_worker $WORK_DIR/worker3 $WORKER3_PORT $cur/conf/dm-worker3.toml
   256  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER3_PORT
   257  
   258  	cp $cur/conf/source1.yaml $WORK_DIR/source1.yaml
   259  	dmctl_operate_source create $WORK_DIR/source1.yaml $SOURCE_ID1
   260  
   261  	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
   262  		"start-relay -s $SOURCE_ID1 worker1 worker2" \
   263  		"\"result\": true" 3
   264  	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
   265  		"transfer-source $SOURCE_ID1 worker1" \
   266  		"\"result\": true" 1
   267  
   268  	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
   269  		"query-status -s $SOURCE_ID1" \
   270  		"\"result\": true" 3 \
   271  		"\"worker\": \"worker1\"" 1 \
   272  		"\"worker\": \"worker2\"" 1
   273  
   274  	# worker1 and worker2 has one relay job and worker3 have none.
   275  	check_metric $WORKER1_PORT "dm_relay_binlog_file{node=\"relay\"}" 3 0 2
   276  	check_metric $WORKER1_PORT "dm_relay_exit_with_error_count{resumable_err=\"true\"}" 3 -1 1
   277  	check_metric $WORKER1_PORT "dm_relay_exit_with_error_count{resumable_err=\"false\"}" 3 -1 1
   278  	check_metric $WORKER2_PORT "dm_relay_binlog_file{node=\"relay\"}" 3 0 2
   279  	check_metric $WORKER2_PORT "dm_relay_exit_with_error_count{resumable_err=\"true\"}" 3 -1 1
   280  	check_metric $WORKER2_PORT "dm_relay_exit_with_error_count{resumable_err=\"false\"}" 3 -1 1
   281  	check_metric_not_contains $WORKER3_PORT "dm_relay_binlog_file" 3
   282  
   283  	dmctl_start_task_standalone $cur/conf/dm-task.yaml "--remove-meta"
   284  	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
   285  
   286  	run_sql_file $cur/data/db1.increment.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1
   287  	# wait syncer begin to sync so it has deleted load task etcd KV.
   288  	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
   289  
   290  	# relay task transfer to worker1 with no error.
   291  	check_metric $WORKER1_PORT "dm_relay_data_corruption" 3 -1 1
   292  	check_metric $WORKER1_PORT "dm_relay_read_error_count" 3 -1 1
   293  	check_metric $WORKER1_PORT "dm_relay_write_error_count" 3 -1 1
   294  	# check worker relay space great than 0 9223372036854775807 is 2**63 -1
   295  	check_metric $WORKER1_PORT 'dm_relay_space{type="available"}' 5 0 9223372036854775807
   296  
   297  	# subtask is preferred to scheduled to another relay worker
   298  	echo "kill dm-worker1"
   299  	kill_process dm-worker1
   300  	check_port_offline $WORKER1_PORT 20
   301  	# worker1 is down, worker2 has running relay and sync unit
   302  	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
   303  		"query-status -s $SOURCE_ID1" \
   304  		"connect: connection refused" 1 \
   305  		"\"stage\": \"Running\"" 2
   306  
   307  	run_sql_file $cur/data/db1.increment2.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1
   308  	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
   309  
   310  	# after restarting, worker will purge relay log directory because checkpoint is newer than relay.meta
   311  	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
   312  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
   313  
   314  	run_sql_file $cur/data/db1.increment3.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1
   315  	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
   316  
   317  	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
   318  		"query-status -s $SOURCE_ID1" \
   319  		"\"result\": true" 3 \
   320  		"\"worker\": \"worker1\"" 1 \
   321  		"\"worker\": \"worker2\"" 1
   322  
   323  	# test purge-relay for all relay workers
   324  	run_sql_source1 "show binary logs\G"
   325  	max_binlog_name=$(grep Log_name "$SQL_RESULT_FILE" | tail -n 1 | awk -F":" '{print $NF}')
   326  	server_uuid_1=$(tail -n 1 $WORK_DIR/worker1/relay-dir/server-uuid.index)
   327  	relay_log_count_1=$(($(ls $WORK_DIR/worker1/relay-dir/$server_uuid_1 | wc -l) - 1))
   328  	server_uuid_2=$(tail -n 1 $WORK_DIR/worker2/relay-dir/server-uuid.index)
   329  	relay_log_count_2=$(($(ls $WORK_DIR/worker2/relay-dir/$server_uuid_2 | wc -l) - 1))
   330  	[ "$relay_log_count_1" -ne 1 ]
   331  	[ "$relay_log_count_2" -ne 1 ]
   332  	sleep 1
   333  	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
   334  		"purge-relay --filename $max_binlog_name -s $SOURCE_ID1" \
   335  		"\"result\": true" 3
   336  	new_relay_log_count_1=$(($(ls $WORK_DIR/worker1/relay-dir/$server_uuid_1 | wc -l) - 1))
   337  	new_relay_log_count_2=$(($(ls $WORK_DIR/worker2/relay-dir/$server_uuid_2 | wc -l) - 1))
   338  	[ "$new_relay_log_count_1" -eq 1 ]
   339  	[ "$new_relay_log_count_2" -eq 1 ]
   340  
   341  	echo "kill dm-worker1"
   342  	kill_process dm-worker1
   343  	check_port_offline $WORKER1_PORT 20
   344  	echo "kill dm-worker2"
   345  	kill_process dm-worker2
   346  	check_port_offline $WORKER1_PORT 20
   347  	# if all relay workers are offline, relay-not-enabled worker should continue to sync
   348  	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
   349  		"query-status -s $SOURCE_ID1" \
   350  		"\"result\": true" 2 \
   351  		"\"worker\": \"worker3\"" 1
   352  
   353  	run_sql_file $cur/data/db1.increment4.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1
   354  	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
   355  
   356  	# config export
   357  	run_dm_ctl_cmd_mode $WORK_DIR "127.0.0.1:$MASTER_PORT" \
   358  		"config export -p /tmp/configs" \
   359  		"export configs to directory .* succeed" 1
   360  
   361  	# check configs
   362  	sed '/password/d' /tmp/configs/tasks/test.yaml | diff $cur/configs/tasks/test.yaml - || exit 1
   363  	sed '/password/d' /tmp/configs/sources/mysql-replica-01.yaml | diff -I '^case-sensitive' $cur/configs/sources/mysql-replica-01.yaml - || exit 1
   364  	diff <(jq --sort-keys . /tmp/configs/relay_workers.json) <(jq --sort-keys . $cur/configs/relay_workers.json) || exit 1
   365  
   366  	echo "check no password in log"
   367  	check_log_not_contains $WORK_DIR/master/log/dm-master.log "/Q7B9DizNLLTTfiZHv9WoEAKamfpIUs="
   368  	check_log_not_contains $WORK_DIR/worker1/log/dm-worker.log "/Q7B9DizNLLTTfiZHv9WoEAKamfpIUs="
   369  	check_log_not_contains $WORK_DIR/worker2/log/dm-worker.log "/Q7B9DizNLLTTfiZHv9WoEAKamfpIUs="
   370  	check_log_not_contains $WORK_DIR/worker3/log/dm-worker.log "/Q7B9DizNLLTTfiZHv9WoEAKamfpIUs="
   371  	check_log_not_contains $WORK_DIR/master/log/dm-master.log "123456"
   372  	check_log_not_contains $WORK_DIR/worker1/log/dm-worker.log "123456"
   373  	check_log_not_contains $WORK_DIR/worker2/log/dm-worker.log "123456"
   374  	check_log_not_contains $WORK_DIR/worker3/log/dm-worker.log "123456"
   375  
   376  	# destroy cluster
   377  	cleanup_process $*
   378  	cleanup_data_and_init_key
   379  
   380  	# insert new data
   381  	run_sql_file $cur/data/db1.increment5.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1
   382  
   383  	# deploy new cluster
   384  	run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml
   385  	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT
   386  	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
   387  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
   388  	run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml
   389  	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT
   390  
   391  	# import configs
   392  	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
   393  		"config import -p /tmp/configs" \
   394  		"creating sources" 1 \
   395  		"creating tasks" 1 \
   396  		"The original relay workers have been exported to" 1 \
   397  		"Currently DM doesn't support recover relay workers.*transfer-source.*start-relay" 1
   398  
   399  	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
   400  		"operate-source show" \
   401  		"mysql-replica-01" 1
   402  	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
   403  		"query-status -s $SOURCE_ID1" \
   404  		"\"result\": true" 2
   405  
   406  	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
   407  	echo ">>>>>>>>>>>>>>>>>>>>>>>>>>test test_relay_operations passed"
   408  }
   409  
   410  function run() {
   411  	test_relay_leak
   412  	test_relay_operations
   413  	test_cant_dail_upstream
   414  	test_restart_relay_status
   415  	test_cant_dail_downstream
   416  	test_kill_dump_connection
   417  }
   418  
   419  cleanup_data $TEST_NAME
   420  # also cleanup dm processes in case of last run failed
   421  cleanup_process
   422  run
   423  cleanup_process
   424  
   425  echo "[$(date)] <<<<<< test case $TEST_NAME success! >>>>>>"