github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/engine/test/integration_tests/dm_many_tables/run.sh (about)

     1  #!/bin/bash
     2  
     3  set -eu
     4  
     5  WORK_DIR=$OUT_DIR/$TEST_NAME
     6  CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
     7  
     8  CONFIG="$DOCKER_COMPOSE_DIR/3m3e_with_s3.yaml $DOCKER_COMPOSE_DIR/dm_databases.yaml"
     9  CONFIG=$(adjust_config $OUT_DIR $TEST_NAME $CONFIG)
    10  echo "using adjusted configs to deploy cluster: $CONFIG"
    11  TABLE_NUM=500
    12  
# Run the whole scenario end-to-end:
#   1. deploy the engine cluster and wait for the upstream (3306) and
#      downstream (4000) databases,
#   2. seed TABLE_NUM upstream tables with a few rows each,
#   3. create a DM job, then exercise auto-resume (stop downstream TiDB,
#      restart all executors) and explicit pause/resume via the HTTP API,
#   4. wait for the job to finish and verify data with check_sync_diff.
# Globals: CONFIG, CUR_DIR, TABLE_NUM, WORK_DIR (read);
#          job_id (written — also read by the stop() EXIT trap).
function run() {
	# CONFIG is intentionally unquoted: it holds multiple compose-file
	# paths that must be split into separate arguments.
	start_engine_cluster $CONFIG
	wait_mysql_online.sh --port 3306
	wait_mysql_online.sh --port 4000

	# prepare data
	run_sql 'DROP DATABASE IF EXISTS dm_many_tables'
	run_sql 'CREATE DATABASE dm_many_tables;'
	for i in $(seq $TABLE_NUM); do
		run_sql --quiet "CREATE TABLE dm_many_tables.t$i(i TINYINT, j INT UNIQUE KEY);"
		# Two 2-row INSERTs per table (j values such as 10001/10011 and
		# 20002/20012 — ${j}000$j concatenates digits, not arithmetic).
		for j in $(seq 2); do
			run_sql --quiet "INSERT INTO dm_many_tables.t$i VALUES ($j,${j}000$j),($j,${j}001$j);"
		done
		# to make the tables have odd number of lines before 'ALTER TABLE' command, for check_sync_diff to work correctly
		run_sql --quiet "INSERT INTO dm_many_tables.t$i VALUES (9, 90009);"
	done

	# create job & wait for job to enter load phase
	# (in-progress load is detected via 0 < finishedBytes < totalBytes)
	job_id=$(create_job "DM" "$CUR_DIR/conf/job.yaml" "dm_many_tables")
	exec_with_retry --count 500 "curl \"http://127.0.0.1:10245/api/v1/jobs/$job_id/status\" | tee /dev/stderr | jq -e '.task_status.\"mysql-01\".status.status | .finishedBytes > 0 and .finishedBytes < .totalBytes'"

	# test autoresume
	# Killing the downstream TiDB must drive the load task into Error;
	# after bringing it back and restarting every executor, the job is
	# expected to resume loading on its own.
	docker stop dm_downstream_tidb
	exec_with_retry --count 20 "curl \"http://127.0.0.1:10245/api/v1/jobs/$job_id/status\" | tee /dev/stderr | jq -e '.task_status.\"mysql-01\".status | .unit == \"DMLoadTask\" and .stage == \"Error\"'"
	docker start dm_downstream_tidb
	docker restart server-executor-0 server-executor-1 server-executor-2

	# wait jobMaster online
	exec_with_retry --count 50 --interval_sec 10 "curl \"http://127.0.0.1:10245/api/v1/jobs/$job_id/status\" | grep 'job_id'"
	exec_with_retry --count 500 "curl \"http://127.0.0.1:10245/api/v1/jobs/$job_id/status\" | tee /dev/stderr | jq -e '.task_status.\"mysql-01\".status.status | .finishedBytes > 0 and .finishedBytes < .totalBytes'"

	# test pause and resume
	# (sleep gives the pause request time to propagate before polling)
	exec_with_retry --count 20 "curl -X PUT \"http://127.0.0.1:10245/api/v1/jobs/$job_id/status\" -H 'Content-Type: application/json' -d '{\"op\": \"pause\"}'"
	sleep 10
	exec_with_retry --count 20 "curl \"http://127.0.0.1:10245/api/v1/jobs/$job_id/status\" | tee /dev/stderr | jq -e '.task_status.\"mysql-01\".status | .stage == \"Paused\"'"
	exec_with_retry --count 20 "curl -X PUT \"http://127.0.0.1:10245/api/v1/jobs/$job_id/status\" -H 'Content-Type: application/json' -d '{\"op\": \"resume\"}'"
	exec_with_retry --count 20 "curl \"http://127.0.0.1:10245/api/v1/jobs/$job_id/status\" | tee /dev/stderr | jq -e '.task_status.\"mysql-01\".status | .stage == \"Running\"'"

	# wait for job finished and check data
	exec_with_retry --count 50 --interval_sec 10 "curl \"http://127.0.0.1:10245/api/v1/jobs/$job_id\" | tee /dev/stderr | jq -e '.state == \"Finished\"'"
	check_sync_diff $WORK_DIR $CUR_DIR/conf/diff_config.toml 1
}
    55  
    56  function stop {
    57  	if [ ! -z $job_id ]; then
    58  		echo -e "\n\nquery job statu before stop dm_many_tables...\n"
    59  		curl "http://127.0.0.1:10245/api/v1/jobs/$job_id/status" || true
    60  		curl "http://127.0.0.1:10245/api/v1/jobs/$job_id" || true
    61  	fi
    62  	stop_engine_cluster $WORK_DIR $CONFIG
    63  }
    64  
    65  trap stop EXIT
    66  run $*
    67  echo "[$(date)] <<<<<< run test case $TEST_NAME success! >>>>>>"