github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/tests/integration_tests/changefeed_reconstruct/run.sh

#!/bin/bash

# Integration test: kill the only capture owning a changefeed, start a new
# capture, and verify the changefeed's tables are re-dispatched to it.

set -eu

CUR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
source $CUR/../_utils/test_prepare
WORK_DIR=$OUT_DIR/$TEST_NAME
CDC_BINARY=cdc.test
SINK_TYPE=$1
MAX_RETRIES=10

# Check that the processor of the given changefeed on the given capture
# currently holds the expected number of tables.
function check_processor_table_count() {
	pd=$1
	changefeed=$2
	capture=$3
	expected=$4
	count=$(cdc cli processor query --pd=$pd -c $changefeed -p $capture 2>&1 | jq '.status."tables"|length')
	if [[ ! "$count" -eq "$expected" ]]; then
		echo "table count $count does not equal expected count $expected"
		exit 1
	fi
}

# Check that no capture is registered in PD.
function check_no_capture() {
	pd=$1
	count=$(cdc cli capture list --pd=$pd 2>&1 | jq '.|length')
	if [[ ! "$count" -eq "0" ]]; then
		exit 1
	fi
}

export -f check_processor_table_count
export -f check_no_capture

function run() {
	rm -rf $WORK_DIR && mkdir -p $WORK_DIR
	start_tidb_cluster --workdir $WORK_DIR
	cd $WORK_DIR

	pd_addr="http://$UP_PD_HOST_1:$UP_PD_PORT_1"
	TOPIC_NAME="ticdc-changefeed-reconstruct-$RANDOM"
	case $SINK_TYPE in
	kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
	storage) SINK_URI="file://$WORK_DIR/storage_test/$TOPIC_NAME?protocol=canal-json&enable-tidb-extension=true" ;;
	pulsar)
		run_pulsar_cluster $WORK_DIR normal
		SINK_URI="pulsar://127.0.0.1:6650/$TOPIC_NAME?protocol=canal-json&enable-tidb-extension=true"
		;;
	*) SINK_URI="mysql://normal:123456@127.0.0.1:3306/?max-txn-row=1" ;;
	esac

	run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY --addr "127.0.0.1:8300" --logsuffix server1 --pd $pd_addr
	owner_pid=$(ps -C $CDC_BINARY -o pid= | awk '{print $1}')
	changefeed_id=$(cdc cli changefeed create --pd=$pd_addr --sink-uri="$SINK_URI" 2>&1 | tail -n2 | head -n1 | awk '{print $2}')
	case $SINK_TYPE in
	kafka) run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
	storage) run_storage_consumer $WORK_DIR $SINK_URI "" "" ;;
	pulsar) run_pulsar_consumer --upstream-uri $SINK_URI ;;
	esac

	run_sql "CREATE DATABASE changefeed_reconstruct;" ${UP_TIDB_HOST} ${UP_TIDB_PORT}
	go-ycsb load mysql -P $CUR/conf/workload -p mysql.host=${UP_TIDB_HOST} -p mysql.port=${UP_TIDB_PORT} -p mysql.user=root -p mysql.db=changefeed_reconstruct
	check_table_exists "changefeed_reconstruct.usertable" ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT}
	check_sync_diff $WORK_DIR $CUR/conf/diff_config.toml

	# kill the capture that owns the changefeed and wait until no capture remains
	kill_cdc_pid $owner_pid
	ensure $MAX_RETRIES check_no_capture $pd_addr

	# run another cdc server
	run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY --addr "127.0.0.1:8300" --logsuffix server2
	ensure $MAX_RETRIES "$CDC_BINARY cli capture list --pd=$pd_addr 2>&1 | grep id"
	capture_id=$($CDC_BINARY cli --pd=$pd_addr capture list 2>&1 | awk -F '"' '/\"id/{print $4}')
	echo "capture_id:" $capture_id

	# check table has been dispatched to new capture
	ensure $MAX_RETRIES check_processor_table_count $pd_addr $changefeed_id $capture_id 1
	run_sql "DROP DATABASE changefeed_reconstruct;" ${UP_TIDB_HOST} ${UP_TIDB_PORT}
	# check table has been removed from processor
	ensure $MAX_RETRIES check_processor_table_count $pd_addr $changefeed_id $capture_id 0

	# recreate the database and verify replication still works end to end
	run_sql "CREATE DATABASE changefeed_reconstruct;" ${UP_TIDB_HOST} ${UP_TIDB_PORT}
	go-ycsb load mysql -P $CUR/conf/workload -p mysql.host=${UP_TIDB_HOST} -p mysql.port=${UP_TIDB_PORT} -p mysql.user=root -p mysql.db=changefeed_reconstruct
	check_table_exists "changefeed_reconstruct.usertable" ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT}
	check_sync_diff $WORK_DIR $CUR/conf/diff_config.toml

	cleanup_process $CDC_BINARY
}

trap stop_tidb_cluster EXIT
run $*
check_logs $WORK_DIR
echo "[$(date)] <<<<<< run test case $TEST_NAME success! >>>>>>"
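
# Usage sketch (assumption: this script is invoked by the integration test
# runner under tests/integration_tests, which provides OUT_DIR, TEST_NAME,
# KAFKA_VERSION and the UP_/DOWN_ host/port variables consumed via
# _utils/test_prepare). The single positional argument selects the sink:
#
#   ./run.sh mysql      # falls through to the default MySQL sink branch
#   ./run.sh kafka      # Kafka sink; also starts run_kafka_consumer
#   ./run.sh storage    # file-based storage sink under $WORK_DIR
#   ./run.sh pulsar     # Pulsar sink; also starts a local Pulsar cluster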