github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/tests/integration_tests/lossy_ddl/run.sh

#!/bin/bash

set -eu

CUR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
source $CUR/../_utils/test_prepare
WORK_DIR=$OUT_DIR/$TEST_NAME
CDC_BINARY=cdc.test
SINK_TYPE=$1

MAX_RETRIES=10

# We expect the lossy DDL not to cause any data updates, so we can check the
# data in the blackhole sink to see whether any row was updated.
function check_lossy_ddl() {
	# Check that finish_mark was written to the log.
	is_finish_mark_exist=$(grep "BlackHoleSink: DDL Event" "$1/cdc.log" | grep -c "finish_mark")
	if [[ "$is_finish_mark_exist" -ne 1 ]]; then
		echo "can't find finish mark"
		exit 1
	fi

	row_logs=$(grep "BlackHoleSink: WriteEvents" "$1/cdc.log")
	echo "$row_logs"
	row_logs_count=$(grep -c "BlackHoleSink: WriteEvents" "$1/cdc.log")
	if [[ "$row_logs_count" -ne 23 ]]; then
		echo "expected 23 row logs, got $row_logs_count"
		exit 1
	fi
}

export -f check_lossy_ddl

function run() {
	# The blackhole sink is enough to check whether the DDL is lossy,
	# so there is no need to run this test for other sink types.
	if [ "$SINK_TYPE" != "storage" ]; then
		return
	fi

	rm -rf $WORK_DIR && mkdir -p $WORK_DIR
	start_tidb_cluster --workdir $WORK_DIR
	cd $WORK_DIR

	pd_addr="http://$UP_PD_HOST_1:$UP_PD_PORT_1"
	SINK_URI="blackhole://"

	run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY --addr "127.0.0.1:8300" --pd $pd_addr
	cdc cli changefeed create --pd=$pd_addr --sink-uri="$SINK_URI"

	run_sql_file $CUR/data/prepare.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT}

	ensure $MAX_RETRIES check_lossy_ddl $WORK_DIR
	cleanup_process $CDC_BINARY
}

trap stop_tidb_cluster EXIT
run $*
echo "[$(date)] <<<<<< run test case $TEST_NAME success! >>>>>>"
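
For reference, ensure $MAX_RETRIES check_lossy_ddl $WORK_DIR retries the check until it passes or the retry budget is exhausted, which is why check_lossy_ddl is exported with export -f above. The real helper lives under tests/_utils and may differ; the following is only a minimal sketch of such a retry loop, with the helper name, sleep interval, and messages invented for illustration:

# ensure_sketch: hypothetical stand-in for the tests/_utils retry helper;
# runs a command up to $1 times and fails the test if it never succeeds.
ensure_sketch() {
	local retries=$1
	shift
	local i
	for ((i = 1; i <= retries; i++)); do
		# Run the check in a child bash so its `exit 1` aborts only the
		# child shell; exported functions remain visible to the child.
		if bash -c "$*"; then
			return 0
		fi
		echo "check failed, attempt $i/$retries"
		sleep 2 # assumed interval, not necessarily the real value
	done
	echo "check did not pass after $retries attempts"
	exit 1
}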
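
The integration-test harness passes the sink type as the script's first positional argument, while variables such as OUT_DIR, TEST_NAME, UP_TIDB_HOST, and UP_PD_HOST_1 are expected to come from the surrounding test environment (test_prepare and the test runner). A hypothetical manual invocation, with the output directory path assumed:

OUT_DIR=/tmp/tidb_cdc_test TEST_NAME=lossy_ddl bash run.sh storage

Because of the guard at the top of run(), any sink type other than "storage" skips the body and the script still reports success.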