github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/tests/integration_tests/ddl_only_block_related_table/run.sh

#!/bin/bash

set -eu

CUR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
source $CUR/../_utils/test_prepare
WORK_DIR=$OUT_DIR/$TEST_NAME
CDC_BINARY=cdc.test
SINK_TYPE=$1

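# check_ts_not_forward passes (returns 0) only when the checkpoint TSO of the
# given changefeed stays unchanged across 10 consecutive 1s probes; it exits
# non-zero as soon as the checkpoint is seen moving, so `ensure` retries it.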
function check_ts_not_forward() {
	changefeed_id=$1
	ts1=$(cdc cli changefeed query -c "$changefeed_id" | jq -r '.checkpoint_tso')
	sleep 1
	ts2=$(cdc cli changefeed query -c "$changefeed_id" | jq -r '.checkpoint_tso')
	if [ "$ts1" == "$ts2" ]; then
		count=0
		while [ "$ts1" == "$ts2" ]; do
			sleep 1
			ts2=$(cdc cli changefeed query -c "$changefeed_id" | jq -r '.checkpoint_tso')
			# Use an explicit assignment instead of ((count++)), whose
			# post-increment returns a non-zero status when count is 0 and
			# could trip `set -e`.
			count=$((count + 1))
			if [ $count -ge 10 ]; then
				echo "pass check: checkpoint tso did not advance within 10s"
				return
			fi
		done
	fi
	exit 1
}

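# check_ts_forward passes once resolved_ts is a real value (not null/0) and
# either resolved_ts or checkpoint_tso changed across a 1s window; otherwise
# it exits non-zero so `ensure` retries it.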
function check_ts_forward() {
	changefeed_id=$1
	rts1=$(cdc cli changefeed query --changefeed-id=${changefeed_id} 2>&1 | jq '.resolved_ts')
	checkpoint1=$(cdc cli changefeed query --changefeed-id=${changefeed_id} 2>&1 | jq '.checkpoint_tso')
	sleep 1
	rts2=$(cdc cli changefeed query --changefeed-id=${changefeed_id} 2>&1 | jq '.resolved_ts')
	checkpoint2=$(cdc cli changefeed query --changefeed-id=${changefeed_id} 2>&1 | jq '.checkpoint_tso')
	if [[ "$rts1" != "null" ]] && [[ "$rts1" != "0" ]]; then
		if [[ "$rts1" -ne "$rts2" ]] || [[ "$checkpoint1" -ne "$checkpoint2" ]]; then
			echo "changefeed is working normally rts: ${rts1}->${rts2} checkpoint: ${checkpoint1}->${checkpoint2}"
			return
		fi
	fi
	exit 1
}

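# Export both checkers so `ensure` can invoke them from a child shell;
# without `export -f` the functions would not be visible there.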
export -f check_ts_not_forward
export -f check_ts_forward

function run() {
	rm -rf $WORK_DIR && mkdir -p $WORK_DIR

	start_tidb_cluster --workdir $WORK_DIR

	cd $WORK_DIR

	run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY
	owner_pid=$(ps -C $CDC_BINARY -o pid= | awk '{print $1}')
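	# Remember the pid of the running cdc server so we can kill it later and
	# restart it with a failpoint enabled. This assumes a single cdc.test
	# process; `ps -C` would otherwise report multiple pids.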

	# This test contains `recover table`, which requires the SUPER privilege,
	# so we cannot use a normal (non-root) user for the MySQL sink.
	TOPIC_NAME="ticdc-common-1-test-$RANDOM"
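	# Pick the sink URI for the sink type under test; any unrecognized type
	# falls through to the MySQL sink, connecting as root (see above).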
	case $SINK_TYPE in
	kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
	storage) SINK_URI="file://$WORK_DIR/storage_test/$TOPIC_NAME?protocol=canal-json&enable-tidb-extension=true" ;;
	pulsar)
		run_pulsar_cluster $WORK_DIR normal
		SINK_URI="pulsar://127.0.0.1:6650/$TOPIC_NAME?protocol=canal-json&enable-tidb-extension=true"
		;;
	*) SINK_URI="mysql://root@127.0.0.1:3306/" ;;
	esac
	changefeed_id="ddl-only-block-related-table"
	run_cdc_cli changefeed create --sink-uri="$SINK_URI" -c=${changefeed_id}

	case $SINK_TYPE in
	kafka) run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
	storage) run_storage_consumer $WORK_DIR $SINK_URI "" "" ;;
	pulsar) run_pulsar_consumer --upstream-uri $SINK_URI ;;
	esac

	run_sql_file $CUR/data/start.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT}

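	# start.sql ends by creating a `finish_mark` table (the usual convention
	# in these tests); once it shows up downstream, the initial DDLs and DMLs
	# have been replicated.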
	check_table_exists ddl_only_block_related_table.finish_mark ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT}

	kill_cdc_pid $owner_pid
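	# Restart the cdc server with the ExecuteNotDone failpoint enabled, which
	# makes the owner report DDL execution as never finishing. The stuck DDL
	# should block only its related table while the other tables keep
	# replicating, which is what the test name asserts.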
	export GO_FAILPOINTS='github.com/pingcap/tiflow/cdc/owner/ExecuteNotDone=return(true)'
	run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY
	owner_pid=$(ps -C $CDC_BINARY -o pid= | awk '{print $1}')

	run_sql_file $CUR/data/finishe.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT}
	# Make sure t1 and t2 are equal in the upstream and downstream. Skip this
	# check for non-MySQL sinks: the checkpoint does not advance, so the
	# consumer will not consume the DMLs of t1/t2 behind the stuck DDL's
	# commitTs. The final check_sync_diff below verifies the eventual
	# consistency of all tables, which is enough to ensure correctness.
	if [ "$SINK_TYPE" == "mysql" ]; then
		check_sync_diff $WORK_DIR $CUR/conf/diff_config.toml 90
	fi
	# Check that the checkpoint does not advance.
	ensure 30 check_ts_not_forward $changefeed_id

	kill_cdc_pid $owner_pid
	# Clear the failpoint so that the `ddl_not_done` table can advance.
	export GO_FAILPOINTS=''
	run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY
	# Make sure all tables are equal in the upstream and downstream.
	check_sync_diff $WORK_DIR $CUR/conf/diff_config_2.toml 90

	# Check that the checkpoint advances again.
	ensure 20 check_ts_forward $changefeed_id

	cleanup_process $CDC_BINARY
}

trap stop_tidb_cluster EXIT
run "$@"
check_logs $WORK_DIR
echo "[$(date)] <<<<<< run test case $TEST_NAME success! >>>>>>"