github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/tests/integration_tests/batch_add_table/run.sh

#!/bin/bash

set -eu

CUR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
source $CUR/../_utils/test_prepare
WORK_DIR=$OUT_DIR/$TEST_NAME
CDC_BINARY=cdc.test
SINK_TYPE=$1

# Exercise batch table creation with TiDB's fast create table feature enabled.
function run_with_fast_create_table() {
	rm -rf $WORK_DIR && mkdir -p $WORK_DIR

	start_tidb_cluster --workdir $WORK_DIR

	cd $WORK_DIR

	run_sql "set global tidb_enable_fast_create_table=on" ${UP_TIDB_HOST} ${UP_TIDB_PORT}

	run_sql_file $CUR/data/prepare.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT}
	run_sql_file $CUR/data/prepare.sql ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT}

	run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY

	TOPIC_NAME="ticdc-batch-add-table-test-$RANDOM"
	case $SINK_TYPE in
	kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
	storage) SINK_URI="file://$WORK_DIR/storage_test/$TOPIC_NAME?protocol=canal-json&enable-tidb-extension=true" ;;
	pulsar)
		run_pulsar_cluster $WORK_DIR normal
		SINK_URI="pulsar://127.0.0.1:6650/$TOPIC_NAME?protocol=canal-json&enable-tidb-extension=true"
		;;
	*) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;;
	esac
	run_cdc_cli changefeed create --sink-uri="$SINK_URI"
	case $SINK_TYPE in
	kafka) run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
	storage) run_storage_consumer $WORK_DIR $SINK_URI "" "" ;;
	pulsar) run_pulsar_consumer --upstream-uri $SINK_URI ;;
	esac

	run_sql_file $CUR/data/test.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT}
	# sync_diff can't check a non-existent table, so first make sure the expected tables are created downstream
	check_table_exists batch_add_table.finish_mark ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT}
	check_sync_diff $WORK_DIR $CUR/conf/diff_config.toml

	cleanup_process $CDC_BINARY
}

# Same flow with fast create table disabled.
function run_without_fast_create_table() {
	rm -rf $WORK_DIR && mkdir -p $WORK_DIR

	start_tidb_cluster --workdir $WORK_DIR

	cd $WORK_DIR

	run_sql "set global tidb_enable_fast_create_table=off" ${UP_TIDB_HOST} ${UP_TIDB_PORT}

	run_sql_file $CUR/data/prepare.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT}
	run_sql_file $CUR/data/prepare.sql ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT}

	run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY

	TOPIC_NAME="ticdc-batch-add-table-test-$RANDOM"
	case $SINK_TYPE in
	kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
	storage) SINK_URI="file://$WORK_DIR/storage_test/$TOPIC_NAME?protocol=canal-json&enable-tidb-extension=true" ;;
	pulsar)
		run_pulsar_cluster $WORK_DIR normal
		SINK_URI="pulsar://127.0.0.1:6650/$TOPIC_NAME?protocol=canal-json&enable-tidb-extension=true"
		;;
	*) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;;
	esac
	run_cdc_cli changefeed create --sink-uri="$SINK_URI"
	case $SINK_TYPE in
	kafka) run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
	storage) run_storage_consumer $WORK_DIR $SINK_URI "" "" ;;
	pulsar) run_pulsar_consumer --upstream-uri $SINK_URI ;;
	esac

	run_sql_file $CUR/data/test.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT}
	# sync_diff can't check a non-existent table, so first make sure the expected tables are created downstream
	check_table_exists batch_add_table.finish_mark ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT}
	check_sync_diff $WORK_DIR $CUR/conf/diff_config.toml

	cleanup_process $CDC_BINARY
}

# Run the case twice: first with fast create table disabled, then enabled.
trap stop_tidb_cluster EXIT
run_without_fast_create_table $*
stop_tidb_cluster
run_with_fast_create_table $*
check_logs $WORK_DIR
echo "[$(date)] <<<<<< run test case $TEST_NAME success! >>>>>>"
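The script takes the sink type as its only positional argument (SINK_TYPE=$1); anything other than kafka, storage, or pulsar falls through to the default MySQL sink URI. A minimal sketch of a direct invocation, assuming the tiflow integration-test harness has already prepared the environment the script relies on (OUT_DIR, TEST_NAME, the upstream/downstream TiDB hosts and ports, and the cdc.test binary):

	# Minimal sketch, assuming the harness environment is already prepared;
	# "mysql" is not matched by the kafka/storage/pulsar branches, so the
	# default MySQL sink URI is used.
	bash tests/integration_tests/batch_add_table/run.sh mysql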