github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/tests/integration_tests/region_merge/run.sh

#!/bin/bash

set -eu

CUR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
source $CUR/../_utils/test_prepare
WORK_DIR=$OUT_DIR/$TEST_NAME
CDC_BINARY=cdc.test
SINK_TYPE=$1

# split region_merge.t1 into $scale regions, write and delete a few rows,
# then wait for PD to merge some of the regions back
function split_and_random_merge() {
	pd_addr=$1
	scale=$2
	echo "split_and_random_merge scale: $scale"
	run_sql "SPLIT TABLE region_merge.t1 BETWEEN (-9223372036854775808) AND (9223372036854775807) REGIONS $scale;" ${UP_TIDB_HOST} ${UP_TIDB_PORT} || true
	run_sql "SELECT count(distinct region_id) from information_schema.tikv_region_status where db_name = 'region_merge' and table_name = 't1';" &&
		cat $OUT_DIR/sql_res.region_merge.txt
	run_sql "insert into region_merge.t1 values (-9223372036854775808),(0),(1),(9223372036854775807);" ${UP_TIDB_HOST} ${UP_TIDB_PORT}
	run_sql "delete from region_merge.t1;" ${UP_TIDB_HOST} ${UP_TIDB_PORT}
	# sleep 5s to wait for some regions to merge
	sleep 5
}

large_scale=(100 200 400 800 1600 3200 6400 12800 25600 51200)
small_scale=(20 40 80)
# in CI, we use the small data set
test_scale=("${small_scale[@]}")

function run() {
	rm -rf $WORK_DIR && mkdir -p $WORK_DIR

	start_tidb_cluster --workdir $WORK_DIR --multiple-upstream-pd true --pd-config $CUR/conf/pd_config.toml

	cd $WORK_DIR

	pd_addr="http://$UP_PD_HOST_1:$UP_PD_PORT_1"
	run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY
	TOPIC_NAME="ticdc-region-merge-test-$RANDOM"
	case $SINK_TYPE in
	kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
	storage) SINK_URI="file://$WORK_DIR/storage_test/$TOPIC_NAME?protocol=canal-json&enable-tidb-extension=true" ;;
	pulsar)
		run_pulsar_cluster $WORK_DIR mtls
		SINK_URI="pulsar+ssl://127.0.0.1:6651/$TOPIC_NAME?protocol=canal-json&enable-tidb-extension=true"
		;;
	*) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;;
	esac
	if [ "$SINK_TYPE" == "pulsar" ]; then
		cat <<EOF >>$WORK_DIR/pulsar_test.toml
[sink.pulsar-config]
tls-trust-certs-file-path="${WORK_DIR}/ca.cert.pem"
auth-tls-private-key-path="${WORK_DIR}/broker_client.key-pk8.pem"
auth-tls-certificate-path="${WORK_DIR}/broker_client.cert.pem"
EOF
		cdc cli changefeed create --sink-uri="$SINK_URI" --config=$WORK_DIR/pulsar_test.toml
	else
		cdc cli changefeed create --sink-uri="$SINK_URI"
	fi
	case $SINK_TYPE in
	kafka) run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
	storage) run_storage_consumer $WORK_DIR $SINK_URI "" "" ;;
	pulsar) run_pulsar_consumer --upstream-uri $SINK_URI --ca "${WORK_DIR}/ca.cert.pem" --auth-tls-private-key-path "${WORK_DIR}/broker_client.key-pk8.pem" --auth-tls-certificate-path="${WORK_DIR}/broker_client.cert.pem" ;;
	esac

	# set max_execution_time to 30s, because the SPLIT TABLE statement could block even after the regions have been split.
	run_sql "SET @@global.MAX_EXECUTION_TIME = 30000;" ${UP_TIDB_HOST} ${UP_TIDB_PORT}
	run_sql "CREATE DATABASE region_merge;" ${UP_TIDB_HOST} ${UP_TIDB_PORT}
	run_sql "CREATE TABLE region_merge.t1 (id bigint primary key);" ${UP_TIDB_HOST} ${UP_TIDB_PORT}

	for scale in "${test_scale[@]}"; do
		split_and_random_merge $pd_addr $scale
	done

	run_sql "insert into region_merge.t1 values (-9223372036854775808),(0),(1),(9223372036854775807);" ${UP_TIDB_HOST} ${UP_TIDB_PORT}
	check_table_exists region_merge.t1 ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT}
	check_sync_diff $WORK_DIR $CUR/conf/diff_config.toml

	cleanup_process $CDC_BINARY
}

trap stop_tidb_cluster EXIT
run $*
check_logs $WORK_DIR
echo "[$(date)] <<<<<< run test case $TEST_NAME success! >>>>>>"