#!/bin/bash
# tests/cdclog_s3/run.sh
#
# Integration test for the TiCDC cdclog sink writing to an S3-compatible
# store. Spins up a local minio server as the S3 endpoint, creates a
# changefeed with an s3:// sink URI, inserts rows, then polls minio's data
# directory until the expected row-change and DDL log files appear.
# Helper functions (run_sql, run_cdc_cli, run_cdc_server, start_tidb_cluster,
# stop_tidb_cluster, cleanup_process, check_logs, $OUT_DIR, $TEST_NAME, ...)
# come from the sourced test_prepare file.

set -e

CUR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
source "$CUR/../_utils/test_prepare"
WORK_DIR=$OUT_DIR/$TEST_NAME
CDC_BINARY=cdc.test

# Teardown helpers are defined before minio is launched so they can be
# trap'd as soon as there is something to clean up.
stop_minio() {
	# SIGINT lets minio flush and shut down cleanly.
	kill -2 "$MINIO_PID"
}

stop() {
	stop_minio
	stop_tidb_cluster
}

# --- start the s3 server (minio) ---
export MINIO_ACCESS_KEY=cdcs3accesskey
export MINIO_SECRET_KEY=cdcs3secretkey
export MINIO_BROWSER=off
export AWS_ACCESS_KEY_ID=$MINIO_ACCESS_KEY
export AWS_SECRET_ACCESS_KEY=$MINIO_SECRET_KEY
export S3_ENDPOINT=127.0.0.1:24927
rm -rf "$WORK_DIR"
mkdir -p "$WORK_DIR"
pkill -9 minio || true
bin/minio server --address "$S3_ENDPOINT" "$WORK_DIR/s3" &
MINIO_PID=$!
# Don't leak the minio process if the health check below gives up: the
# original script only installed its trap after this loop, so an early
# `exit 1` left minio running.
trap stop_minio EXIT

# Wait up to ~14s (7 tries x 2s) for minio to accept connections.
i=0
while ! curl -o /dev/null -v -s "http://$S3_ENDPOINT/"; do
	i=$((i + 1))
	if [ $i -gt 7 ]; then
		echo 'Failed to start minio'
		exit 1
	fi
	sleep 2
done

s3cmd --access_key="$MINIO_ACCESS_KEY" --secret_key="$MINIO_SECRET_KEY" --host="$S3_ENDPOINT" --host-bucket="$S3_ENDPOINT" --no-ssl mb s3://logbucket

function prepare() {
	stop_tidb_cluster
	start_tidb_cluster --workdir "$WORK_DIR"

	cd "$WORK_DIR"

	# record tso before we create tables to skip the system table DDLs
	start_ts=$(run_cdc_cli tso query --pd="http://$UP_PD_HOST_1:$UP_PD_PORT_1")

	run_cdc_server --workdir "$WORK_DIR" --binary "$CDC_BINARY"
}

# check_cdclog sets success=1 once the expected files are present under the
# minio data dir; on any missing piece it returns early with success
# unchanged so the caller can retry.
success=0
function check_cdclog() {
	DATA_DIR="$WORK_DIR/s3/logbucket/test"
	# retrieve table id for t1 from the log meta JSON
	if [ ! -f "$DATA_DIR/log.meta" ]; then
		return
	fi
	table_id=$(jq . "$DATA_DIR/log.meta" | grep t1 | awk -F '"' '{print $2}')
	if [ ! -d "$DATA_DIR/t_$table_id" ]; then
		return
	fi
	# NOTE: `grep | wc -l` is kept (not `grep -c`) on purpose — with a zero
	# match count `grep -c` exits non-zero and would abort under `set -e`,
	# whereas the pipeline's status is wc's.
	file_count=$(ls -ahl "$DATA_DIR/t_$table_id" | grep cdclog | wc -l)
	if [[ ! "$file_count" -eq "2" ]]; then
		echo "$TEST_NAME failed, expect 2 row changed files, obtain $file_count"
		return
	fi
	if [ ! -d "$DATA_DIR/ddls" ]; then
		return
	fi
	ddl_file_count=$(ls -ahl "$DATA_DIR/ddls" | grep ddl | wc -l)
	if [[ ! "$ddl_file_count" -eq "1" ]]; then
		echo "$TEST_NAME failed, expect 1 ddl file, obtain $ddl_file_count"
		return
	fi
	success=1
}

function cdclog_test() {
	run_sql "drop database if exists $TEST_NAME"
	run_sql "create database $TEST_NAME"
	run_sql "create table $TEST_NAME.t1 (c0 int primary key, payload varchar(1024));"

	SINK_URI="s3://logbucket/test?endpoint=http://$S3_ENDPOINT/"

	run_cdc_cli changefeed create --start-ts=0 --sink-uri="$SINK_URI"

	run_sql "create table $TEST_NAME.t2 (c0 int primary key, payload varchar(1024));"

	run_sql "insert into $TEST_NAME.t1 values (1, 'a')"
	# because flush row changed events interval is 5 second
	# so sleep 20 second will generate two files
	sleep 20
	run_sql "insert into $TEST_NAME.t1 values (2, 'b')"

	# Poll up to ~60s (30 tries x 2s) for the log files to land in minio.
	i=0
	while [ $i -lt 30 ]; do
		check_cdclog
		if [ "$success" == 1 ]; then
			echo "check log successfully"
			break
		fi
		i=$((i + 1))
		echo "check log failed $i-th time, retry later"
		sleep 2
	done
	# Bug fix: the original fell through after 30 failed retries and still
	# printed the success banner. Fail the test explicitly instead.
	if [ "$success" != 1 ]; then
		echo "$TEST_NAME failed, log files not found after 30 retries"
		exit 1
	fi
	cleanup_process "$CDC_BINARY"
}

trap stop EXIT
prepare "$@"
cdclog_test "$@"
check_logs "$WORK_DIR"
echo "[$(date)] <<<<<< run test case $TEST_NAME success! >>>>>>"