github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/tests/integration_tests/cli_with_auth/run.sh

#!/bin/bash

set -eu

CUR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
source $CUR/../_utils/test_prepare
WORK_DIR=$OUT_DIR/$TEST_NAME
CDC_BINARY=cdc.test
SINK_TYPE=$1
TLS_DIR=$(cd $CUR/../_certificates && pwd)

export TICDC_USER=ticdc
export TICDC_PASSWORD=ticdc_secret
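# The cdc CLI reads TICDC_USER and TICDC_PASSWORD from the environment and
# uses them to authenticate each command, which is required once the server
# is started with client-user-required enabled below.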

function check_changefeed_count() {
	pd_addr=$1
	expected=$2
	feed_count=$(cdc cli changefeed list --pd=$pd_addr | jq '.|length')
	if [[ "$feed_count" != "$expected" ]]; then
		echo "[$(date)] <<<<< unexpected changefeed count! expected ${expected}, got ${feed_count} >>>>>"
		exit 1
	fi
	echo "changefeed count ${feed_count} check pass, pd_addr: $pd_addr"
}

function run() {
	rm -rf $WORK_DIR && mkdir -p $WORK_DIR

	start_tidb_cluster --workdir $WORK_DIR --multiple-upstream-pd true
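	# Create the upstream user that the TiCDC server will accept; it must match
	# client-allowed-user in server.toml below.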
	run_sql "CREATE USER 'ticdc'@'%' IDENTIFIED BY 'ticdc_secret';"

	cd $WORK_DIR
	pd_addr="http://$UP_PD_HOST_1:$UP_PD_PORT_1"

	# record tso before we create tables to skip the system table DDLs
	start_ts=$(run_cdc_cli_tso_query ${UP_PD_HOST_1} ${UP_PD_PORT_1})
	run_sql "CREATE table test.simple(id int primary key, val int);"
	run_sql "CREATE table test.\`simple-dash\`(id int primary key, val int);"

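	# Require client authentication and only allow the 'ticdc' user, so every
	# CLI command below must carry valid credentials.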
	cat - >"$WORK_DIR/server.toml" <<EOF
[security]
client-user-required = true
client-allowed-user = ["ticdc"]
EOF

	run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY --config "$WORK_DIR/server.toml"

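	# Build a sink URI for the sink type under test; non-MySQL sinks get a
	# unique topic/path per run.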
	TOPIC_NAME="ticdc-cli-test-$RANDOM"
	case $SINK_TYPE in
	kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
	storage) SINK_URI="file://$WORK_DIR/storage_test/$TOPIC_NAME?protocol=canal-json&enable-tidb-extension=true" ;;
	pulsar)
		run_pulsar_cluster $WORK_DIR normal
		SINK_URI="pulsar://127.0.0.1:6650/$TOPIC_NAME?protocol=canal-json&enable-tidb-extension=true"
		;;
	*) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;;
	esac

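	# Create a changefeed with a custom ID, then start a consumer that applies
	# the sink output to the downstream TiDB so replication can be verified.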
	uuid="custom-changefeed-name"
	run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" --tz="Asia/Shanghai" -c="$uuid"
	case $SINK_TYPE in
	kafka) run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
	storage) run_storage_consumer $WORK_DIR $SINK_URI "" "" ;;
	pulsar) run_pulsar_consumer --upstream-uri $SINK_URI ;;
	esac

	# Make sure the changefeed is created and replicating: the tables must
	# appear downstream.
	check_table_exists test.simple ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT}
	check_table_exists test."\`simple-dash\`" ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT}

	check_changefeed_state "http://${UP_PD_HOST_1}:${UP_PD_PORT_1}" $uuid "normal" "null" ""

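	# The changefeed must be visible through every PD endpoint, both
	# individually and via the combined endpoint list.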
	check_changefeed_count http://${UP_PD_HOST_1}:${UP_PD_PORT_1} 1
	check_changefeed_count http://${UP_PD_HOST_2}:${UP_PD_PORT_2} 1
	check_changefeed_count http://${UP_PD_HOST_3}:${UP_PD_PORT_3} 1
	check_changefeed_count http://${UP_PD_HOST_1}:${UP_PD_PORT_1},http://${UP_PD_HOST_2}:${UP_PD_PORT_2},http://${UP_PD_HOST_3}:${UP_PD_PORT_3} 1

	# Make sure a changefeed cannot be created if the name already exists.
	set +e
	exists=$(run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" --changefeed-id="$uuid" 2>&1 | grep -oE 'already exists')
	set -e
	if [[ -z $exists ]]; then
		echo "[$(date)] <<<<< unexpected output, got ${exists} >>>>>"
		exit 1
	fi

	# Updating the changefeed config should fail while the changefeed is running.
	cat - >"$WORK_DIR/changefeed.toml" <<EOF
case-sensitive = true
[scheduler]
enable-table-across-nodes = true
EOF
	set +e
	update_result=$(cdc cli changefeed update --pd=$pd_addr --config="$WORK_DIR/changefeed.toml" --no-confirm --changefeed-id $uuid)
	set -e
	if [[ ! $update_result == *"can only update changefeed config when it is stopped"* ]]; then
		echo "update changefeed config should fail when changefeed is running, got $update_result"
		exit 1
	fi

	# Pause changefeed
	run_cdc_cli changefeed --changefeed-id $uuid pause && sleep 3
	check_changefeed_state "http://${UP_PD_HOST_1}:${UP_PD_PORT_1}" $uuid "stopped" "null" ""

	# Update changefeed
	run_cdc_cli changefeed update --pd=$pd_addr --config="$WORK_DIR/changefeed.toml" --no-confirm --changefeed-id $uuid
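	# Verify through the v2 open API that the update took effect.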
	changefeed_info=$(curl -s -X GET "http://127.0.0.1:8300/api/v2/changefeeds/$uuid/meta_info" 2>&1)
	if [[ ! $changefeed_info == *"\"case_sensitive\":true"* ]]; then
		echo "[$(date)] <<<<< changefeed info is not updated as expected ${changefeed_info} >>>>>"
		exit 1
	fi
	if [ "$SINK_TYPE" == "kafka" ]; then
		if [[ ! $changefeed_info == *"\"enable_table_across_nodes\":true"* ]]; then
			echo "[$(date)] <<<<< changefeed info is not updated as expected ${changefeed_info} >>>>>"
			exit 1
		fi
	else
		# Currently, MySQL changefeeds do not support the scale-out feature.
		if [[ $changefeed_info == *"\"enable_table_across_nodes\":true"* ]]; then
			echo "[$(date)] <<<<< changefeed info is not updated as expected ${changefeed_info} >>>>>"
			exit 1
		fi
	fi

	# Resume changefeed
	run_cdc_cli changefeed --changefeed-id $uuid resume && sleep 3
	check_changefeed_state "http://${UP_PD_HOST_1}:${UP_PD_PORT_1}" $uuid "normal" "null" ""

	# Remove changefeed
	run_cdc_cli changefeed --changefeed-id $uuid remove && sleep 3
	check_changefeed_count http://${UP_PD_HOST_1}:${UP_PD_PORT_1} 0

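	# Recreate the changefeed; without --start-ts it replicates from the current ts.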
	run_cdc_cli changefeed create --sink-uri="$SINK_URI" --tz="Asia/Shanghai" -c="$uuid" && sleep 3
	check_changefeed_state "http://${UP_PD_HOST_1}:${UP_PD_PORT_1}" $uuid "normal" "null" ""

	# Make sure a bad sink URL fails changefeed creation.
	badsink=$(run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="mysql://badsink" 2>&1 | grep -oE 'fail')
	if [[ -z $badsink ]]; then
		echo "[$(date)] <<<<< unexpected output, got ${badsink} >>>>>"
		exit 1
	fi

	# Test Kafka SSL connection.
	if [ "$SINK_TYPE" == "kafka" ]; then
		SSL_TOPIC_NAME="ticdc-cli-test-ssl-$RANDOM"
		SINK_URI="kafka://127.0.0.1:9093/$SSL_TOPIC_NAME?protocol=open-protocol&ca=${TLS_DIR}/ca.pem&cert=${TLS_DIR}/client.pem&key=${TLS_DIR}/client-key.pem&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760&insecure-skip-verify=true"
		run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" --tz="Asia/Shanghai"
	fi

	# Smoke test unsafe commands
	echo "y" | run_cdc_cli unsafe delete-service-gc-safepoint
	run_cdc_cli unsafe reset --no-confirm --pd=$pd_addr
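	# Pick an arbitrary region and a fresh tso to exercise resolve-lock.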
	REGION_ID=$(pd-ctl -u=$pd_addr region | jq '.regions[0].id')
	TS=$(cdc cli tso query --pd=$pd_addr)
	# wait for owner online
	sleep 3
	run_cdc_cli unsafe resolve-lock --region=$REGION_ID
	run_cdc_cli unsafe resolve-lock --region=$REGION_ID --ts=$TS

	# Smoke test change log level
	curl -X POST -d '"warn"' http://127.0.0.1:8300/api/v1/log
	sleep 3
	# make sure TiCDC does not panic
	curl http://127.0.0.1:8300/status

	cleanup_process $CDC_BINARY
}

trap stop_tidb_cluster EXIT
run "$@"
check_logs $WORK_DIR
echo "[$(date)] <<<<<< run test case $TEST_NAME success! >>>>>>"