github.com/m3db/m3@v1.5.0/scripts/docker-integration-tests/multi_cluster_write/test.sh

#!/usr/bin/env bash

set -xe

source "$M3_PATH"/scripts/docker-integration-tests/common.sh
REVISION=$(git rev-parse HEAD)
SCRIPT_PATH="$M3_PATH"/scripts/docker-integration-tests/multi_cluster_write
COMPOSE_FILE="$SCRIPT_PATH"/docker-compose.yml
export REVISION

echo "Run m3dbnode and m3coordinator containers"
docker-compose -f "${COMPOSE_FILE}" up -d --renew-anon-volumes cluster_a_dbnode01
docker-compose -f "${COMPOSE_FILE}" up -d --renew-anon-volumes cluster_a_dbnode02
docker-compose -f "${COMPOSE_FILE}" up -d --renew-anon-volumes cluster_a_coordinator01

docker-compose -f "${COMPOSE_FILE}" up -d --renew-anon-volumes cluster_b_dbnode01
docker-compose -f "${COMPOSE_FILE}" up -d --renew-anon-volumes cluster_b_dbnode02
docker-compose -f "${COMPOSE_FILE}" up -d --renew-anon-volumes cluster_b_coordinator01

# Think of this as a defer func() in Go.
function defer {
  docker-compose -f "${COMPOSE_FILE}" down || echo "unable to shut down containers" # CI sometimes fails to stop all containers
}
trap defer EXIT

# Set up cluster A.
DBNODE_ID_01=cluster_a_m3db_local_1 \
DBNODE_ID_02=cluster_a_m3db_local_2 \
DBNODE_HOST_01=cluster_a_dbnode01 \
DBNODE_HOST_02=cluster_a_dbnode02 \
DBNODE_HEALTH_PORT_01=9012 \
DBNODE_HEALTH_PORT_02=9022 \
COORDINATOR_PORT=7201 \
  setup_two_m3db_nodes

# Set up cluster B.
DBNODE_ID_01=cluster_b_m3db_local_1 \
DBNODE_ID_02=cluster_b_m3db_local_2 \
DBNODE_HOST_01=cluster_b_dbnode01 \
DBNODE_HOST_02=cluster_b_dbnode02 \
DBNODE_HEALTH_PORT_01=9112 \
DBNODE_HEALTH_PORT_02=9122 \
COORDINATOR_PORT=17201 \
  setup_two_m3db_nodes

# write_data posts a single datapoint to the /write endpoint on the given port
# and succeeds only if the response is HTTP 200.
function write_data {
  local namespace=$1
  local id=$2
  local timestamp=$3
  local value=$4
  local port=$5

  respCode=$(curl -s -o /dev/null -X POST -w "%{http_code}" 0.0.0.0:"$port"/write -d '{
    "namespace": "'"$namespace"'",
    "id": "'"$id"'",
    "datapoint": {
      "timestamp": '"$timestamp"',
      "value": '"$value"'
    }
  }')

  if [[ "$respCode" -eq 200 ]]; then
    return 0
  else
    return 1
  fi
}

# read_all fetches every datapoint for an ID over the range [0, now] and
# succeeds only if the number of datapoints returned matches the expected count.
function read_all {
  local namespace=$1
  local id=$2
  local expected_datapoints=$3
  local port=$4

  received_datapoints=$(curl -sSf -X POST 0.0.0.0:"$port"/fetch -d '{
    "namespace": "'"$namespace"'",
    "id": "'"$id"'",
    "rangeStart": 0,
    "rangeEnd": '"$(date +"%s")"'
  }' | jq '.datapoints | length')

  if [[ "$expected_datapoints" -eq "$received_datapoints" ]]; then
    return 0
  else
    return 1
  fi
}

echo "Write data to cluster_a_dbnode01 using the cluster (not node) HTTP endpoint"
write_data "agg" "foo" "$(date +"%s")" 12.3456789 9013

# These reads should pass immediately since the data was written to this
# cluster synchronously.
echo "Expect to read the data back from cluster_a_dbnode01"
read_all "agg" "foo" 1 9012

echo "Expect to read the data back from cluster_a_dbnode02"
read_all "agg" "foo" 1 9022

# These two should eventually succeed once the client asynchronously
# dual-writes to the second cluster.
echo "Wait for the data to become available (via async dual-writing) from cluster_b_dbnode01"
ATTEMPTS=30 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff \
  read_all "agg" "foo" 1 9112

echo "Wait for the data to become available (via async dual-writing) from cluster_b_dbnode02"
ATTEMPTS=30 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff \
  read_all "agg" "foo" 1 9122