#!/usr/bin/env bash
#
# scripts/docker-integration-tests/prometheus_replication/test.sh
#
# Integration test: brings up two independent M3 clusters (dbnode +
# coordinator each) with docker-compose and verifies that a datapoint
# written to the first cluster's coordinator is replicated (forwarded)
# to the second cluster and becomes readable there.
#
# Requires: M3_PATH pointing at the m3 repository checkout; docker and
# docker-compose on PATH. Sources retry_with_backoff and
# setup_single_m3db_node from common.sh.

set -xe

source "$M3_PATH"/scripts/docker-integration-tests/common.sh
REVISION=$(git rev-parse HEAD)
COMPOSE_FILE="$M3_PATH"/scripts/docker-integration-tests/prometheus_replication/docker-compose.yml
# quay.io/m3db/prometheus_remote_client_golang @ v0.4.3
PROMREMOTECLI_IMAGE=quay.io/m3db/prometheus_remote_client_golang:v0.4.3
JQ_IMAGE=realguess/jq:1.4@sha256:300c5d9fb1d74154248d155ce182e207cf6630acccbaadd0168e18b15bfaa786
export REVISION

echo "Pull containers required for test"
docker pull "$PROMREMOTECLI_IMAGE"
docker pull "$JQ_IMAGE"

echo "Run m3dbnode and m3coordinator containers"
docker-compose -f "$COMPOSE_FILE" up -d dbnode01
docker-compose -f "$COMPOSE_FILE" up -d dbnode02
docker-compose -f "$COMPOSE_FILE" up -d coordinator01
docker-compose -f "$COMPOSE_FILE" up -d coordinator02

# Tear down all containers when the script exits, whatever the outcome.
function defer {
  # CI fails to stop all containers sometimes; don't let that fail the trap.
  docker-compose -f "$COMPOSE_FILE" down || echo "unable to shutdown containers"
}
trap defer EXIT

echo "Setup dbnode in first cluster"
DBNODE_HOST=dbnode01 \
DBNODE_PORT=9000 \
DBNODE_HEALTH_PORT=9002 \
COORDINATOR_PORT=7201 \
  setup_single_m3db_node

echo "Setup dbnode in second cluster"
DBNODE_HOST=dbnode02 \
DBNODE_PORT=9000 \
DBNODE_HEALTH_PORT=19002 \
COORDINATOR_PORT=17201 \
  setup_single_m3db_node

#######################################
# Write a single datapoint through the first cluster's coordinator via the
# prometheus remote-write client container, then assert on the client's
# reported success flag and HTTP status code.
# Arguments:
#   $1 - metric name
#   $2 - datapoint timestamp (unix seconds; the client also appears to
#        accept the literal "now" — see caller)
#   $3 - datapoint value
#   $4 - expected .success value ("true"/"false")
#   $5 - error message to print if success mismatches
#   $6 - expected .statusCode value (e.g. 200)
#   $7 - error message to print if status mismatches
# Returns: 0 when both expectations hold, 1 otherwise.
#######################################
function prometheus_remote_write {
  local metric_name=$1
  local datapoint_timestamp=$2
  local datapoint_value=$3
  local expect_success=$4
  local expect_success_err=$5
  local expect_status=$6
  local expect_status_err=$7

  # Find the compose network so the client container can reach coordinator01
  # by service name.
  local network_name="prometheus_replication"
  local network
  network=$(docker network ls | grep -F "$network_name" | tr -s ' ' | cut -f 1 -d ' ' | tail -n 1)

  # The client exits non-zero on a failed write; swallow that with || true so
  # we can assert on the JSON it printed instead of dying under set -e.
  local out
  out=$((docker run -it --rm --network "$network" \
    "$PROMREMOTECLI_IMAGE" \
    -u http://coordinator01:7201/api/v1/prom/remote/write \
    -t __name__:"${metric_name}" \
    -d "${datapoint_timestamp}","${datapoint_value}" | grep -v promremotecli_log) || true)

  local success status
  success=$(echo "$out" | grep -v promremotecli_log | docker run --rm -i "$JQ_IMAGE" jq .success)
  status=$(echo "$out" | grep -v promremotecli_log | docker run --rm -i "$JQ_IMAGE" jq .statusCode)
  if [[ "$success" != "$expect_success" ]]; then
    echo "$expect_success_err"
    return 1
  fi
  if [[ "$status" != "$expect_status" ]]; then
    echo "${expect_status_err}: actual=${status}"
    return 1
  fi
  echo "Returned success=${success}, status=${status} as expected"
  return 0
}

#######################################
# End-to-end check: both coordinators answer queries, a write to the first
# cluster succeeds, and the datapoint eventually becomes readable from the
# second cluster's coordinator.
#######################################
function test_replication_forwarding {
  local now
  now=$(date +"%s")

  # Make sure both are up (otherwise forwarding could fail).
  echo "Test both clusters responding to queries"
  ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
    '[[ $(curl -s 0.0.0.0:7201/api/v1/query?query=any | jq -r ".data.result | length") -eq 0 ]]'
  ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
    '[[ $(curl -s 0.0.0.0:17201/api/v1/query?query=any | jq -r ".data.result | length") -eq 0 ]]'

  # Test writing.
  # NOTE(review): the literal string "now" is passed as the timestamp, not
  # "$now" — presumably the remote-write client interprets "now" as the
  # current time and the $now variable is unused; confirm against the client.
  echo "Test write data to first cluster"
  prometheus_remote_write \
    "foo_replicate" now 42.42 \
    true "Expected request to succeed" \
    200 "Expected request to return status code 200"

  # Test queries can eventually read back replicated data from second
  # cluster using port 17201 from the second cluster's coordinator
  echo "Test read replicated data"
  ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
    '[[ $(curl -s 0.0.0.0:17201/api/v1/query?query=foo_replicate | jq -r ".data.result | length") -gt 0 ]]'
}

# Run all tests
test_replication_forwarding