github.com/m3db/m3@v1.5.0/scripts/docker-integration-tests/prom_remote_write_backend/utils.sh

#!/usr/bin/env bash

set -xe

source "$M3_PATH"/scripts/docker-integration-tests/common.sh

# Writes a single datapoint for the given metric through m3coordinator's
# Prometheus remote write endpoint using the promremotecli image, then checks
# the CLI-reported success flag and HTTP status code against expectations.
function prometheus_remote_write {
  local metric_name=$1
  local datapoint_timestamp=$2
  local datapoint_value=$3
  local expect_success=$4
  local expect_success_err=$5
  local expect_status=$6
  local expect_status_err=$7

  network_name="prom_remote_write_backend_backend"
  network=$(docker network ls | grep -F $network_name | tr -s ' ' | cut -f 1 -d ' ' | tail -n 1)

  out=$( (docker run -it --rm --network "$network" \
    "$PROMREMOTECLI_IMAGE" \
    -u http://m3coordinator01:7201/api/v1/prom/remote/write \
    -t __name__:"${metric_name}" \
    -d "${datapoint_timestamp}","${datapoint_value}" | grep -v promremotecli_log) || true)

  success=$(echo "$out" | grep -v promremotecli_log | jq .success)
  status=$(echo "$out" | grep -v promremotecli_log | jq .statusCode)
  if [[ "$success" != "$expect_success" ]]; then
    echo "$expect_success_err"
    return 1
  fi
  if [[ "$status" != "$expect_status" ]]; then
    echo "${expect_status_err}: actual=${status}"
    return 1
  fi
  echo "Returned success=${success}, status=${status} as expected"
  return 0
}

function wait_until_ready {
  host=$1
  # Check readiness probe eventually succeeds
  echo "Check readiness probe eventually succeeds"
  ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
    "[[ \$(curl --write-out \"%{http_code}\" --silent --output /dev/null $host/ready) -eq \"200\" ]]"
}

# Polls the Prometheus query API on $host until the metric reports a value > 0.
function query_metric {
  metric_name=$1
  host=$2
  ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
    "[[ \$(curl -sSf $host/api/v1/query?query=$metric_name | jq -r .data.result[0].value[1]) -gt 0 ]]"
}

# Polls the m3aggregator status endpoints until one instance reports leadership.
function wait_until_leader_elected {
  ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
    "[[ \$(curl localhost:6001/status localhost:6002/status | grep leader) ]]"
}

# Tears down the docker-compose environment; dumps logs first if the test failed.
function cleanup {
  local compose_file=$1
  local success=$2
  if [[ "$success" != "true" ]]; then
    echo "Test failure, printing docker-compose logs"
    docker-compose -f "${compose_file}" logs
  fi

  docker-compose -f "${compose_file}" down || echo "unable to shutdown containers" # CI fails to stop all containers sometimes
}

# Bootstraps the m3aggregator/m3coordinator placements and m3msg topics via the
# coordinator admin API.
function initialize_m3_via_coordinator_admin {
  echo "Initializing aggregator topology"
  curl -vvvsSf -X POST -H "Cluster-Environment-Name: override_test_env" localhost:7201/api/v1/services/m3aggregator/placement/init -d '{
    "num_shards": 64,
    "replication_factor": 2,
    "instances": [
      {
        "id": "m3aggregator01",
        "isolation_group": "availability-zone-a",
        "zone": "embedded",
        "weight": 100,
        "endpoint": "m3aggregator01:6000",
        "hostname": "m3aggregator01",
        "port": 6000
      },
      {
        "id": "m3aggregator02",
        "isolation_group": "availability-zone-b",
        "zone": "embedded",
        "weight": 100,
        "endpoint": "m3aggregator02:6000",
        "hostname": "m3aggregator02",
        "port": 6000
      }
    ]
  }'

  echo "Initializing m3msg inbound topic for m3aggregator ingestion from m3coordinators"
  curl -vvvsSf -X POST -H "Topic-Name: aggregator_ingest" -H "Cluster-Environment-Name: override_test_env" localhost:7201/api/v1/topic/init -d '{
    "numberOfShards": 64
  }'

  # Do this after placement and topic for m3aggregator is created.
  echo "Adding m3aggregator as a consumer to the aggregator ingest topic"
  curl -vvvsSf -X POST -H "Topic-Name: aggregator_ingest" -H "Cluster-Environment-Name: override_test_env" localhost:7201/api/v1/topic -d '{
    "consumerService": {
      "serviceId": {
        "name": "m3aggregator",
        "environment": "override_test_env",
        "zone": "embedded"
      },
      "consumptionType": "REPLICATED",
      "messageTtlNanos": "600000000000"
    }
  }' # msgs will be discarded after 600000000000ns = 10mins

  echo "Initializing m3coordinator topology"
  curl -vvvsSf -X POST localhost:7201/api/v1/services/m3coordinator/placement/init -d '{
    "instances": [
      {
        "id": "m3coordinator01",
        "zone": "embedded",
        "endpoint": "m3coordinator01:7507",
        "hostname": "m3coordinator01",
        "port": 7507
      }
    ]
  }'
  echo "Done initializing m3coordinator topology"

  echo "Validating m3coordinator topology"
  [ "$(curl -sSf localhost:7201/api/v1/services/m3coordinator/placement | jq .placement.instances.m3coordinator01.id)" == '"m3coordinator01"' ]
  echo "Done validating topology"

  # Do this after placement for m3coordinator is created.
  echo "Initializing m3msg outbound topic for m3coordinator ingestion from m3aggregators"
  curl -vvvsSf -X POST -H "Topic-Name: aggregated_metrics" -H "Cluster-Environment-Name: override_test_env" 0.0.0.0:7201/api/v1/topic/init -d '{
    "numberOfShards": 64
  }'

  echo "Adding m3coordinator as a consumer to the aggregator publish topic"
  curl -vvvsSf -X POST -H "Topic-Name: aggregated_metrics" -H "Cluster-Environment-Name: override_test_env" 0.0.0.0:7201/api/v1/topic -d '{
    "consumerService": {
      "serviceId": {
        "name": "m3coordinator",
        "environment": "default_env",
        "zone": "embedded"
      },
      "consumptionType": "SHARED",
      "messageTtlNanos": "600000000000"
    }
  }' # msgs will be discarded after 600000000000ns = 10mins
}
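
# Illustrative only (not part of the original file): a minimal sketch of how a
# driver test script might wire these helpers together after sourcing utils.sh.
# The compose file path, the localhost:7201 readiness/query host, the metric
# name, datapoint value, and expectation messages below are assumptions made
# for the example, not values taken from the repository's test scripts.
function example_usage_sketch {
  # Hypothetical compose file for this test suite.
  local compose_file="$M3_PATH"/scripts/docker-integration-tests/prom_remote_write_backend/docker-compose.yml
  docker-compose -f "$compose_file" up -d

  wait_until_ready "localhost:7201"   # host is an assumption for the example
  wait_until_leader_elected           # one m3aggregator instance must report leader
  initialize_m3_via_coordinator_admin

  # Write a single datapoint and expect success=true with a 200 status code.
  prometheus_remote_write \
    example_metric "$(date +%s)" 42 \
    true "expected the remote write to succeed" \
    200 "expected the remote write to return status code 200"

  # Confirm the datapoint becomes queryable via the query API.
  query_metric example_metric localhost:7201

  cleanup "$compose_file" "true"
}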