github.com/redhat-appstudio/e2e-tests@v0.0.0-20240520140907-9709f6f59323/tests/load-tests/run-max-concurrency.sh

#!/bin/bash
# Runs the load test at increasing concurrency levels (MAX_CONCURRENCY_STEPS) until the
# workload KPI exceeds THRESHOLD or MAX_THREADS is reached, optionally collecting Tekton
# pprof profiles and goroutine dumps for each iteration.
export MY_GITHUB_ORG GITHUB_TOKEN

output_dir="${OUTPUT_DIR:-.}"

export TEKTON_PERF_PROFILE_CPU_PERIOD=${TEKTON_PERF_PROFILE_CPU_PERIOD:-${THRESHOLD:-300}}
USER_PREFIX=${USER_PREFIX:-testuser}

OPENSHIFT_API="${OPENSHIFT_API:-$(yq '.clusters[0].cluster.server' "$KUBECONFIG")}"
OPENSHIFT_USERNAME="${OPENSHIFT_USERNAME:-kubeadmin}"
OPENSHIFT_PASSWORD="${OPENSHIFT_PASSWORD:-$(cat "$KUBEADMIN_PASSWORD_FILE")}"

# Run a single load-test iteration with the given number of threads, optionally capturing
# CPU/memory profiles and goroutine dumps from the Tekton controller and results watcher.
load_test() {
    local threads iteration index iteration_index
    threads=${1:-1}
    iteration=$(printf "%04d" "${2:-1}")
    index=$(printf "%04d" "$threads")
    iteration_index="${iteration}-${index}"
    ## Enable CPU profiling in Tekton
    if [ "${TEKTON_PERF_ENABLE_CPU_PROFILING:-}" == "true" ]; then
        echo "Starting CPU profiling with pprof"
        for p in $(oc get pods -n openshift-pipelines -l app=tekton-pipelines-controller -o name); do
            pod="${p##*/}"
            file="tekton-pipelines-controller.$pod.cpu-profile.$iteration_index"
            oc exec -n openshift-pipelines "$p" -- bash -c "curl -SsL --max-time $((TEKTON_PERF_PROFILE_CPU_PERIOD + 10)) localhost:8008/debug/pprof/profile?seconds=${TEKTON_PERF_PROFILE_CPU_PERIOD} | base64" | base64 -d >"$output_dir/$file.pprof" &
            echo $! >"$output_dir/$file.pid"
        done
        for p in $(oc get pods -n tekton-results -l app.kubernetes.io/name=tekton-results-watcher -o name); do
            pod="${p##*/}"
            file=tekton-results-watcher.$pod.cpu-profile.$iteration_index
            oc exec -n tekton-results "$p" -c watcher -- bash -c "curl -SsL --max-time $((TEKTON_PERF_PROFILE_CPU_PERIOD + 10)) localhost:8008/debug/pprof/profile?seconds=${TEKTON_PERF_PROFILE_CPU_PERIOD} | base64" | base64 -d >"$output_dir/$file.pprof" &
            echo $! >"$output_dir/$file.pid"
        done
    fi
    ## Enable memory profiling in Tekton
    if [ "${TEKTON_PERF_ENABLE_MEMORY_PROFILING:-}" == "true" ]; then
        echo "Starting memory profiling of Tekton controller with pprof"
        for p in $(oc get pods -n openshift-pipelines -l app=tekton-pipelines-controller -o name); do
            pod="${p##*/}"
            file="tekton-pipelines-controller.$pod.memory-profile.$iteration_index"
            oc exec -n openshift-pipelines "$p" -- bash -c "curl -SsL --max-time $((TEKTON_PERF_PROFILE_CPU_PERIOD + 10)) localhost:8008/debug/pprof/heap?seconds=${TEKTON_PERF_PROFILE_CPU_PERIOD} | base64" | base64 -d >"$output_dir/$file.pprof" &
            echo $! >"$output_dir/$file.pid"
        done
        echo "Starting memory profiling of Tekton results watcher with pprof"
        for p in $(oc get pods -n tekton-results -l app.kubernetes.io/name=tekton-results-watcher -o name); do
            pod="${p##*/}"
            file=tekton-results-watcher.$pod.memory-profile.$iteration_index
            oc exec -n tekton-results "$p" -c watcher -- bash -c "curl -SsL --max-time $((TEKTON_PERF_PROFILE_CPU_PERIOD + 10)) localhost:8008/debug/pprof/heap?seconds=${TEKTON_PERF_PROFILE_CPU_PERIOD} | base64" | base64 -d >"$output_dir/$file.pprof" &
            echo $! >"$output_dir/$file.pid"
        done
    fi
    rm -rvf "$output_dir/load-test.json"
    rm -rvf "$output_dir/load-test.log"
    go run loadtest.go \
        --component-repo "${COMPONENT_REPO:-https://github.com/nodeshift-starters/devfile-sample.git}" \
        --username "$USER_PREFIX-$index" \
        --users 1 \
        -w="${WAIT_PIPELINES:-true}" \
        -i="${WAIT_INTEGRATION_TESTS:-false}" \
        -d="${WAIT_DEPLOYMENTS:-false}" \
        -l \
        -o "$output_dir" \
        -t "$threads" \
        --disable-metrics="${DISABLE_METRICS:-false}" \
        --pushgateway-url "${PUSHGATEWAY_URL:-rhtapqe.com}" \
        --enable-progress-bars="${ENABLE_PROGRESS_BARS:-false}" \
        --pipeline-skip-initial-checks="${PIPELINE_SKIP_INITIAL_CHECKS:-true}"
    if [ "${TEKTON_PERF_ENABLE_CPU_PROFILING:-}" == "true" ] || [ "${TEKTON_PERF_ENABLE_MEMORY_PROFILING:-}" == "true" ]; then
        echo "Waiting up to ${TEKTON_PERF_PROFILE_CPU_PERIOD}s for the Tekton profiling to finish"
        for pid_file in $(find "$output_dir" -name 'tekton*.pid'); do
            wait "$(cat "$pid_file")"
            rm -rvf "$pid_file"
        done
        echo "Getting Tekton controller goroutine dump"
        for p in $(oc get pods -n openshift-pipelines -l app=tekton-pipelines-controller -o name); do
            pod="${p##*/}"
            for i in 0 1 2; do
                file="tekton-pipelines-controller.$pod.goroutine-dump-$i.$iteration_index"
                oc exec -n openshift-pipelines "$p" -- bash -c "curl -SsL localhost:8008/debug/pprof/goroutine?debug=$i | base64" | base64 -d >"$output_dir/$file.pprof"
            done
        done
        echo "Getting Tekton results watcher goroutine dump"
        for p in $(oc get pods -n tekton-results -l app.kubernetes.io/name=tekton-results-watcher -o name); do
            pod="${p##*/}"
            for i in 0 1 2; do
                file="tekton-results-watcher.$pod.goroutine-dump-$i.$iteration_index"
                oc exec -n tekton-results "$p" -c watcher -- bash -c "curl -SsL localhost:8008/debug/pprof/goroutine?debug=$i | base64" | base64 -d >"$output_dir/$file.pprof"
            done
        done
    fi
}

# Strip finalizers from every instance of the given resource type that lives in a
# namespace with the user prefix, so the subsequent delete can complete.
remove_finalizers() {
    res=$1
    while [ "$(oc get "$res" -A -o json | jq -rc '.items[] | select(.metadata.namespace | startswith("'"$USER_PREFIX"'"))' | wc -l)" != "0" ]; do
        echo "## Removing finalizers for all $res"
        while read -r line; do
            echo "$line '{\"metadata\":{\"finalizers\":[]}}' --type=merge;"
        done <<<"$(oc get "$res" -A -o json | jq -rc '.items[] | select(.metadata.namespace | startswith("'"$USER_PREFIX"'")) | "oc patch '"$res"' " + .metadata.name + " -n " + .metadata.namespace + " -p "')" | bash -s
    done
}

# Delete leftovers from previous iterations (AppStudio resources, user signups) and wait
# until all namespaces with the user prefix are gone.
clean_namespaces() {
    echo "Deleting resources from previous steps"
    for res in pipelineruns.tekton.dev components.appstudio.redhat.com componentdetectionqueries.appstudio.redhat.com snapshotenvironmentbindings.appstudio.redhat.com applications.appstudio.redhat.com; do
        echo -e " * $res"
        if [ -n "${DELETE_INCLUDE_FINALIZERS:-}" ]; then
            remove_finalizers "$res" &
            echo "## Deleting all $res"
        fi
        oc get "$res" -A -o json | jq -rc '.items[] | select(.metadata.namespace | startswith("'"$USER_PREFIX"'"))| "oc delete '"$res"' " + .metadata.name + " -n " + .metadata.namespace + " --ignore-not-found=true;"' | bash -s
    done
    oc get usersignups.toolchain.dev.openshift.com -A -o name | grep "$USER_PREFIX" | xargs oc delete -n toolchain-host-operator --ignore-not-found=true
    attempts=60
    attempt=1
    sleep="5s"
    while [ "$attempt" -le "$attempts" ]; do
        echo " * Waiting $sleep until all namespaces with '$USER_PREFIX' prefix are gone [attempt $attempt/$attempts]"
        oc get ns | grep -E "^$USER_PREFIX" >/dev/null 2>&1 || break
        sleep "$sleep"
        attempt=$((attempt + 1))
    done
    if [ "$attempt" -le "$attempts" ]; then
        echo " * All the namespaces with '$USER_PREFIX' are gone!"
    else
        echo " * WARNING: Timeout waiting for namespaces with '$USER_PREFIX' to be gone. The following namespaces still exist:"
        oc get ns | grep -E "^$USER_PREFIX"
    fi
}

# Step through MAX_CONCURRENCY_STEPS, cleaning up and re-running the load test at each
# concurrency level, and record the results in load-tests.max-concurrency.json.
max_concurrency() {
    local iteration index iteration_index
    # The maximum length of a compliant username is 20 characters. A "-XXXX-XXXX" suffix is
    # added to the test users' names, so the maximum length of the prefix is 10.
    # See https://github.com/codeready-toolchain/toolchain-common/blob/master/pkg/usersignup/usersignup.go#L16
    if [ ${#USER_PREFIX} -gt 10 ]; then
        echo "Maximal allowed length of user prefix is 10 characters. The length of '$USER_PREFIX' (${#USER_PREFIX}) exceeds the limit."
        exit 1
    else
        output="$output_dir/load-tests.max-concurrency.json"
        IFS="," read -r -a maxConcurrencySteps <<<"$(echo "${MAX_CONCURRENCY_STEPS:-1\ 5\ 10\ 25\ 50\ 100\ 150\ 200}" | sed 's/ /,/g')"
        maxThreads=${MAX_THREADS:-10}
        threshold=${THRESHOLD:-300}
        echo '{"startTimestamp":"'"$(date +%FT%T%:z)"'", "maxThreads": '"$maxThreads"', "maxConcurrencySteps": "'"${maxConcurrencySteps[*]}"'", "threshold": '"$threshold"', "maxConcurrencyReached": 0, "computedConcurrency": 0, "workloadKPI": 0, "endTimestamp": "", "errorsTotal": -1}' | jq >"$output"
        iteration=0
        for t in "${maxConcurrencySteps[@]}"; do
            iteration="$((iteration + 1))"
            if (("$t" > "$maxThreads")); then
                break
            fi
            oc login "$OPENSHIFT_API" -u "$OPENSHIFT_USERNAME" -p "$OPENSHIFT_PASSWORD"
            clean_namespaces
            load_test "$t" "$iteration"
            iteration_index="$(printf "%04d" "$iteration")-$(printf "%04d" "$t")"
            jq ".metadata.\"max-concurrency\".iteration = \"$(printf "%04d" "$iteration")\"" "$output_dir/load-tests.json" >"$output_dir/$$.json" && mv -f "$output_dir/$$.json" "$output_dir/load-tests.json"
            cp -vf "$output_dir/load-tests.json" "$output_dir/load-tests.max-concurrency.$iteration_index.json"
            cp -vf "$output_dir/load-tests.log" "$output_dir/load-tests.max-concurrency.$iteration_index.log"
            workloadKPI=$(jq '.workloadKPI' "$output_dir/load-tests.json")
            if awk "BEGIN { exit !($workloadKPI > $threshold)}"; then
                echo "The average time a workload took to succeed (${workloadKPI}s) has exceeded the threshold of ${threshold}s with $t threads."
                # Linearly interpolate between the last passing step (threadsOld, workloadKPIOld)
                # and the current step ($t, $workloadKPI) to estimate the concurrency at which
                # the workload KPI crosses the threshold.
                workloadKPIOld=$(jq '.workloadKPI' "$output")
                threadsOld=$(jq '.maxConcurrencyReached' "$output")
                computedConcurrency=$(python3 -c "import sys; t = float(sys.argv[1]); a = float(sys.argv[2]); b = float(sys.argv[3]); c = float(sys.argv[4]); d = float(sys.argv[5]); print((t - b) / ((d - b) / (c - a)) + a)" "$threshold" "$threadsOld" "$workloadKPIOld" "$t" "$workloadKPI")
                jq ".computedConcurrency = $computedConcurrency" "$output" >"$output_dir/$$.json" && mv -f "$output_dir/$$.json" "$output"
                break
            else
                jq ".maxConcurrencyReached = $t" "$output" >"$output_dir/$$.json" && mv -f "$output_dir/$$.json" "$output"
                jq ".workloadKPI = $workloadKPI" "$output" >"$output_dir/$$.json" && mv -f "$output_dir/$$.json" "$output"
                jq ".computedConcurrency = $t" "$output" >"$output_dir/$$.json" && mv -f "$output_dir/$$.json" "$output"
                jq '.endTimestamp = "'"$(date +%FT%T%:z)"'"' "$output" >"$output_dir/$$.json" && mv -f "$output_dir/$$.json" "$output"
                errorsTotal=$(jq '.errorsTotal' "$output_dir/load-tests.json")
                jq ".errorsTotal = $errorsTotal" "$output" >"$output_dir/$$.json" && mv -f "$output_dir/$$.json" "$output"
            fi
        done
        DRY_RUN=false ./clear.sh "$USER_PREFIX"
    fi
}

max_concurrency
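
# Example invocation (a sketch only; the values below are illustrative, while the variable
# names are the ones consumed above):
#   MY_GITHUB_ORG=<github-org> GITHUB_TOKEN=<token> \
#   KUBECONFIG=<path-to-kubeconfig> KUBEADMIN_PASSWORD_FILE=<path-to-password-file> \
#   USER_PREFIX=loadtest MAX_THREADS=50 MAX_CONCURRENCY_STEPS="1 5 10 25 50" \
#   THRESHOLD=300 OUTPUT_DIR=./output ./run-max-concurrency.sh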