github.com/redhat-appstudio/e2e-tests@v0.0.0-20240520140907-9709f6f59323/tests/load-tests/run.sh

#!/bin/bash
export MY_GITHUB_ORG GITHUB_TOKEN
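# MY_GITHUB_ORG and GITHUB_TOKEN are expected in the caller's environment; exporting
# them here makes them visible to the child processes started below (go run loadtest.go,
# clear.sh).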

# Check if the RANDOM_PREFIX environment variable is declared. If it is, include the -r flag when invoking loadtest.go.
if [ -n "${RANDOM_PREFIX+x}" ]; then
    RANDOM_PREFIX_FLAG="-r"
else
    RANDOM_PREFIX_FLAG=""
fi
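# Example invocation with illustrative values (any of these may be omitted to use the defaults below):
#   USER_PREFIX=loadtest USERS_PER_THREAD=10 THREADS=2 RANDOM_PREFIX=1 ./run.sh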

TEKTON_PERF_PROFILE_CPU_PERIOD=${TEKTON_PERF_PROFILE_CPU_PERIOD:-300}

output_dir="${OUTPUT_DIR:-.}"
USER_PREFIX=${USER_PREFIX:-testuser}
# The maximum length of a compliant username is 20 characters. A "-XXXXX-XXXX" suffix is added to each
# test user's name, so the maximum length of the prefix is 9.
# The random string addition does not apply to OpenShift-CI load tests.
# Therefore: for OpenShift presubmit jobs the "-XXXXX" part of the suffix is the PR number,
# while for non-OpenShift-CI load tests it is a 5-character random string.
# The trailing "XXXX" is the running index appended to each test user in cmd/loadTests.go.
# Note: run-max-concurrency.sh is used for the max-concurrency type of OpenShift-CI jobs and was not changed.
# See https://github.com/codeready-toolchain/toolchain-common/blob/master/pkg/usersignup/usersignup.go#L16
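# Worked example (illustrative): prefix "testuser9" (9 chars) + "-abcde" (dash + 5-char random
# string, 6 chars) + "-0001" (dash + 4-char user index, 5 chars) = 20 characters in total.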

# When adding a random prefix, the user prefix may be at most 9 characters long
if [ "${RANDOM_PREFIX_FLAG}" == "-r" ] && [ ${#USER_PREFIX} -gt 9 ]; then
    echo "Maximal allowed length of user prefix is 9 characters. The '$USER_PREFIX' length of ${#USER_PREFIX} exceeds the limit."
    exit 1
# When not adding a random prefix, the user prefix may be at most 15 characters long
elif [ "${RANDOM_PREFIX_FLAG}" == "" ] && [ ${#USER_PREFIX} -gt 15 ]; then
    echo "Maximal allowed length of user prefix is 15 characters. The '$USER_PREFIX' length of ${#USER_PREFIX} exceeds the limit."
    exit 1
else
    ## Enable CPU profiling in Tekton
    if [ "${TEKTON_PERF_ENABLE_CPU_PROFILING:-}" == "true" ]; then
        echo "Starting CPU profiling with pprof"
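        # Each profile is fetched with curl inside the target pod and piped through base64
        # so the binary pprof data survives the `oc exec` stream; it is decoded locally, and
        # the PID of the backgrounded pipeline is saved so the script can wait for it after
        # the load test finishes.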
        for p in $(oc get pods -n openshift-pipelines -l app=tekton-pipelines-controller -o name); do
            pod="${p##*/}"
            file="tekton-pipelines-controller.$pod.cpu-profile"
            oc exec -n openshift-pipelines "$p" -- bash -c "curl -SsL --max-time $((TEKTON_PERF_PROFILE_CPU_PERIOD + 10)) localhost:8008/debug/pprof/profile?seconds=${TEKTON_PERF_PROFILE_CPU_PERIOD} | base64" | base64 -d >"$output_dir/$file.pprof" &
            echo $! >"$output_dir/$file.pid"
        done
        for p in $(oc get pods -n tekton-results -l app.kubernetes.io/name=tekton-results-watcher -o name); do
            pod="${p##*/}"
            file="tekton-results-watcher.$pod.cpu-profile"
            oc exec -n tekton-results "$p" -c watcher -- bash -c "curl -SsL --max-time $((TEKTON_PERF_PROFILE_CPU_PERIOD + 10)) localhost:8008/debug/pprof/profile?seconds=${TEKTON_PERF_PROFILE_CPU_PERIOD} | base64" | base64 -d >"$output_dir/$file.pprof" &
            echo $! >"$output_dir/$file.pid"
        done
    fi
    ## Enable memory profiling in Tekton
    if [ "${TEKTON_PERF_ENABLE_MEMORY_PROFILING:-}" == "true" ]; then
        echo "Starting memory profiling of Tekton controller with pprof"
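        # Heap profiles are collected the same way as the CPU profiles above; the "seconds"
        # query parameter makes pprof return a delta heap profile over that window, and the
        # duration reuses TEKTON_PERF_PROFILE_CPU_PERIOD.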
        for p in $(oc get pods -n openshift-pipelines -l app=tekton-pipelines-controller -o name); do
            pod="${p##*/}"
            file="tekton-pipelines-controller.$pod.memory-profile"
            oc exec -n openshift-pipelines "$p" -- bash -c "curl -SsL --max-time $((TEKTON_PERF_PROFILE_CPU_PERIOD + 10)) localhost:8008/debug/pprof/heap?seconds=${TEKTON_PERF_PROFILE_CPU_PERIOD} | base64" | base64 -d >"$output_dir/$file.pprof" &
            echo $! >"$output_dir/$file.pid"
        done
        echo "Starting memory profiling of Tekton results watcher with pprof"
        for p in $(oc get pods -n tekton-results -l app.kubernetes.io/name=tekton-results-watcher -o name); do
            pod="${p##*/}"
            file="tekton-results-watcher.$pod.memory-profile"
            oc exec -n tekton-results "$p" -c watcher -- bash -c "curl -SsL --max-time $((TEKTON_PERF_PROFILE_CPU_PERIOD + 10)) localhost:8008/debug/pprof/heap?seconds=${TEKTON_PERF_PROFILE_CPU_PERIOD} | base64" | base64 -d >"$output_dir/$file.pprof" &
            echo $! >"$output_dir/$file.pid"
        done
    fi
    ## Switch KubeScheduler Debugging on
    if [ -n "$KUBE_SCHEDULER_LOG_LEVEL" ]; then
        echo "Checking KubeScheduler log level"
        if [ "$(oc get KubeScheduler cluster -o jsonpath="{.spec.logLevel}")" == "$KUBE_SCHEDULER_LOG_LEVEL" ]; then
            echo "KubeScheduler log level is already at $KUBE_SCHEDULER_LOG_LEVEL level"
        else
            echo "Setting KubeScheduler log level to $KUBE_SCHEDULER_LOG_LEVEL"
            oc patch KubeScheduler cluster --type=json -p='[{"op": "add", "path": "/spec/logLevel", "value": "'"$KUBE_SCHEDULER_LOG_LEVEL"'"}]'
            echo "Waiting for kube scheduler to start NodeInstallerProgressing"
            oc wait --for=condition=NodeInstallerProgressing kubescheduler/cluster -n openshift-kube-scheduler --timeout=300s
        fi
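        # Patching spec.logLevel rolls out a new revision of the static kube-scheduler pods;
        # the wait above catches the rollout starting and the wait below lets it settle
        # before log capture begins.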
        echo "Waiting for all kube scheduler pods to finish NodeInstallerProgressing"
        oc wait --for=condition=NodeInstallerProgressing=False kubescheduler/cluster -n openshift-kube-scheduler --timeout=900s
        echo "All kube scheduler pods are now at log level $KUBE_SCHEDULER_LOG_LEVEL, starting to capture logs"
        oc logs -f -n openshift-kube-scheduler --prefix -l app=openshift-kube-scheduler --tail=-1 >"$output_dir/openshift-kube-scheduler.log" 2>&1 &
        KUBE_SCHEDULER_LOG_PID=$!
    fi
    ## Run the actual load test
    ## To enable the progress bars, set ENABLE_PROGRESS_BARS=true (passed to loadtest.go as --enable-progress-bars)
    go run loadtest.go \
        --component-repo "${COMPONENT_REPO:-https://github.com/devfile-samples/devfile-sample-code-with-quarkus}" \
        --username "$USER_PREFIX" \
        --users "${USERS_PER_THREAD:-50}" \
        --test-scenario-git-url "${TEST_SCENARIO_GIT_URL:-https://github.com/konflux-ci/integration-examples.git}" \
        --test-scenario-revision "${TEST_SCENARIO_REVISION:-main}" \
        --test-scenario-path-in-repo "${TEST_SCENARIO_PATH_IN_REPO:-pipelines/integration_resolver_pipeline_pass.yaml}" \
        -w="${WAIT_PIPELINES:-true}" \
        -i="${WAIT_INTEGRATION_TESTS:-true}" \
        -d="${WAIT_DEPLOYMENTS:-true}" \
        -l \
        -o "$output_dir" \
        -t "${THREADS:-1}" \
        $RANDOM_PREFIX_FLAG \
        --disable-metrics="${DISABLE_METRICS:-false}" \
        --pushgateway-url "${PUSHGATEWAY_URL:-rhtapqe.com}" \
        --enable-progress-bars="${ENABLE_PROGRESS_BARS:-false}" \
        --pipeline-skip-initial-checks="${PIPELINE_SKIP_INITIAL_CHECKS:-true}"

    DRY_RUN=false ./clear.sh "$USER_PREFIX"
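    # clear.sh (in this directory) is expected to remove the per-user resources created under
    # this prefix; DRY_RUN=false presumably makes it actually delete them rather than only report.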

    if [ "${TEKTON_PERF_ENABLE_CPU_PROFILING:-}" == "true" ] || [ "${TEKTON_PERF_ENABLE_MEMORY_PROFILING:-}" == "true" ]; then
        echo "Waiting for the Tekton profiling to finish up to ${TEKTON_PERF_PROFILE_CPU_PERIOD}s"
        for pid_file in $(find "$output_dir" -name 'tekton*.pid'); do
            wait "$(cat "$pid_file")"
            rm -rvf "$pid_file"
        done
        echo "Getting Tekton controller goroutine dump"
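        # goroutine?debug=0 returns the binary pprof proto, debug=1 a plain-text summary with
        # one entry per unique stack, and debug=2 a full text dump of every goroutine.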
        for p in $(oc get pods -n openshift-pipelines -l app=tekton-pipelines-controller -o name); do
            pod="${p##*/}"
            for i in 0 1 2; do
                file="tekton-pipelines-controller.$pod.goroutine-dump-$i"
                oc exec -n openshift-pipelines "$p" -- bash -c "curl -SsL localhost:8008/debug/pprof/goroutine?debug=$i | base64" | base64 -d >"$output_dir/$file.pprof"
            done
        done
        echo "Getting Tekton results watcher goroutine dump"
        for p in $(oc get pods -n tekton-results -l app.kubernetes.io/name=tekton-results-watcher -o name); do
            pod="${p##*/}"
            for i in 0 1 2; do
                file="tekton-results-watcher.$pod.goroutine-dump-$i"
                oc exec -n tekton-results "$p" -c watcher -- bash -c "curl -SsL localhost:8008/debug/pprof/goroutine?debug=$i | base64" | base64 -d >"$output_dir/$file.pprof"
            done
        done
    fi
    if [ -n "$KUBE_SCHEDULER_LOG_LEVEL" ]; then
        echo "Killing the kube scheduler log collector"
        kill "$KUBE_SCHEDULER_LOG_PID"
    fi
fi