github.com/redhat-appstudio/e2e-tests@v0.0.0-20230619105049-9a422b2094d7/tests/load-tests/ci-scripts/max-concurrency/collect-results.sh

#!/bin/bash

set -o nounset
set -o errexit
set -o pipefail

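# Load the scenario parameters; the first argument selects the scenario file
# (defaults to "concurrent").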
# shellcheck disable=SC1090
source "/usr/local/ci-secrets/redhat-appstudio-load-test/load-test-scenario.${1:-concurrent}"

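# The second argument is the directory to collect results from
# (defaults to the current directory).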
pushd "${2:-.}"

echo "Collecting load test results"
cp -vf ./tests/load-tests/load-tests.max-concurrency.*.log "$ARTIFACT_DIR"
cp -vf ./tests/load-tests/load-tests.max-concurrency.json "$ARTIFACT_DIR"

echo "Setting up tool to collect monitoring data..."
python3 -m venv venv
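# The venv activate script may reference unset variables, so relax nounset
# while sourcing it.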
set +u
# shellcheck disable=SC1091
source venv/bin/activate
set -u
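# Upgrade pip, then install OPL, which provides the status_data.py tool used below.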
python3 -m pip install -U pip
python3 -m pip install -e "git+https://github.com/redhat-performance/opl.git#egg=opl-rhcloud-perf-team-core&subdirectory=core"

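# Each load-tests.max-concurrency.<index>.json file holds the status data for
# one concurrency step; enrich each one with cluster monitoring data.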
for monitoring_collection_data in ./tests/load-tests/load-tests.max-concurrency.*.json; do
    cp -f "$monitoring_collection_data" "$ARTIFACT_DIR"
    index=$(echo "$monitoring_collection_data" | sed -e 's,.*/load-tests.max-concurrency.\([0-9]\+\).json,\1,')
    monitoring_collection_log="$ARTIFACT_DIR/monitoring-collection.$index.log"

    ## Monitoring data
    echo "Collecting monitoring data for step $index..."
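    # The monitoring window for this step is read back from the status data
    # file; the Thanos query host comes from its route in openshift-monitoring.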
    mstart=$(date --utc --date "$(status_data.py --status-data-file "$monitoring_collection_data" --get timestamp)" --iso-8601=seconds)
    mend=$(date --utc --date "$(status_data.py --status-data-file "$monitoring_collection_data" --get endTimestamp)" --iso-8601=seconds)
    mhost=$(oc -n openshift-monitoring get route -l app.kubernetes.io/name=thanos-query -o json | jq --raw-output '.items[0].spec.host')
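    # Enrich the status data with the measurements defined in
    # cluster_read_config.yaml; debug output goes to the per-step log.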
    status_data.py \
        --status-data-file "$monitoring_collection_data" \
        --additional ./tests/load-tests/cluster_read_config.yaml \
        --monitoring-start "$mstart" \
        --monitoring-end "$mend" \
        --prometheus-host "https://$mhost" \
        --prometheus-port 443 \
        --prometheus-token "$(oc whoami -t)" \
        -d &>"$monitoring_collection_log"
done
set +u
deactivate
set -u

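# csv_delim_quoted is the delimiter as a quoted jq string literal so it can be
# spliced into jq filters; dt_format matches Kubernetes timestamps for jq's strptime.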
csv_delim=";"
csv_delim_quoted="\"$csv_delim\""
dt_format='"%Y-%m-%dT%H:%M:%SZ"'

## Max concurrency scalability
max_concurrency_csv=$ARTIFACT_DIR/max-concurrency.csv
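# Header row for the scalability summary; the jq filter below must emit its
# columns in the same order.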
echo "Threads\
${csv_delim}Errors\
${csv_delim}UserAvgTime\
${csv_delim}UserMaxTime\
${csv_delim}ResourcesAvgTime\
${csv_delim}ResourcesMaxTime\
${csv_delim}PipelineRunAvgTime\
${csv_delim}PipelineRunMaxTime\
${csv_delim}ClusterCPUUsageAvg\
${csv_delim}ClusterDiskUsageAvg\
${csv_delim}ClusterMemoryUsageAvg\
${csv_delim}ClusterPodCountAvg\
${csv_delim}ClusterPVCInUseAvg\
${csv_delim}ClusterPipelineRunCountAvg\
${csv_delim}ClusterPipelineWorkqueueDepthAvg" \
    >"$max_concurrency_csv"
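# One CSV row per concurrency step: per-step error and timing totals plus the
# monitoring means collected above.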
jq -rc "(.threads | tostring) \
    + $csv_delim_quoted + (.errorsTotal | tostring) \
    + $csv_delim_quoted + (.createUserTimeAvg | tostring) \
    + $csv_delim_quoted + (.createUserTimeMax | tostring) \
    + $csv_delim_quoted + (.createResourcesTimeAvg | tostring) \
    + $csv_delim_quoted + (.createResourcesTimeMax | tostring) \
    + $csv_delim_quoted + (.runPipelineSucceededTimeAvg | tostring) \
    + $csv_delim_quoted + (.runPipelineSucceededTimeMax | tostring) \
    + $csv_delim_quoted + (.measurements.cluster_cpu_usage_seconds_total_rate.mean | tostring) \
    + $csv_delim_quoted + (.measurements.cluster_disk_throughput_total.mean | tostring) \
    + $csv_delim_quoted + (.measurements.cluster_memory_usage_rss_total.mean | tostring) \
    + $csv_delim_quoted + (.measurements.cluster_pods_count.mean | tostring) \
    + $csv_delim_quoted + (.measurements.storage_count_attachable_volumes_in_use.mean | tostring) \
    + $csv_delim_quoted + (.measurements.tekton_pipelines_controller_running_pipelineruns_count.mean | tostring) \
    + $csv_delim_quoted + (.measurements.tekton_tekton_pipelines_controller_workqueue_depth.mean | tostring)" \
    ./tests/load-tests/load-tests.max-concurrency.*.json \
    >>"$max_concurrency_csv"

## PipelineRun timestamps
echo "Collecting PipelineRun timestamps..."
pipelinerun_timestamps=$ARTIFACT_DIR/pipelineruns.tekton.dev_timestamps.csv
echo "PipelineRun${csv_delim}Namespace${csv_delim}Succeeded${csv_delim}Reason${csv_delim}Message${csv_delim}Created${csv_delim}Started${csv_delim}FinallyStarted${csv_delim}Completed${csv_delim}Created->Started${csv_delim}Started->FinallyStarted${csv_delim}FinallyStarted->Completed${csv_delim}SucceededDuration${csv_delim}FailedDuration" >"$pipelinerun_timestamps"
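# One row per PipelineRun: raw timestamps plus derived phase durations, with the
# total duration split into Succeeded/Failed columns based on the run's final
# condition. Delimiters inside the message are replaced so they cannot break the CSV.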
jq_cmd=".items[] | (.metadata.name) \
+ $csv_delim_quoted + (.metadata.namespace) \
+ $csv_delim_quoted + (.status.conditions[0].status) \
+ $csv_delim_quoted + (.status.conditions[0].reason) \
+ $csv_delim_quoted + (.status.conditions[0].message|split($csv_delim_quoted)|join(\"_\")) \
+ $csv_delim_quoted + (.metadata.creationTimestamp) \
+ $csv_delim_quoted + (.status.startTime) \
+ $csv_delim_quoted + (.status.finallyStartTime) \
+ $csv_delim_quoted + (.status.completionTime) \
+ $csv_delim_quoted + (if .status.startTime != null and .metadata.creationTimestamp != null then ((.status.startTime | strptime($dt_format) | mktime) - (.metadata.creationTimestamp | strptime($dt_format) | mktime) | tostring) else \"\" end) \
+ $csv_delim_quoted + (if .status.finallyStartTime != null and .status.startTime != null then ((.status.finallyStartTime | strptime($dt_format) | mktime) - (.status.startTime | strptime($dt_format) | mktime) | tostring) else \"\" end) \
+ $csv_delim_quoted + (if .status.completionTime != null and .status.finallyStartTime != null then ((.status.completionTime | strptime($dt_format) | mktime) - (.status.finallyStartTime | strptime($dt_format) | mktime) | tostring) else \"\" end) \
+ $csv_delim_quoted + (if .status.conditions[0].status == \"True\" and .status.completionTime != null and .metadata.creationTimestamp != null then ((.status.completionTime | strptime($dt_format) | mktime) - (.metadata.creationTimestamp | strptime($dt_format) | mktime) | tostring) else \"\" end) \
+ $csv_delim_quoted + (if .status.conditions[0].status == \"False\" and .status.completionTime != null and .metadata.creationTimestamp != null then ((.status.completionTime | strptime($dt_format) | mktime) - (.metadata.creationTimestamp | strptime($dt_format) | mktime) | tostring) else \"\" end)"
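# jq (without -r) emits each row as a quoted JSON string; strip the surrounding
# quotes and the trailing Z from timestamp fields so the CSV stays clean.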
oc get pipelineruns.tekton.dev -A -o json | jq "$jq_cmd" | sed -e 's/^"//' -e 's/"$//' -e "s/Z;/;/g" >>"$pipelinerun_timestamps"

popd