#!/usr/bin/env bash
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

# Extract Jenkins metrics server historic data.
# Useful when resetting the checkmetrics baseline data.

set -e

export KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"

# Base dir where we store the downloaded data.
datadir=$(dirname "$0")/data

# How many recent builds do we evaluate
NUM_BUILDS=5

# The default set of repos (Jenkins jobs) we evaluate
default_repos=()
default_repos+=("kata-containers-2.0-metrics-ubuntu-20-04-PR")
default_repos+=("kata-containers-2.0-tests-metrics-ubuntu-20-04-PR")
repos=()

# Which test results do we evaluate for each build, and the jq query used to
# extract a single representative value from each results file.
tests=()
test_queries=()
tests+=("boot-times")
test_queries+=(".\"boot-times\".Results | [.[] | .\"to-workload\".Result] | add / length")

tests+=("memory-footprint")
test_queries+=(".\"memory-footprint\".Results | .[] | .average.Result")

tests+=("memory-footprint-ksm")
test_queries+=(".\"memory-footprint-ksm\".Results | .[] | .average.Result")

# Base URL of the Jenkins server
url_base="http://jenkins.katacontainers.io/job"

# Where we find the recent build number information
url_index="api/json"

# Where we get the actual build results from
url_artifacts="artifact/go/src/github.com/kata-containers/tests/metrics/results/artifacts"

# Gather up the results (json) files from all the defined repos for the last
# NUM_BUILDS builds.
gather_data() {
	for repo in "${repos[@]}"; do
		echo "Getting history for repo $repo"
		local outpath="${indexdir}/${repo}"
		local outname="${outpath}/index.json"
		mkdir -p "${outpath}"
		local url="${url_base}/${repo}/${url_index}"
		# First, we need the index file for the job so we can get the list of the
		# last 'n' jobs run.
		curl -L -o "${outname}" "$url"

		builds=$(jq '.builds | .[] | .number' "${outname}" | head -n "${NUM_BUILDS}")

		echo "Examining builds: $builds"

		# For each build, for each test, pull down the json results file, if it
		# exists
		for build in $builds; do
			echo "Get results for build $build"
			local builddir="${resultsdir}/${repo}/${build}"
			mkdir -p "${builddir}"
			local build_url="${url_base}/${repo}/${build}/${url_artifacts}"
			echo "Pulling result from $build_url"
			for test in "${tests[@]}"; do
				local testfile="${builddir}/${KATA_HYPERVISOR}-${test}.json"
				local test_url="${build_url}/${KATA_HYPERVISOR}-${test}.json"
				echo "  $test_url"
				# Can fail if the build failed to generate any results
				curl -L -o "${testfile}" "$test_url" || true
			done
		done
	done
}

# For each test type, process all the relevant data files in the results subdir.
# *NOTE*, this does *not* take into account the number or list of build numbers we
# pulled down - it will evaluate all files it finds. If you want to only evaluate
# the data you pulled, ensure the results directory is empty (or non-existent) before
# you run the script.
process_data() {
	local count=0
	for test in "${tests[@]}"; do
		query="${test_queries[$count]}"
		echo "Processing $test"
		echo " Query '$query'"
		count=$((count+1))

		local allvalues=""
		local found=0
		local total=0
		# Seed min with the largest unsigned value so the first result replaces it
		local min=$(printf "%u" -1)
		local max=0
		files=$(find "${resultsdir}" -name "${KATA_HYPERVISOR}-${test}.json" -print)
		for file in ${files}; do
			echo " Look at file $file"
			value=$(jq "$query" "$file" || true)
			echo "  Result $value"
			if [ -n "$value" ] && [ "$value" != "null" ]; then
				allvalues="$value $allvalues"
				found=$((found+1))
				total=$(echo "$total+$value" | bc)

				(( $(echo "$value > $max" | bc) )) && max=${value}
				(( $(echo "$value < $min" | bc) )) && min=${value}
			fi
		done

		# Avoid a divide-by-zero if no result files were found for this test
		if [ "$found" -eq 0 ]; then
			echo "No results found for ${test}"
			continue
		fi

		mean=$(echo "scale=2; $total/$found" | bc)
		minpc=$(echo "scale=2; ($min/$mean)*100" | bc)
		maxpc=$(echo "scale=2; ($max/$mean)*100" | bc)
		pc_95=$(echo "scale=2; $mean*0.95" | bc)
		pc_105=$(echo "scale=2; $mean*1.05" | bc)

		echo "allvalues are [$allvalues]"
		echo "${test}: mean $mean, 95% mean ${pc_95}, 105% mean ${pc_105}"
		echo "  min $min ($minpc% of mean), max $max ($maxpc% of mean)"
	done
}

help() {
	usage=$(cat << EOF
Usage: $0 [-h] [options]
Description:
	Gather statistics from recent Jenkins CI metrics builds. The resulting
	data is useful for configuring the metrics slave checkmetrics baselines.

	To change which metrics tests are evaluated, edit the values in this
	script directly. Default tests evaluated are:
	"${tests[@]}"

Options:
	-d <path>,	Directory to store downloaded data (default: ${datadir})
	-h,		Print this help
	-n <n>,		Fetch data from the last 'n' builds on the Jenkins server
			(default: ${NUM_BUILDS})
			Note: The statistics calculations include *all* data files in the
			directory: ${resultsdir}. If previous data exists, it will be counted.
	-r <job>,	Which Jenkins build jobs to gather data from.
			(default: "${default_repos[@]}")
EOF
)
	echo "$usage"
}

main() {
	local OPTIND
	while getopts "d:hn:r:" opt; do
		case ${opt} in
		d)
			datadir="${OPTARG}"
			;;
		h)
			help
			exit 0
			;;
		n)
			NUM_BUILDS="${OPTARG}"
			;;
		r)
			repos+=("${OPTARG}")
			;;
		?)
			# parse failure
			help
			echo "Failed to parse arguments" >&2
			exit 1
			;;
		esac
	done
	shift $((OPTIND-1))

	[ ${#repos[@]} -eq 0 ] && repos=("${default_repos[@]}")

	resultsdir="${datadir}/results"
	indexdir="${datadir}/indexes"

	gather_data
	process_data
}

main "$@"
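# Example invocation (a hypothetical sketch: the output directory is
# illustrative; the flags, the KATA_HYPERVISOR variable and the job name are
# the ones defined above). This fetches the last 10 builds of one metrics job
# into /tmp/metrics-history and prints the summary statistics:
#
#   KATA_HYPERVISOR=qemu ./history.sh -d /tmp/metrics-history -n 10 \
#       -r "kata-containers-2.0-metrics-ubuntu-20-04-PR"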