github.com/dmaizel/tests@v0.0.0-20210728163746-cae6a2d9cee8/metrics/storage/fio.sh

#!/bin/bash
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

# Description of the test:
# Use fio to gather storage IO metrics.
# The fio configuration can be modified via environment variables.
# This test is only designed to handle a single file and class of job
# in fio. If you require a more complex fio test then you are probably
# better off writing that by hand or creating a new metrics test.

set -e

# General env
SCRIPT_PATH=$(dirname "$(readlink -f "$0")")
source "${SCRIPT_PATH}/../lib/common.bash"

# Items for our local Dockerfile/image creation
TEST_NAME="fio"
IMAGE="local-fio"
DOCKERFILE="${SCRIPT_PATH}/fio_dockerfile/Dockerfile"
CONTAINER_NAME="fio_test"

# How much RAM and how many (v)CPUs do we give the container under test?
CONTAINER_RAM=${CONTAINER_RAM:-2G}
CONTAINER_CPUS=${CONTAINER_CPUS:-1}

# Important paths from the pov of both the host and the guest.
# Some of them are mapped into both via a docker volume mount.
HOST_OUTPUT_DIRNAME="${HOST_OUTPUT_DIRNAME:-${SCRIPT_PATH}/fio_output}"
HOST_INPUT_DIRNAME="${HOST_INPUT_DIRNAME:-${SCRIPT_PATH}/fio_input}"
# Used on the host side if we do a volume mount test.
HOST_TEST_DIRNAME="${HOST_TEST_DIRNAME:-/tmp}"
GUEST_OUTPUT_DIRNAME="/output"
GUEST_INPUT_DIRNAME="/input"

# This is the dir that fio will actually run the test upon in the container.
# By default the tests will run on the container 'root mount'. If you set
# TEST_VOLUME_MOUNT, then we will mount a volume over that test directory
# and the tests will happen on a volume mount.
TEST_VOLUME_MOUNT=${TEST_VOLUME_MOUNT:-0}
GUEST_TEST_DIRNAME="/testdir"

# These define which variety of the tests we will run.
# Which of the read/write/rw tests will we run (we will run all
# that are set to 1).
#
# By default we do direct random read and write tests (not readwrite).
FIO_READTEST=${FIO_READTEST:-1}
FIO_WRITETEST=${FIO_WRITETEST:-1}
FIO_READWRITETEST=${FIO_READWRITETEST:-0}
FIO_DIRECT=${FIO_DIRECT:-1}
FIO_RANDOM=${FIO_RANDOM:-1}

# The blocksizes we will test. We have a separate set for direct mode,
# as direct mode can only use the device blocksize or larger sizes.
FIO_BLOCKSIZES="${FIO_BLOCKSIZES:-128 256 512 1k 2k 4k 8k 16k 32k 64k}"
FIO_DIRECT_BLOCKSIZES="${FIO_DIRECT_BLOCKSIZES:-4k 8k 16k 32k 64k}"

# Other FIO parameters that are tweakable by setting them in the environment.
# Values from here are directly injected into the fio jobfiles used for
# running the tests.
FIO_NUMJOBS=${FIO_NUMJOBS:-4}
# By default run a time based test.
FIO_TIMEBASED=${FIO_TIMEBASED:-1}
# And 60s seems a good balance to get us repeatable numbers in not too long a time.
FIO_RUNTIME=${FIO_RUNTIME:-60}
# By default we have no ramp (warmup) time.
FIO_RAMPTIME=${FIO_RAMPTIME:-0}
# Drop the caches in the guest using fio. Note, we need CAP_SYS_ADMIN in the container
# for this to work.
FIO_INVALIDATE=${FIO_INVALIDATE:-1}
# Do not use fallocate. Not all the filesystem types we can test (such as 9p) support
# it - which can then generate errors in the JSON datastream.
FIO_FALLOCATE=${FIO_FALLOCATE:-none}
# When running 'direct', the file size should not really matter as nothing should be
# cached.
# If you are running cached (direct=0), then it is likely this whole file will fit in
# the buffercache. You can make this filesize larger than the container RAM size to
# make a cached test mostly miss the cache - but, depending on the runtime and setup,
# you might just end up hitting the host side buffercache. You could of course make the
# filesize bigger than the host RAM size... it might take fio some time to create that
# testfile though.
FIO_FILESIZE=${FIO_FILESIZE:-1G}
FIO_IOENGINE=${FIO_IOENGINE:-libaio}
FIO_IODEPTH=${FIO_IODEPTH:-16}
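
# For illustration only (hypothetical values, not part of the test logic): any of
# the knobs above can be overridden from the calling environment, e.g.
#
#   TEST_VOLUME_MOUNT=1 FIO_DIRECT=0 FIO_RUNTIME=30 ./fio.sh
#
# which would run the cached (non-direct) blocksize set for 30s per job on a
# volume mount instead of the container root mount.
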
# Generate the fio jobfiles into the host directory that will then be shared into
# the container to run the actual tests.
#
# We iterate through the combination of linear/random and read/write/rw and generate
# all files - even if the test config will not then use them all. It just makes this
# loop simpler, and the extra file overhead is tiny.
#
# Note - the jobfiles *only* contain test-invariant items - that is, anything that
# changes between each iteration of fio in this test (such as the blocksize and the
# direct setting) is dynamically set on the fio commandline at runtime.
generate_jobfiles() {
	local n
	local t

	for n in "" rand; do
		for t in read write rw; do
			local testtype="${n}$t"
			local filebase="fio-${testtype}"
			local filename="${filebase}.job"
			local destfile="${HOST_INPUT_DIRNAME}/${filename}"

			echo "; Kata metrics auto generated fio job file" > "${destfile}"
			echo "[global]" >> "${destfile}"
			echo "directory=$GUEST_TEST_DIRNAME" >> "${destfile}"
			echo "filename=$filebase" >> "${destfile}"
			# Use the full test type (e.g. randread) so the rand variants really do random IO.
			echo "rw=$testtype" >> "${destfile}"
			echo "numjobs=$FIO_NUMJOBS" >> "${destfile}"
			echo "time_based=$FIO_TIMEBASED" >> "${destfile}"
			echo "runtime=$FIO_RUNTIME" >> "${destfile}"
			echo "ramp_time=$FIO_RAMPTIME" >> "${destfile}"
			echo "invalidate=$FIO_INVALIDATE" >> "${destfile}"
			echo "fallocate=$FIO_FALLOCATE" >> "${destfile}"
			echo "[file1]" >> "${destfile}"
			echo "size=$FIO_FILESIZE" >> "${destfile}"
			echo "ioengine=$FIO_IOENGINE" >> "${destfile}"
			echo "iodepth=$FIO_IODEPTH" >> "${destfile}"
		done
	done
}
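
# For illustration only: with the default FIO_* settings above, the generated
# fio-randread.job would contain roughly:
#
#   ; Kata metrics auto generated fio job file
#   [global]
#   directory=/testdir
#   filename=fio-randread
#   rw=randread
#   numjobs=4
#   time_based=1
#   runtime=60
#   ramp_time=0
#   invalidate=1
#   fallocate=none
#   [file1]
#   size=1G
#   ioengine=libaio
#   iodepth=16
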
# Initialise the system, including getting the container up and running and
# disconnected, ready to run the tests via `docker exec`.
init() {
	# Check tools/commands dependencies
	cmds=("docker")

	init_env
	check_cmds "${cmds[@]}"

	# Ensure our docker image is up to date
	check_dockerfiles_images "$IMAGE" "$DOCKERFILE"

	# Ensure we have the local input and output directories created.
	mkdir -p "${HOST_OUTPUT_DIRNAME}" || true
	mkdir -p "${HOST_INPUT_DIRNAME}" || true

	# We need to set some level of priv enablement to let fio in the
	# container be able to execute its 'invalidate'.
	RUNTIME_EXTRA_ARGS="--cap-add=SYS_ADMIN"

	# And set the CPUs and RAM up...
	RUNTIME_EXTRA_ARGS="${RUNTIME_EXTRA_ARGS} --cpus=${CONTAINER_CPUS}"
	RUNTIME_EXTRA_ARGS="${RUNTIME_EXTRA_ARGS} -m=${CONTAINER_RAM}"

	# If we are in volume test mode then mount a volume over the top of the testdir
	# in the container.
	# Otherwise, the default is to test on the 'root mount'.
	if [ "$TEST_VOLUME_MOUNT" -eq 1 ]; then
		RUNTIME_EXTRA_ARGS="${RUNTIME_EXTRA_ARGS} -v ${HOST_TEST_DIRNAME}:${GUEST_TEST_DIRNAME}"
	fi

	# Go pre-create the fio job files
	generate_jobfiles

	# Run up the work container, in detached state, ready to then issue 'execs'
	# to it. Do the input and output volume mounts now.
	docker run -d --rm --name="${CONTAINER_NAME}" --runtime=$RUNTIME ${RUNTIME_EXTRA_ARGS} -v ${HOST_OUTPUT_DIRNAME}:${GUEST_OUTPUT_DIRNAME} -v ${HOST_INPUT_DIRNAME}:${GUEST_INPUT_DIRNAME} $IMAGE
}

# Drop the host side caches. We may even want to do this if we are in 'direct' mode, as
# the fio 'direct' applies to the guest, and the container map/mount from the host to the
# guest might enable host side caching (that is an option on QEMU for instance).
dump_host_caches() {
	# Make sure we flush things down
	sync
	# And then drop the caches
	sudo bash -c "echo 3 > /proc/sys/vm/drop_caches"
}

# Generate the metrics JSON output files.
# arg1: the name of the fio generated JSON results file.
# The name of that input file dictates the name of the final metrics
# json output file as well.
generate_results() {
	# Set the TEST_NAME to define the json output file
	TEST_NAME=${1%.json}

	metrics_json_init
	metrics_json_start_array

	local json="$(cat << EOF
	{
		"testimage" : "${IMAGE}",
		"container_RAM" : "${CONTAINER_RAM}",
		"container_CPUS" : "${CONTAINER_CPUS}",
		"volume_test" : "${TEST_VOLUME_MOUNT}",
		"readtest" : "${FIO_READTEST}",
		"writetest" : "${FIO_WRITETEST}",
		"readwritetest" : "${FIO_READWRITETEST}",
		"fio_direct" : "${FIO_DIRECT}",
		"fio_random" : "${FIO_RANDOM}",
		"fio_blocksize" : "${FIO_BLOCKSIZE}",
		"fio_numjobs" : "${FIO_NUMJOBS}",
		"fio_timebased" : "${FIO_TIMEBASED}",
		"fio_runtime" : "${FIO_RUNTIME}",
		"fio_invalidate" : "${FIO_INVALIDATE}",
		"fio_filesize" : "${FIO_FILESIZE}",
		"fio_ioengine" : "${FIO_IOENGINE}",
		"fio_iodepth" : "${FIO_IODEPTH}"
	}
EOF
)"

	metrics_json_add_array_element "$json"
	metrics_json_end_array "Config"

	# And store the raw JSON emitted by fio itself.
	metrics_json_start_array
	# Read in the fio generated results
	json="$(cat ${HOST_OUTPUT_DIRNAME}/$1)"
	metrics_json_add_array_element "$json"
	metrics_json_end_array "Raw"

	metrics_json_save
}
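
# For illustration only: with the defaults, the first read iteration of run_test
# below ends up executing roughly
#
#   docker exec fio_test fio --bs=4k --output-format=json --direct=1 \
#       --output=/output/fio-randread-4k.json /input/fio-randread.job
#
# and the resulting fio-randread-4k.json lands in ${HOST_OUTPUT_DIRNAME} on the
# host, where generate_results above picks it up.
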
# Run the actual tests. Arguments:
#  $1 - Do we set the fio 'direct' parameter
#  $2 - Do we do the random access test (rather than linear test)
#
# This function will run all/none of the read/write/rw tests depending on their
# relevant environment settings.
run_test() {
	local dodirect=$1
	local dorandom=$2
	local randprefix=""
	local fioopts="--bs=${FIO_BLOCKSIZE} --output-format=json"
	local filename=""

	[ "$dorandom" -eq 1 ] && randprefix="rand"
	[ "$dodirect" -eq 1 ] && fioopts="${fioopts} --direct=1"

	if [ "${FIO_READTEST}" -eq 1 ]; then
		filebase="fio-${randprefix}read"
		filename="${filebase}.job"
		outputfilename="${filebase}-${FIO_BLOCKSIZE}.json"
		fioopts="${fioopts} --output=${GUEST_OUTPUT_DIRNAME}/$outputfilename"
		dump_host_caches
		echo " ${filebase}-${FIO_BLOCKSIZE}"
		local output=$(docker exec "${CONTAINER_NAME}" fio $fioopts ${GUEST_INPUT_DIRNAME}/$filename)
		generate_results "$outputfilename"
	fi

	if [ "${FIO_WRITETEST}" -eq 1 ]; then
		filebase="fio-${randprefix}write"
		filename="${filebase}.job"
		outputfilename="${filebase}-${FIO_BLOCKSIZE}.json"
		fioopts="${fioopts} --output=${GUEST_OUTPUT_DIRNAME}/$outputfilename"
		dump_host_caches
		echo " ${filebase}-${FIO_BLOCKSIZE}"
		local output=$(docker exec "${CONTAINER_NAME}" fio $fioopts ${GUEST_INPUT_DIRNAME}/$filename)
		generate_results "$outputfilename"
	fi

	if [ "${FIO_READWRITETEST}" -eq 1 ]; then
		filebase="fio-${randprefix}rw"
		filename="${filebase}.job"
		outputfilename="${filebase}-${FIO_BLOCKSIZE}.json"
		fioopts="${fioopts} --output=${GUEST_OUTPUT_DIRNAME}/$outputfilename"
		dump_host_caches
		echo " ${filebase}-${FIO_BLOCKSIZE}"
		local output=$(docker exec "${CONTAINER_NAME}" fio $fioopts ${GUEST_INPUT_DIRNAME}/$filename)
		generate_results "$outputfilename"
	fi
}

main() {
	# Decide which blocksize set we need
	if [ "${FIO_DIRECT}" -eq 1 ] ; then
		local blocksizes="${FIO_DIRECT_BLOCKSIZES}"
	else
		local blocksizes="${FIO_BLOCKSIZES}"
	fi

	# run the set of tests for each defined blocksize
	for b in $blocksizes; do
		FIO_BLOCKSIZE=$b
		run_test "${FIO_DIRECT}" "${FIO_RANDOM}"
	done
}

cleanup() {
	docker kill "${CONTAINER_NAME}" || true
	clean_env
}

init
main
cleanup