github.com/opencontainers/runc@v1.2.0-rc.1.0.20240520010911-492dc558cdd6/tests/integration/cgroups.bats

#!/usr/bin/env bats

load helpers

function teardown() {
	teardown_bundle
}

function setup() {
	setup_busybox
}

@test "runc create (no limits + no cgrouppath + no permission) succeeds" {
	runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_permissions
	[ "$status" -eq 0 ]
}

@test "runc create (rootless + no limits + cgrouppath + no permission) fails with permission error" {
	requires rootless rootless_no_cgroup

	set_cgroups_path

	runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_permissions
	[ "$status" -eq 1 ]
	[[ "$output" == *"unable to apply cgroup configuration"*"permission denied"* ]]
}

@test "runc create (rootless + limits + no cgrouppath + no permission) fails with informative error" {
	requires rootless rootless_no_cgroup

	set_resources_limit

	runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_permissions
	[ "$status" -eq 1 ]
	[[ "$output" == *"rootless needs no limits + no cgrouppath when no permission is granted for cgroups"* ]] ||
		[[ "$output" == *"cannot set pids limit: container could not join or create cgroup"* ]]
}

@test "runc create (limits + cgrouppath + permission on the cgroup dir) succeeds" {
	[ $EUID -ne 0 ] && requires rootless_cgroup

	set_cgroups_path
	set_resources_limit

	runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_permissions
	[ "$status" -eq 0 ]
	if [ -v CGROUP_V2 ]; then
		if [ -v RUNC_USE_SYSTEMD ]; then
			if [ $EUID -eq 0 ]; then
				check_cgroup_value "cgroup.controllers" "$(cat /sys/fs/cgroup/machine.slice/cgroup.controllers)"
			else
				# Filter out controllers that systemd is unable to delegate.
				check_cgroup_value "cgroup.controllers" "$(sed 's/ \(hugetlb\|misc\|rdma\)//g' </sys/fs/cgroup/user.slice/user-${EUID}.slice/cgroup.controllers)"
			fi
		else
			check_cgroup_value "cgroup.controllers" "$(cat /sys/fs/cgroup/cgroup.controllers)"
		fi
	fi
}
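
# Illustrative note on the rootless branch of the test above: the sed filter
# drops the controllers systemd does not delegate to user slices, so a
# hypothetical controller list such as
#   cpuset cpu io memory hugetlb pids rdma misc
# would be compared as
#   cpuset cpu io memory pids
# (the exact list depends on the kernel and systemd version).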

@test "runc exec (limits + cgrouppath + permission on the cgroup dir) succeeds" {
	[ $EUID -ne 0 ] && requires rootless_cgroup

	set_cgroups_path
	set_resources_limit

	runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_permissions
	[ "$status" -eq 0 ]

	runc exec test_cgroups_permissions echo "cgroups_exec"
	[ "$status" -eq 0 ]
	[[ ${lines[0]} == *"cgroups_exec"* ]]
}

@test "runc exec (cgroup v2 + init process in non-root cgroup) succeeds" {
	requires root cgroups_v2

	set_cgroups_path
	set_cgroup_mount_writable

	runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_group
	[ "$status" -eq 0 ]

	runc exec test_cgroups_group cat /sys/fs/cgroup/cgroup.controllers
	[ "$status" -eq 0 ]
	[[ ${lines[0]} == *"memory"* ]]

	runc exec test_cgroups_group cat /proc/self/cgroup
	[ "$status" -eq 0 ]
	[[ ${lines[0]} = "0::/" ]]

	runc exec test_cgroups_group mkdir /sys/fs/cgroup/foo
	[ "$status" -eq 0 ]

	runc exec test_cgroups_group sh -c "echo 1 > /sys/fs/cgroup/foo/cgroup.procs"
	[ "$status" -eq 0 ]

	# The init process is now in "/foo", but an exec process can still join "/"
	# because we haven't enabled any domain controller.
	runc exec test_cgroups_group cat /proc/self/cgroup
	[ "$status" -eq 0 ]
	[[ ${lines[0]} = "0::/" ]]

	# Turn on a domain controller (memory).
	runc exec test_cgroups_group sh -euxc 'echo $$ > /sys/fs/cgroup/foo/cgroup.procs; echo +memory > /sys/fs/cgroup/cgroup.subtree_control'
	[ "$status" -eq 0 ]

	# An exec process can no longer join "/" after turning on a domain
	# controller; it falls back to "/foo".
	runc exec test_cgroups_group cat /proc/self/cgroup
	[ "$status" -eq 0 ]
	[[ ${lines[0]} = "0::/foo" ]]

	# Teardown: remove "/foo".
	# shellcheck disable=SC2016
	runc exec test_cgroups_group sh -uxc 'echo -memory > /sys/fs/cgroup/cgroup.subtree_control; for f in $(cat /sys/fs/cgroup/foo/cgroup.procs); do echo $f > /sys/fs/cgroup/cgroup.procs; done; rmdir /sys/fs/cgroup/foo'
	runc exec test_cgroups_group test ! -d /sys/fs/cgroup/foo
	[ "$status" -eq 0 ]
}

@test "runc run (cgroup v1 + unified resources should fail)" {
	requires root cgroups_v1

	set_cgroups_path
	set_resources_limit
	update_config '.linux.resources.unified |= {"memory.min": "131072"}'

	runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_unified
	[ "$status" -ne 0 ]
	[[ "$output" == *'invalid configuration'* ]]
}

@test "runc run (blkio weight)" {
	requires cgroups_v2 cgroups_io_weight
	[ $EUID -ne 0 ] && requires rootless_cgroup

	set_cgroups_path
	update_config '.linux.resources.blockIO |= {"weight": 750}'

	runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_unified
	[ "$status" -eq 0 ]

	runc exec test_cgroups_unified sh -c 'cat /sys/fs/cgroup/io.bfq.weight'
	if [[ "$status" -eq 0 ]]; then
		[ "$output" = 'default 750' ]
	else
		runc exec test_cgroups_unified sh -c 'cat /sys/fs/cgroup/io.weight'
		[ "$output" = 'default 7475' ]
	fi
}
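
# Note on the fallback branch of the test above: when io.bfq.weight is absent,
# the configured weight is written to io.weight instead, converted from the
# 10..1000 BFQ-style range to the kernel's 1..10000 range. The helper below is
# an illustrative sketch only (it is not used by any test, and the formula is
# believed to match runc's conversion, so treat it as an assumption); it maps
# the configured weight of 750 to 7475, which is why "default 7475" is expected.
bfq_to_io_weight() {
	local weight=$1
	echo $((1 + (weight - 10) * 9999 / 990))
}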

@test "runc run (per-device io weight for bfq)" {
	requires root # to create a loop device

	dd if=/dev/zero of=backing.img bs=4096 count=1
	dev=$(losetup --find --show backing.img) || skip "unable to create a loop device"

	# See if BFQ scheduler is available.
	if ! { grep -qw bfq "/sys/block/${dev#/dev/}/queue/scheduler" &&
		echo bfq >"/sys/block/${dev#/dev/}/queue/scheduler"; }; then
		losetup -d "$dev"
		skip "BFQ scheduler not available"
	fi

	set_cgroups_path

	IFS=$' \t:' read -r major minor <<<"$(lsblk -nd -o MAJ:MIN "$dev")"
	update_config '.linux.devices += [{path: "'"$dev"'", type: "b", major: '"$major"', minor: '"$minor"'}]
		| .linux.resources.blockIO.weight |= 333
		| .linux.resources.blockIO.weightDevice |= [
			{ major: '"$major"', minor: '"$minor"', weight: 444 }
		]'
	runc run -d --console-socket "$CONSOLE_SOCKET" test_dev_weight
	[ "$status" -eq 0 ]

	# The loop device itself is no longer needed.
	losetup -d "$dev"

	if [ -v CGROUP_V2 ]; then
		file="io.bfq.weight"
	else
		file="blkio.bfq.weight_device"
	fi
	weights=$(get_cgroup_value $file)
	[[ "$weights" == *"default 333"* ]]
	[[ "$weights" == *"$major:$minor 444"* ]]
}

@test "runc run (cpu.idle)" {
	requires cgroups_cpu_idle
	[ $EUID -ne 0 ] && requires rootless_cgroup

	set_cgroups_path
	update_config '.linux.resources.cpu.idle = 1'

	runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_unified
	[ "$status" -eq 0 ]
	check_cgroup_value "cpu.idle" "1"
}

# Convert size in KB to hugetlb size suffix.
convert_hugetlb_size() {
	local size=$1
	local units=("KB" "MB" "GB")
	local idx=0

	while ((size >= 1024)); do
		((size /= 1024))
		((idx++))
	done

	echo "$size${units[$idx]}"
}
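
# Illustrative examples for the helper above, using the hugepage sizes commonly
# found under /sys/kernel/mm/hugepages on x86_64:
#   convert_hugetlb_size 2048     -> 2MB
#   convert_hugetlb_size 1048576  -> 1GB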

@test "runc run (hugetlb limits)" {
	requires cgroups_hugetlb
	[ $EUID -ne 0 ] && requires rootless_cgroup
	# shellcheck disable=SC2012 # ls is fine here.
	mapfile -t sizes_kb < <(ls /sys/kernel/mm/hugepages/ | sed -e 's/.*hugepages-//' -e 's/kB$//')
	if [ "${#sizes_kb[@]}" -lt 1 ]; then
		skip "requires hugetlb"
	fi

	# Create two arrays:
	#  - sizes: hugetlb cgroup file suffixes;
	#  - limits: limits for each size.
	for size in "${sizes_kb[@]}"; do
		sizes+=("$(convert_hugetlb_size "$size")")
		# Limit to 1 page.
		limits+=("$((size * 1024))")
	done

	# Set per-size limits.
	for ((i = 0; i < ${#sizes[@]}; i++)); do
		size="${sizes[$i]}"
		limit="${limits[$i]}"
		update_config '.linux.resources.hugepageLimits += [{ pagesize: "'"$size"'", limit: '"$limit"' }]'
	done

	set_cgroups_path
	runc run -d --console-socket "$CONSOLE_SOCKET" test_hugetlb
	[ "$status" -eq 0 ]

	lim="max"
	[ -v CGROUP_V1 ] && lim="limit_in_bytes"

	optional=("")
	# Add rsvd, if available.
	if test -f "$(get_cgroup_path hugetlb)/hugetlb.${sizes[0]}.rsvd.$lim"; then
		optional+=(".rsvd")
	fi

	# Check if the limits are as expected.
	for ((i = 0; i < ${#sizes[@]}; i++)); do
		size="${sizes[$i]}"
		limit="${limits[$i]}"
		for rsvd in "${optional[@]}"; do
			param="hugetlb.${size}${rsvd}.$lim"
			echo "checking $param"
			check_cgroup_value "$param" "$limit"
		done
	done
}

@test "runc run (cgroup v2 resources.unified only)" {
	requires root cgroups_v2

	set_cgroups_path
	update_config '.linux.resources.unified |= {
		"memory.min": "131072",
		"memory.low": "524288",
		"memory.high": "5242880",
		"memory.max": "20484096",
		"memory.swap.max": "20971520",
		"pids.max": "99",
		"cpu.max": "10000 100000",
		"cpu.weight": "42"
	}'

	runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_unified
	[ "$status" -eq 0 ]

	runc exec test_cgroups_unified sh -c 'cd /sys/fs/cgroup && grep . *.min *.max *.low *.high'
	[ "$status" -eq 0 ]
	echo "$output"

	echo "$output" | grep -q '^memory.min:131072$'
	echo "$output" | grep -q '^memory.low:524288$'
	echo "$output" | grep -q '^memory.high:5242880$'
	echo "$output" | grep -q '^memory.max:20484096$'
	echo "$output" | grep -q '^memory.swap.max:20971520$'
	echo "$output" | grep -q '^pids.max:99$'
	echo "$output" | grep -q '^cpu.max:10000 100000$'

	check_systemd_value "MemoryMin" 131072
	check_systemd_value "MemoryLow" 524288
	check_systemd_value "MemoryHigh" 5242880
	check_systemd_value "MemoryMax" 20484096
	check_systemd_value "MemorySwapMax" 20971520
	check_systemd_value "TasksMax" 99
	check_cpu_quota 10000 100000 "100ms"
	check_cpu_weight 42
}

@test "runc run (cgroup v2 resources.unified override)" {
	requires root cgroups_v2

	set_cgroups_path
	# A CPU shares value of 3333 corresponds to a CPU weight of 128.
	update_config '.linux.resources.memory |= {"limit": 33554432}
		| .linux.resources.cpu |= {
			"shares": 3333,
			"quota": 40000,
			"period": 100000
		}
		| .linux.resources.unified |= {
			"memory.min": "131072",
			"memory.max": "40484864",
			"pids.max": "42",
			"cpu.max": "5000 50000",
			"cpu.weight": "42"
		}'

	runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_unified
	[ "$status" -eq 0 ]

	runc exec test_cgroups_unified cat /sys/fs/cgroup/memory.min
	[ "$status" -eq 0 ]
	[ "$output" = '131072' ]

	runc exec test_cgroups_unified cat /sys/fs/cgroup/memory.max
	[ "$status" -eq 0 ]
	[ "$output" = '40484864' ]

	runc exec test_cgroups_unified cat /sys/fs/cgroup/pids.max
	[ "$status" -eq 0 ]
	[ "$output" = '42' ]
	check_systemd_value "TasksMax" 42

	check_cpu_quota 5000 50000 "100ms"

	check_cpu_weight 42
}
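
# Note on the override test above: the inline comment about shares/weight
# reflects the cgroup v1 -> v2 conversion believed to be used by runc (treat
# the formula as informational):
#   weight = 1 + (shares - 2) * 9999 / 262142
# so "shares": 3333 would yield a weight of 1 + 3331*9999/262142 = 128 if
# "cpu.weight" were not explicitly overridden to 42 via resources.unified.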

@test "runc run (cgroupv2 mount inside container)" {
	requires cgroups_v2
	[ $EUID -ne 0 ] && requires rootless_cgroup

	set_cgroups_path

	runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_unified
	[ "$status" -eq 0 ]

	# Make sure we don't have any extra cgroups inside.
	runc exec test_cgroups_unified find /sys/fs/cgroup/ -type d
	[ "$status" -eq 0 ]
	[ "$(wc -l <<<"$output")" -eq 1 ]
}

@test "runc exec (cgroup v1+hybrid joins correct cgroup)" {
	requires root cgroups_hybrid

	set_cgroups_path

	runc run --pid-file pid.txt -d --console-socket "$CONSOLE_SOCKET" test_cgroups_group
	[ "$status" -eq 0 ]

	pid=$(cat pid.txt)
	run_cgroup=$(tail -1 </proc/"$pid"/cgroup)
	[[ "$run_cgroup" == *"runc-cgroups-integration-test"* ]]

	runc exec test_cgroups_group cat /proc/self/cgroup
	[ "$status" -eq 0 ]
	exec_cgroup=${lines[-1]}
	[[ $exec_cgroup == *"runc-cgroups-integration-test"* ]]

	# Check that the cgroup v2 path is the same for both processes.
	[ "$run_cgroup" = "$exec_cgroup" ]
}

@test "runc exec should refuse a paused container" {
	requires cgroups_freezer
	[ $EUID -ne 0 ] && requires rootless_cgroup

	set_cgroups_path

	runc run -d --console-socket "$CONSOLE_SOCKET" ct1
	[ "$status" -eq 0 ]
	runc pause ct1
	[ "$status" -eq 0 ]

	# Exec should fail fast: it must neither hang nor succeed.
	runc exec ct1 echo ok
	[ "$status" -eq 255 ]
	[[ "$output" == *"cannot exec in a paused container"* ]]
}

@test "runc exec --ignore-paused" {
	requires cgroups_freezer
	[ $EUID -ne 0 ] && requires rootless_cgroup

	set_cgroups_path

	runc run -d --console-socket "$CONSOLE_SOCKET" ct1
	[ "$status" -eq 0 ]
	runc pause ct1
	[ "$status" -eq 0 ]

	# Resume the container a bit later.
	(
		sleep 2
		runc resume ct1
	) &

	# Exec should not time out: it blocks until the container is resumed,
	# then succeeds.
	runc exec --ignore-paused ct1 echo ok
	[ "$status" -eq 0 ]
	[ "$output" = "ok" ]
}

@test "runc run/create should error for a non-empty cgroup" {
	[ $EUID -ne 0 ] && requires rootless_cgroup

	set_cgroups_path

	runc run -d --console-socket "$CONSOLE_SOCKET" ct1
	[ "$status" -eq 0 ]

	# Run a second container sharing the cgroup with the first one.
	runc --debug run -d --console-socket "$CONSOLE_SOCKET" ct2
	[ "$status" -ne 0 ]
	[[ "$output" == *"container's cgroup is not empty"* ]]

	# Same but using runc create.
	runc create --console-socket "$CONSOLE_SOCKET" ct3
	[ "$status" -ne 0 ]
	[[ "$output" == *"container's cgroup is not empty"* ]]
}

@test "runc run/create should refuse pre-existing frozen cgroup" {
	requires cgroups_freezer
	[ $EUID -ne 0 ] && requires rootless_cgroup

	set_cgroups_path

	if [ -v CGROUP_V1 ]; then
		FREEZER_DIR="${CGROUP_FREEZER_BASE_PATH}/${REL_CGROUPS_PATH}"
		FREEZER="${FREEZER_DIR}/freezer.state"
		STATE="FROZEN"
	else
		FREEZER_DIR="${CGROUP_V2_PATH}"
		FREEZER="${FREEZER_DIR}/cgroup.freeze"
		STATE="1"
	fi

	# Create and freeze the cgroup.
	mkdir -p "$FREEZER_DIR"
	echo "$STATE" >"$FREEZER"

	# Start a container.
	runc run -d --console-socket "$CONSOLE_SOCKET" ct1
	[ "$status" -eq 1 ]
	# A warning should be printed.
	[[ "$output" == *"container's cgroup unexpectedly frozen"* ]]

	# Same check for runc create.
	runc create --console-socket "$CONSOLE_SOCKET" ct2
	[ "$status" -eq 1 ]
	# A warning should be printed.
	[[ "$output" == *"container's cgroup unexpectedly frozen"* ]]

	# Cleanup.
	rmdir "$FREEZER_DIR"
}