github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/test/root/cgroup_test.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package root

import (
	"bufio"
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/SagerNet/gvisor/pkg/test/dockerutil"
	"github.com/SagerNet/gvisor/pkg/test/testutil"
	"github.com/SagerNet/gvisor/runsc/cgroup"
)

// verifyPid checks that pid is listed in the cgroup.procs-style file at path.
func verifyPid(pid int, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	var gots []int
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		got, err := strconv.Atoi(scanner.Text())
		if err != nil {
			return err
		}
		if got == pid {
			return nil
		}
		gots = append(gots, got)
	}
	if scanner.Err() != nil {
		return scanner.Err()
	}
	return fmt.Errorf("got: %v, want: %d", gots, pid)
}

func TestMemCgroup(t *testing.T) {
	ctx := context.Background()
	d := dockerutil.MakeContainer(ctx, t)
	defer d.CleanUp(ctx)

	// Start a new container and allocate the specified amount of memory.
	allocMemSize := 128 << 20
	allocMemLimit := 2 * allocMemSize

	if err := d.Spawn(ctx, dockerutil.RunOpts{
		Image:  "basic/ubuntu",
		Memory: allocMemLimit, // Must be in bytes.
	}, "python3", "-c", fmt.Sprintf("import time; s = 'a' * %d; time.sleep(100)", allocMemSize)); err != nil {
		t.Fatalf("docker run failed: %v", err)
	}

	// Extract the ID to look up the cgroup.
	gid := d.ID()
	t.Logf("cgroup ID: %s", gid)

	// Wait until the container has allocated the memory.
	memUsage := 0
	start := time.Now()
	for time.Since(start) < 30*time.Second {
		// Sleep for a brief period of time after spawning the
		// container (so that Docker can create the cgroup, etc.)
		// or after looping below (so the application can start).
		time.Sleep(100 * time.Millisecond)

		// Read the cgroup memory limit.
		path := filepath.Join("/sys/fs/cgroup/memory/docker", gid, "memory.limit_in_bytes")
		outRaw, err := ioutil.ReadFile(path)
		if err != nil {
			// It's possible that the container does not exist yet.
			continue
		}
		out := strings.TrimSpace(string(outRaw))
		memLimit, err := strconv.Atoi(out)
		if err != nil {
			t.Fatalf("Atoi(%v): %v", out, err)
		}
		if memLimit != allocMemLimit {
			// The group may not have had the correct limit set yet.
			continue
		}

		// Read the cgroup memory usage.
		path = filepath.Join("/sys/fs/cgroup/memory/docker", gid, "memory.max_usage_in_bytes")
		outRaw, err = ioutil.ReadFile(path)
		if err != nil {
			t.Fatalf("error reading usage: %v", err)
		}
		out = strings.TrimSpace(string(outRaw))
		memUsage, err = strconv.Atoi(out)
		if err != nil {
			t.Fatalf("Atoi(%v): %v", out, err)
		}
		t.Logf("read usage: %v, wanted: %v", memUsage, allocMemSize)

		// Are we done?
		if memUsage >= allocMemSize {
			return
		}
	}

	t.Fatalf("%vMB is less than %vMB", memUsage>>20, allocMemSize>>20)
}

// TestCgroup sets cgroup options and checks that the cgroup was properly configured.
func TestCgroup(t *testing.T) {
	ctx := context.Background()
	d := dockerutil.MakeContainer(ctx, t)
	defer d.CleanUp(ctx)

	// This is not a comprehensive list of attributes.
	//
	// Note that we are specifically missing cpusets, which fail if specified.
	// In any case, it's unclear if cpusets can be reliably tested here: these
	// tests are often run on a single-core virtual machine, and there is only a
	// single CPU available in our current set, and every container's set.
	attrs := []struct {
		field          string
		value          int64
		ctrl           string
		file           string
		want           string
		skipIfNotFound bool
	}{
		{
			field: "cpu-shares",
			value: 1000,
			ctrl:  "cpu",
			file:  "cpu.shares",
			want:  "1000",
		},
		{
			field: "cpu-period",
			value: 2000,
			ctrl:  "cpu",
			file:  "cpu.cfs_period_us",
			want:  "2000",
		},
		{
			field: "cpu-quota",
			value: 3000,
			ctrl:  "cpu",
			file:  "cpu.cfs_quota_us",
			want:  "3000",
		},
		{
			field: "kernel-memory",
			value: 100 << 20,
			ctrl:  "memory",
			file:  "memory.kmem.limit_in_bytes",
			want:  "104857600",
		},
		{
			field: "memory",
			value: 1 << 30,
			ctrl:  "memory",
			file:  "memory.limit_in_bytes",
			want:  "1073741824",
		},
		{
			field: "memory-reservation",
			value: 500 << 20,
			ctrl:  "memory",
			file:  "memory.soft_limit_in_bytes",
			want:  "524288000",
		},
		{
			field:          "memory-swap",
			value:          2 << 30,
			ctrl:           "memory",
			file:           "memory.memsw.limit_in_bytes",
			want:           "2147483648",
			skipIfNotFound: true, // swap may be disabled on the machine.
		},
		{
			field: "memory-swappiness",
			value: 5,
			ctrl:  "memory",
			file:  "memory.swappiness",
			want:  "5",
		},
		{
			field:          "blkio-weight",
			value:          750,
			ctrl:           "blkio",
			file:           "blkio.weight",
			want:           "750",
			skipIfNotFound: true, // blkio groups may not be available.
		},
		{
			field: "pids-limit",
			value: 1000,
			ctrl:  "pids",
			file:  "pids.max",
			want:  "1000",
		},
	}

	// Make configs.
	conf, hostconf, _ := d.ConfigsFrom(dockerutil.RunOpts{
		Image: "basic/alpine",
	}, "sleep", "10000")

	// Add cgroup arguments to configs.
	for _, attr := range attrs {
		switch attr.field {
		case "cpu-shares":
			hostconf.Resources.CPUShares = attr.value
		case "cpu-period":
			hostconf.Resources.CPUPeriod = attr.value
		case "cpu-quota":
			hostconf.Resources.CPUQuota = attr.value
		case "kernel-memory":
			hostconf.Resources.KernelMemory = attr.value
		case "memory":
			hostconf.Resources.Memory = attr.value
		case "memory-reservation":
			hostconf.Resources.MemoryReservation = attr.value
		case "memory-swap":
			hostconf.Resources.MemorySwap = attr.value
		case "memory-swappiness":
			val := attr.value
			hostconf.Resources.MemorySwappiness = &val
		case "blkio-weight":
			hostconf.Resources.BlkioWeight = uint16(attr.value)
		case "pids-limit":
			val := attr.value
			hostconf.Resources.PidsLimit = &val
		}
	}

	// Create container.
	if err := d.CreateFrom(ctx, "basic/alpine", conf, hostconf, nil); err != nil {
		t.Fatalf("create failed with: %v", err)
	}

	// Start container.
	if err := d.Start(ctx); err != nil {
		t.Fatalf("start failed with: %v", err)
	}

	// Look up the relevant cgroup ID.
	gid := d.ID()
	t.Logf("cgroup ID: %s", gid)

	// Check the list of attributes defined above.
	for _, attr := range attrs {
		path := filepath.Join("/sys/fs/cgroup", attr.ctrl, "docker", gid, attr.file)
		out, err := ioutil.ReadFile(path)
		if err != nil {
			if os.IsNotExist(err) && attr.skipIfNotFound {
				t.Logf("skipped %s/%s", attr.ctrl, attr.file)
				continue
			}
			t.Fatalf("failed to read %q: %v", path, err)
		}
		if got := strings.TrimSpace(string(out)); got != attr.want {
			t.Errorf("field: %q, cgroup attribute %s/%s, got: %q, want: %q", attr.field, attr.ctrl, attr.file, got, attr.want)
		}
	}

	// Check that the sandbox is inside the cgroup.
	controllers := []string{
		"blkio",
		"cpu",
		"cpuset",
		"memory",
		"net_cls",
		"net_prio",
		"devices",
		"freezer",
		"perf_event",
		"pids",
		"systemd",
	}
	pid, err := d.SandboxPid(ctx)
	if err != nil {
		t.Fatalf("SandboxPid: %v", err)
	}
	for _, ctrl := range controllers {
		path := filepath.Join("/sys/fs/cgroup", ctrl, "docker", gid, "cgroup.procs")
		if err := verifyPid(pid, path); err != nil {
			t.Errorf("cgroup control %q processes: %v", ctrl, err)
		}
	}
}

// TestCgroupParent sets the "CgroupParent" option and checks that the child and
// parent's cgroups are created correctly relative to each other.
func TestCgroupParent(t *testing.T) {
	ctx := context.Background()
	d := dockerutil.MakeContainer(ctx, t)
	defer d.CleanUp(ctx)

	// Construct a known cgroup name.
	parent := testutil.RandomID("runsc-")
	conf, hostconf, _ := d.ConfigsFrom(dockerutil.RunOpts{
		Image: "basic/alpine",
	}, "sleep", "10000")
	hostconf.Resources.CgroupParent = parent

	if err := d.CreateFrom(ctx, "basic/alpine", conf, hostconf, nil); err != nil {
		t.Fatalf("create failed with: %v", err)
	}

	if err := d.Start(ctx); err != nil {
		t.Fatalf("start failed with: %v", err)
	}

	// Extract the ID to look up the cgroup.
	gid := d.ID()
	t.Logf("cgroup ID: %s", gid)

	// Check that the sandbox is inside the cgroup.
	pid, err := d.SandboxPid(ctx)
	if err != nil {
		t.Fatalf("SandboxPid: %v", err)
	}

	// Find the cgroup of the sandbox's parent process to check that the
	// container's cgroup is created in the right location relative to it.
	cmd := fmt.Sprintf("grep PPid: /proc/%d/status | sed 's/PPid:\\s//'", pid)
	ppidStr, err := exec.Command("bash", "-c", cmd).CombinedOutput()
	if err != nil {
		t.Fatalf("Executing %q: %v", cmd, err)
	}
	ppid, err := strconv.Atoi(strings.TrimSpace(string(ppidStr)))
	if err != nil {
		t.Fatalf("invalid PID (%s): %v", ppidStr, err)
	}
	cgroups, err := cgroup.NewFromPid(ppid)
	if err != nil {
		t.Fatalf("cgroup.NewFromPid(%d): %v", ppid, err)
	}
	path := filepath.Join(cgroups.MakePath("cpuacct"), parent, gid, "cgroup.procs")
	if err := verifyPid(pid, path); err != nil {
		t.Errorf("cgroup control %q processes: %v", "cpuacct", err)
	}
}