# Copyright 2018 syzkaller project authors. All rights reserved.
# Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

# Syzkaller descriptions for the cgroup (v1/v2) control filesystem.
# Covers directory creation, opening control files, and writes to the
# typed control files declared below.

include <uapi/linux/fcntl.h>

# One fd resource per distinct control-file "type" so that writes are
# only generated against the file kind they are valid for.
resource fd_cgroup[fd]
resource fd_cgroup_type[fd]
resource fd_cgroup_subtree[fd]
resource fd_cgroup_int[fd]
resource fd_cgroup_pid[fd]
resource fd_cgroup_netprio_ifpriomap[fd]
resource fd_cgroup_devices[fd]
resource fd_cgroup_freezer_state[fd]
resource fd_cgroup_pressure[fd]

# Create/open cgroup directories (mode 0x1ff == 0777).
mkdirat$cgroup_root(fd const[AT_FDCWD], path ptr[in, string[cgroup_dirs]], mode const[0x1ff])
mkdirat$cgroup(fd fd_cgroup, path ptr[in, string[cgroup_names]], mode const[0x1ff])
openat$cgroup_root(fd const[AT_FDCWD], file ptr[in, string[cgroup_dirs]], flags const[CGROUP_OPEN_FLAGS], mode const[0]) fd_cgroup
openat$cgroup(fd fd_cgroup, file ptr[in, string[cgroup_names]], flags const[CGROUP_OPEN_FLAGS], mode const[0]) fd_cgroup

# Open specific control files relative to an already-open cgroup dir fd.
openat$cgroup_ro(fd fd_cgroup, file ptr[in, string[cgroup_ctrl_read]], flags const[O_RDONLY], mode const[0]) fd
openat$cgroup_int(fd fd_cgroup, file ptr[in, string[cgroup_ctrl_int]], flags const[O_RDWR], mode const[0]) fd_cgroup_int
openat$cgroup_procs(fd fd_cgroup, file ptr[in, string[cgroup_proc_files]], flags const[O_RDWR], mode const[0]) fd_cgroup_pid
openat$cgroup_subtree(fd fd_cgroup, file ptr[in, string["cgroup.subtree_control"]], flags const[O_RDWR], mode const[0]) fd_cgroup_subtree
openat$cgroup_type(fd fd_cgroup, file ptr[in, string["cgroup.type"]], flags const[O_RDWR], mode const[0]) fd_cgroup_type
openat$cgroup_pressure(fd fd_cgroup, file ptr[in, string[cgroup_pressure_files]], flags const[O_RDWR], mode const[0]) fd_cgroup_pressure

# Writes to integer-valued and pid-valued control files.
write$cgroup_int(fd fd_cgroup_int, buf ptr[in, fmt[hex, int64]], len bytesize[buf])
write$cgroup_pid(fd fd_cgroup_pid, buf ptr[in, fmt[hex, pid]], len bytesize[buf])
write$cgroup_subtree(fd fd_cgroup_subtree, buf ptr[in, cgroup_subtree], len bytesize[buf])
write$cgroup_type(fd fd_cgroup_type, buf ptr[in, string["threaded"]], len bytesize[buf])
write$cgroup_pressure(fd fd_cgroup_pressure, buf ptr[in, cgroup_pressure], len bytesize[buf])

# Payload for cgroup.subtree_control: a sequence of "+ctrl " / "-ctrl " items.
cgroup_subtree {
	controls	array[cgroup_control]
} [packed]

cgroup_control {
	sign	flags[cgroup_control_signs, int8]
	subsys	stringnoz[cgroup_subsystems]
	sp	const[' ', int8]
} [packed]

cgroup_dirs = "./cgroup/syz0", "./cgroup/syz1", "./cgroup.cpu/syz0", "./cgroup.cpu/syz1", "./cgroup.net/syz0", "./cgroup.net/syz1"
cgroup_names = "syz0", "syz1"
cgroup_control_signs = '+', '-'
cgroup_subsystems = "cpu", "memory", "io", "pids", "rdma", "net", "net_cls", "net_prio", "devices", "blkio", "freezer", "cpuset", "cpuacct", "perf_event", "hugetlb", "rlimit"
cgroup_proc_files = "cgroup.procs", "cgroup.threads", "tasks"
# Control files that are only ever read by the fuzzer.
cgroup_ctrl_read = "cgroup.controllers", "cgroup.events", "cgroup.freeze", "cgroup.kill", "cgroup.stat", "cpu.stat", "io.stat", "memory.current", "memory.events", "memory.events.local", "memory.stat", "memory.swap.current", "memory.swap.events", "memory.numa_stat", "pids.current", "pids.events", "rdma.current", "cpuacct.stat", "cpuacct.usage_all", "cpuacct.usage_percpu", "cpuacct.usage_percpu_sys", "cpuacct.usage_percpu_user", "cpuacct.usage_sys", "cpuacct.usage_user", "cpuset.effective_cpus", "cpuset.effective_mems", "cpuset.memory_pressure", "cpuset.memory_pressure_enabled", "net_prio.prioidx", "devices.list", "freezer.state", "freezer.self_freezing", "freezer.parent_freezing", "hugetlb.2MB.usage_in_bytes", "hugetlb.2MB.rsvd.usage_in_bytes", "hugetlb.1GB.usage_in_bytes", "hugetlb.1GB.rsvd.usage_in_bytes", "blkio.bfq.avg_queue_size", "blkio.bfq.dequeue", "blkio.bfq.empty_time", "blkio.bfq.group_wait_time", "blkio.bfq.idle_time", "blkio.bfq.io_merged", "blkio.bfq.io_merged_recursive", "blkio.bfq.io_queued", "blkio.bfq.io_queued_recursive", "blkio.bfq.io_service_bytes", "blkio.bfq.io_service_bytes_recursive", "blkio.bfq.io_service_time", "blkio.bfq.io_service_time_recursive", "blkio.bfq.io_serviced", "blkio.bfq.io_serviced_recursive", "blkio.bfq.io_wait_time", "blkio.bfq.io_wait_time_recursive", "blkio.bfq.sectors", "blkio.bfq.sectors_recursive", "blkio.bfq.time", "blkio.bfq.time_recursive", "blkio.throttle.io_service_bytes", "blkio.throttle.io_service_bytes_recursive", "blkio.throttle.io_serviced", "blkio.throttle.io_serviced_recursive"
# Control files written with an integer payload (write$cgroup_int).
# Note: the previous revision listed "memory.swap.max" twice; the
# duplicate has been removed.
cgroup_ctrl_int = "cgroup.max.depth", "cgroup.max.descendants", "cpu.weight", "cpu.weight.nice", "io.bfq.weight", "io.latency", "io.max", "io.weight", "memory.high", "memory.low", "memory.max", "memory.min", "memory.swap.max", "memory.swap.high", "memory.oom.group", "pids.max", "rdma.max", "cgroup.clone_children", "cpuacct.usage", "cpuset.cpu_exclusive", "cpuset.cpus", "cpuset.mem_exclusive", "cpuset.mem_hardwall", "cpuset.memory_migrate", "cpuset.memory_spread_page", "cpuset.memory_spread_slab", "cpuset.mems", "cpuset.sched_load_balance", "cpuset.sched_relax_domain_level", "hugetlb.2MB.failcnt", "hugetlb.2MB.limit_in_bytes", "hugetlb.2MB.max_usage_in_bytes", "hugetlb.2MB.rsvd.failcnt", "hugetlb.2MB.rsvd.limit_in_bytes", "hugetlb.2MB.rsvd.max_usage_in_bytes", "hugetlb.1GB.failcnt", "hugetlb.1GB.limit_in_bytes", "hugetlb.1GB.max_usage_in_bytes", "hugetlb.1GB.rsvd.failcnt", "hugetlb.1GB.rsvd.limit_in_bytes", "hugetlb.1GB.rsvd.max_usage_in_bytes", "net_cls.classid", "notify_on_release", "cpu.idle", "cpu.max", "cpu.max.burst", "blkio.reset_stats", "blkio.throttle.read_bps_device", "blkio.throttle.read_iops_device", "blkio.throttle.write_bps_device", "blkio.throttle.write_iops_device"
cgroup_pressure_files = "cpu.pressure", "io.pressure", "memory.pressure"

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

# net_prio (.ifpriomap): payload is "<ifname><sep><prio>".
# NOTE(review): the separator const is 0x32 (ASCII '2'), yet the field is
# named "space" (a space is 0x20) — looks like a possible typo; confirm
# against upstream before changing.
cgroup_netprio_ifpriomap {
	dev	stringnoz[devnames]
	space	const[0x32, int8]
	prio	int8['0':'9']
	null_term	string[""]
} [packed]

openat$cgroup_netprio_ifpriomap(fd fd_cgroup, file ptr[in, string["net_prio.ifpriomap"]], flags const[O_RDWR], mode const[0]) fd_cgroup_netprio_ifpriomap
write$cgroup_netprio_ifpriomap(fd fd_cgroup_netprio_ifpriomap, buf ptr[in, cgroup_netprio_ifpriomap], len bytesize[buf])

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

# devices (.allow, .deny)
cgroup_devices_files = "devices.allow", "devices.deny"

# device types: (a)ll, (c)har, (b)lock
cgroup_devices_type = "a", "c", "b"

# access: (r)ead, (w)rite, (m)knod, or a combination of them
cgroup_devices_access = "r", "w", "m", "rw", "rm", "wm", "rwm"

# TODO: device_major_minor is in form MAJOR:MINOR, where '*' is used for all.
# It is non-trivial to describe valid MAJOR:MINOR as MAJOR takes values from
# a wide range while not all such devices might exist in the system.
cgroup_devices_towrite {
	dev	stringnoz[cgroup_devices_type]
	device_major_minor	stringnoz[" *:* "]
	access	string[cgroup_devices_access]
} [packed]

openat$cgroup_devices(fd fd_cgroup, file ptr[in, string[cgroup_devices_files]], flags const[O_RDWR], mode const[0]) fd_cgroup_devices
write$cgroup_devices(fd fd_cgroup_devices, buf ptr[in, cgroup_devices_towrite], len bytesize[buf])

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

# freezer (.state)
cgroup_freezer_states = "THAWED", "FREEZING", "FROZEN"
openat$cgroup_freezer_state(fd fd_cgroup, file ptr[in, string["freezer.state"]], flags const[O_RDWR], mode const[0]) fd_cgroup_freezer_state
write$cgroup_freezer_state(fd fd_cgroup_freezer_state, buf ptr[in, string[cgroup_freezer_states]], len bytesize[buf])

define CGROUP_OPEN_FLAGS	O_RDWR | O_PATH

# PSI trigger payload: "<some|full> <threshold_us> <window_us>".
cgroup_pressure {
	type	stringnoz[cgroup_pressure_types]
	sp0	const[' ', int8]
	threshold_us	fmt[dec, int64]
	sp1	const[' ', int8]
	window_us	fmt[dec, int64]
	z	const[0, int8]
}

cgroup_pressure_types = "full", "some"