// github.com/lrita/numa@v1.0.2/numa.go
package numa

import (
	"fmt"
)

var (
	available bool
	// The max possible node count, i.e. the number of nodes the local
	// platform can support.
	// nnodemax = @nodemask_sz + 1
	nnodemax int
	// The max configured (enabled/set up) node, i.e. the number of nodes
	// actually available on the local platform.
	// nconfigurednode = @maxconfigurednode + 1
	nconfigurednode int
	// The max possible cpu count, i.e. the number of cpus the local
	// platform can support.
	// ncpumax = @cpumask_sz + 1
	ncpumax int
	// nconfiguredcpu = @maxconfiguredcpu
	nconfiguredcpu int

	memnodes  Bitmask
	numanodes Bitmask

	cpu2node map[int]int
	node2cpu map[int]Bitmask
)

const (
	// Memory policies for GetMemPolicy/SetMemPolicy.
	MPOL_DEFAULT = iota
	MPOL_PREFERRED
	MPOL_BIND
	MPOL_INTERLEAVE
	MPOL_LOCAL
	MPOL_MAX

	// MPOL_F_STATIC_NODES since Linux 2.6.26.
	// A nonempty nodemask specifies physical node ids. Linux will not
	// remap the nodemask when the process moves to a different cpuset
	// context, nor when the set of nodes allowed by the process's current
	// cpuset context changes.
	MPOL_F_STATIC_NODES = 1 << 15

	// MPOL_F_RELATIVE_NODES since Linux 2.6.26.
	// A nonempty nodemask specifies node ids that are relative to the set
	// of node ids allowed by the process's current cpuset.
	MPOL_F_RELATIVE_NODES = 1 << 14

	// MPOL_MODE_FLAGS is the union of all possible optional mode flags passed
	// to either SetMemPolicy() or mbind().
	MPOL_MODE_FLAGS = MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES
)

const (
	// Flags for GetMemPolicy.
	// Return next IL node or node of address.
	// Warning: MPOL_F_NODE is unsupported and subject to change. Don't use.
	MPOL_F_NODE = 1 << iota
	// Look up vma using address.
	MPOL_F_ADDR
	// Query nodes allowed in cpuset.
	MPOL_F_MEMS_ALLOWED
)

const (
	// Flags for mbind.
	// Verify existing pages in the mapping.
	MPOL_MF_STRICT = 1 << iota
	// Move pages owned by this process to conform to mapping.
	MPOL_MF_MOVE
	// Move every page to conform to mapping.
	MPOL_MF_MOVE_ALL
	// Modifies '_MOVE': lazy migrate on fault.
	MPOL_MF_LAZY
	// Internal flags start here.
	MPOL_MF_INTERNAL

	MPOL_MF_VALID = MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL
)

// Available reports whether the current platform supports NUMA.
// @ int numa_available(void)
func Available() bool {
	return available
}

// MaxNodeID returns the max id of the currently configured NUMA nodes.
// @numa_max_node_int
func MaxNodeID() int {
	return nconfigurednode - 1
}

// MaxPossibleNodeID returns the max possible node id that this platform
// supports. The max possible node id is always at least the max configured
// node id.
func MaxPossibleNodeID() int {
	return nnodemax - 1
}

// NodeCount returns the count of the currently configured NUMA nodes.
//
// NOTE: this function's behavior matches the documentation (i.e. it
// returns a count of nodes with memory) despite the poor function
// naming. We also cannot use the similarly poorly named
// numa_all_nodes_ptr as it only tracks nodes with memory from which
// the calling process can allocate. Think sparse nodes, memory-less
// nodes, cpusets...
// @numa_num_configured_nodes
func NodeCount() int {
	return memnodes.OnesCount()
}
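// exampleTopology is a minimal sketch of querying the topology information
// exposed above; it is illustrative only and assumes the platform-specific
// init code has already populated the package-level state.
func exampleTopology() {
	if !Available() {
		fmt.Println("NUMA is not supported on this platform")
		return
	}
	fmt.Printf("configured nodes: %d (max node id %d, max possible node id %d)\n",
		NodeCount(), MaxNodeID(), MaxPossibleNodeID())
	fmt.Printf("configured cpus: %d (possible cpus: %d)\n",
		CPUCount(), CPUPossibleCount())
}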
// NodeMask returns the mask of the currently configured nodes.
func NodeMask() Bitmask {
	return memnodes.Clone()
}

// NodePossibleCount returns the possible NUMA node count that the current
// platform supports.
func NodePossibleCount() int {
	return nnodemax
}

// CPUPossibleCount returns the possible cpu count that the current platform
// supports.
func CPUPossibleCount() int {
	return ncpumax
}

// CPUCount returns the currently configured (enabled/detected) cpu count,
// which may differ from runtime.NumCPU().
func CPUCount() int {
	return nconfiguredcpu
}

// RunningNodesMask returns the bitmask of NUMA nodes that the current process
// is running on.
// @numa_get_run_node_mask_v2
func RunningNodesMask() (Bitmask, error) {
	nodemask := NewBitmask(NodePossibleCount())
	cpumask := NewBitmask(CPUPossibleCount())
	if _, err := GetSchedAffinity(0, cpumask); err != nil {
		return nil, err
	}
	for i := 0; i < cpumask.Len(); i++ {
		if !cpumask.Get(i) {
			continue
		}
		n, err := CPUToNode(i)
		if err != nil {
			return nil, err
		}
		nodemask.Set(n, true)
	}
	return nodemask, nil
}

// RunningCPUMask returns the bitmask of cpus that the current process is
// running on.
func RunningCPUMask() (Bitmask, error) {
	cpumask := NewBitmask(CPUPossibleCount())
	if _, err := GetSchedAffinity(0, cpumask); err != nil {
		return nil, err
	}
	return cpumask[:len(NewBitmask(CPUCount()))], nil
}

// NodeToCPUMask returns the cpumask of the given node id.
// @numa_node_to_cpus_v2
func NodeToCPUMask(node int) (Bitmask, error) {
	if node > MaxPossibleNodeID() {
		return nil, fmt.Errorf("node %d is out of range", node)
	}
	cpumask, ok := node2cpu[node]
	if !ok {
		return nil, fmt.Errorf("node %d not found", node)
	}
	return cpumask.Clone(), nil
}

// CPUToNode returns the node id of the given cpu id.
func CPUToNode(cpu int) (int, error) {
	node, ok := cpu2node[cpu]
	if !ok {
		return 0, fmt.Errorf("cpu %d not found", cpu)
	}
	return node, nil
}

// RunOnNode sets the current process to run on the given node.
// The special node -1 sets the current process to run on all available nodes.
// @numa_run_on_node
func RunOnNode(node int) (err error) {
	var cpumask Bitmask
	switch {
	case node == -1:
		cpumask = NewBitmask(CPUPossibleCount())
		cpumask.SetAll()
	case node >= 0:
		cpumask, err = NodeToCPUMask(node)
		if err != nil {
			return err
		}
	default:
		return fmt.Errorf("invalid node %d", node)
	}
	return SetSchedAffinity(0, cpumask)
}

// GetMemAllowedNodeMask returns the bitmask of nodes from which the current
// process is allowed to allocate memory.
// @numa_get_mems_allowed
func GetMemAllowedNodeMask() (Bitmask, error) {
	mask := NewBitmask(NodePossibleCount())
	if _, err := GetMemPolicy(mask, nil, MPOL_F_MEMS_ALLOWED); err != nil {
		return nil, err
	}
	return mask, nil
}
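// examplePinToNode is a minimal sketch, not part of the original API: it pins
// the current process to a single NUMA node via RunOnNode and then reports
// the resulting scheduling and memory-allocation masks. The node id passed in
// is assumed to be one of the configured nodes reported by NodeMask().
func examplePinToNode(node int) error {
	if !Available() {
		return fmt.Errorf("NUMA is not available on this platform")
	}
	if err := RunOnNode(node); err != nil {
		return err
	}
	running, err := RunningNodesMask()
	if err != nil {
		return err
	}
	allowed, err := GetMemAllowedNodeMask()
	if err != nil {
		return err
	}
	fmt.Printf("running on %d node(s), may allocate from %d node(s)\n",
		running.OnesCount(), allowed.OnesCount())
	return nil
}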
// RunOnNodeMask runs the current process on the nodes given by the bitmask.
// @numa_run_on_node_mask_v2
func RunOnNodeMask(mask Bitmask) error {
	cpumask := NewBitmask(CPUPossibleCount())
	m := mask.Clone()
	for i := 0; i < mask.Len(); i++ {
		if !m.Get(i) {
			continue
		}
		if !memnodes.Get(i) {
			continue
		}
		cpu, err := NodeToCPUMask(i)
		if err != nil {
			return err
		}
		// Only enable the cpus that actually belong to node i.
		for j := 0; j < cpu.Len(); j++ {
			if cpu.Get(j) {
				cpumask.Set(j, true)
			}
		}
	}
	return SetSchedAffinity(0, cpumask)
}

// Bind binds the current process to the nodes given by the bitmask, for both
// cpu scheduling and memory allocation.
// @numa_bind_v2
func Bind(mask Bitmask) error {
	if err := RunOnNodeMask(mask); err != nil {
		return err
	}
	return SetMemPolicy(MPOL_BIND, mask)
}
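// exampleBindToNode is a minimal sketch of using Bind: it restricts both the
// cpu affinity and the memory policy of the current process to a single node.
// The caller is assumed to pass a node id that is set in NodeMask().
func exampleBindToNode(node int) error {
	mask := NewBitmask(NodePossibleCount())
	mask.Set(node, true)
	return Bind(mask)
}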