github.com/lrita/numa@v1.0.2/numa_test.go

package numa

import (
	"runtime"
	"sync"
	"syscall"
	"testing"
	"unsafe"

	"github.com/stretchr/testify/require"
)

// TestNotAvailable exercises the wrappers on systems without NUMA support;
// every call is expected to fail with syscall.ENOSYS or a validation error.
func TestNotAvailable(t *testing.T) {
	if Available() {
		t.Skip("skip by available")
	}
	assert := require.New(t)
	_, err := GetMemPolicy(nil, nil, 0)
	assert.Equal(syscall.ENOSYS, err)
	assert.Equal(syscall.ENOSYS, SetMemPolicy(MPOL_DEFAULT, nil))

	assert.Equal(syscall.ENOSYS, Bind(nil))
	assert.Equal(syscall.ENOSYS, MBind(nil, 0, 0, 0, nil))

	_, err = GetSchedAffinity(0, nil)
	assert.Equal(syscall.ENOSYS, err)
	assert.Equal(syscall.ENOSYS, SetSchedAffinity(0, nil))

	assert.Equal(syscall.ENOSYS, RunOnNode(-1))
	assert.Equal(syscall.ENOSYS, RunOnNode(0))
	assert.Error(RunOnNode(NodePossibleCount() + 1))
	assert.Error(RunOnNode(-2))

	for i := 0; i < CPUCount()+10; i++ {
		node, err := CPUToNode(i)
		if i < CPUCount() {
			assert.NoError(err)
			assert.Equal(0, node)
		} else {
			assert.Error(err)
		}
	}

	_, err = NodeToCPUMask(NodePossibleCount() + 1)
	assert.Error(err)
	_, err = NodeToCPUMask(NodePossibleCount() + 1)
	assert.Error(err)

	_, err = RunningNodesMask()
	assert.Error(err)

	_, err = RunningCPUMask()
	assert.Error(err)

	assert.Equal(syscall.ENOSYS, RunOnNodeMask(NodeMask()))
}

// TestNodeMemSize64 queries the total and free memory of each configured
// node, or expects ENOSYS when NUMA is unavailable.
func TestNodeMemSize64(t *testing.T) {
	var (
		assert   = require.New(t)
		nodemask = NodeMask()
	)
	if !Available() {
		for i := 0; i < nodemask.Len(); i++ {
			_, _, err := NodeMemSize64(i)
			assert.Equal(syscall.ENOSYS, err)
		}
	} else {
		for i := 0; i < nodemask.Len(); i++ {
			if !nodemask.Get(i) {
				continue
			}
			total, freed, err := NodeMemSize64(i)
			assert.NoError(err)
			assert.True(total > 0)
			assert.True(freed >= 0)
		}
	}
}

// TestNUMAAPI sanity-checks the node and CPU counting helpers.
func TestNUMAAPI(t *testing.T) {
	assert := require.New(t)
	assert.True(MaxNodeID() >= 0, "MaxNodeID() >= 0")
	assert.True(MaxPossibleNodeID() >= 0, "MaxPossibleNodeID() >= 0")
	assert.True(MaxPossibleNodeID() >= MaxNodeID())
	assert.True(NodeCount() > 0, "NodeCount() > 0")
	assert.True(NodePossibleCount() > 0, "NodePossibleCount() > 0")
	assert.True(NodePossibleCount() >= NodeCount())
	assert.True(CPUCount() > 0)
}

// TestMemPolicy reads the current memory policy and resets it to MPOL_DEFAULT.
func TestMemPolicy(t *testing.T) {
	if !Available() {
		t.Skip()
	}
	assert := require.New(t)

	t.Log("nnodemask = ", nnodemax)
	t.Log("nconfigurednode =", nconfigurednode)
	t.Log("ncpumask =", ncpumax)
	t.Log("nconfiguredcpu =", nconfiguredcpu)

	mode, err := GetMemPolicy(nil, nil, 0)
	assert.NoError(err)
	assert.True(mode >= 0 && mode < MPOL_MAX, "%#v", mode)
	assert.NoError(SetMemPolicy(MPOL_DEFAULT, nil))
}

// TestGetMemAllowedNodeMaskAndBind retrieves the node mask the process is
// allowed to allocate on and binds the process to it.
func TestGetMemAllowedNodeMaskAndBind(t *testing.T) {
	assert := require.New(t)
	mask, err := GetMemAllowedNodeMask()
	if Available() {
		assert.NoError(err)
		assert.True(mask.OnesCount() > 0)
		assert.NoError(Bind(mask))
	} else {
		assert.Equal(syscall.ENOSYS, err)
		t.Skip("skip by not available")
	}
}

// TestRunOnNodeAndRunningNodesMask pins the process to each running node in
// turn and checks the reported node and CPU masks.
func TestRunOnNodeAndRunningNodesMask(t *testing.T) {
	if !Available() {
		t.Skip("skip by not available")
	}
	assert := require.New(t)
	mask, err := RunningNodesMask()
	assert.NoError(err)
	assert.True(mask.OnesCount() > 0)
	for i := 0; i < mask.Len(); i++ {
		if !mask.Get(i) {
			continue
		}
		assert.NoError(RunOnNode(i), "run on node %d", i)

		cpumask, err := NodeToCPUMask(i)
		assert.NoError(err)
		assert.True(cpumask.OnesCount() > 0)

		gotmask, err := RunningCPUMask()
		assert.NoError(err)
		assert.Equal(cpumask, gotmask)

		for j := 0; j < cpumask.Len(); j++ {
			if !cpumask.Get(j) {
				continue
			}
			node, err := CPUToNode(j)
			assert.NoError(err)
			assert.Equal(i, node)
		}
	}

	assert.NoError(RunOnNode(-1))
	assert.Error(RunOnNode(-2))
	assert.Error(RunOnNode(1 << 20))

	_, err = CPUToNode(CPUPossibleCount())
	assert.Error(err)
}

// TestMBind checks that MBind rejects an invalid address range with EINVAL.
func TestMBind(t *testing.T) {
	if !Available() {
		t.Skip("skip by not available")
	}
	assert := require.New(t)

	assert.Equal(syscall.EINVAL,
		MBind(unsafe.Pointer(t), 100, MPOL_DEFAULT, 0, nil))
}

// TestGetNodeAndCPU calls GetCPUAndNode from many goroutines and verifies
// that every observed CPU belongs to the node it was reported on.
func TestGetNodeAndCPU(t *testing.T) {
	if !Available() {
		t.Skip("skip by not available")
	}
	var (
		nodem  = NewBitmask(NodePossibleCount())
		mu     sync.Mutex
		wg     sync.WaitGroup
		assert = require.New(t)
	)
	cpum := make([]Bitmask, NodePossibleCount())
	for i := 0; i < len(cpum); i++ {
		cpum[i] = NewBitmask(CPUPossibleCount())
	}
	for i := 0; i < CPUCount(); i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 1000; i++ {
				cpu, node := GetCPUAndNode()
				mu.Lock()
				cpum[node].Set(cpu, true)
				nodem.Set(node, true)
				mu.Unlock()
				runtime.Gosched()
			}
		}()
	}
	wg.Wait()

	nmask := NodeMask()
	for i := 0; i < nodem.Len(); i++ {
		if !nodem.Get(i) {
			continue
		}
		assert.True(nmask.Get(i), "node %d", i)
		cpumask, err := NodeToCPUMask(i)
		assert.NoError(err)
		cmask := cpum[i]
		for j := 0; j < cmask.Len(); j++ {
			if !cmask.Get(j) {
				continue
			}
			assert.True(cpumask.Get(j), "cpu %d @ node %d", j, i)
		}
	}
}

// BenchmarkGetCPUAndNode measures the cost of GetCPUAndNode under parallel load.
func BenchmarkGetCPUAndNode(b *testing.B) {
	b.RunParallel(func(bp *testing.PB) {
		for bp.Next() {
			GetCPUAndNode()
		}
	})
}
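
// The sketch below is not part of the upstream test suite; it is an
// illustrative example of a typical pinning workflow using only APIs that
// appear in the tests above: lock the goroutine to its OS thread, pin the
// thread to one online node with RunOnNode, then observe placement via
// GetCPUAndNode and NodeToCPUMask. The test name and the use of
// runtime.LockOSThread are this example's own choices; treat it as a sketch
// of intended usage, not as an additional assertion of library behaviour.
func TestExamplePinToFirstNode(t *testing.T) {
	if !Available() {
		t.Skip("skip by not available")
	}
	// Scheduler affinity applies to the OS thread, so keep this goroutine on
	// one thread while we pin and inspect it.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	mask, err := RunningNodesMask()
	if err != nil {
		t.Fatalf("RunningNodesMask: %v", err)
	}
	for i := 0; i < mask.Len(); i++ {
		if !mask.Get(i) {
			continue
		}
		// Pin the current thread to the first node the process may run on.
		if err := RunOnNode(i); err != nil {
			t.Fatalf("RunOnNode(%d): %v", i, err)
		}
		cpu, node := GetCPUAndNode()
		t.Logf("running on cpu %d of node %d", cpu, node)

		// The CPU we are on should belong to the node we pinned to; log
		// rather than fail, since placement may lag the affinity change.
		cpumask, err := NodeToCPUMask(i)
		if err != nil {
			t.Fatalf("NodeToCPUMask(%d): %v", i, err)
		}
		if !cpumask.Get(cpu) {
			t.Logf("cpu %d not yet in node %d mask", cpu, i)
		}
		break
	}

	// Undo the pinning so later tests see the original affinity.
	if err := RunOnNode(-1); err != nil {
		t.Fatalf("RunOnNode(-1): %v", err)
	}
}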