github.com/Prakhar-Agarwal-byte/moby@v0.0.0-20231027092010-a14e3e8ab87e/libnetwork/ipam/parallel_test.go

package ipam

import (
	"context"
	"fmt"
	"math/rand"
	"net"
	"sort"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/Prakhar-Agarwal-byte/moby/libnetwork/ipamapi"
	"github.com/Prakhar-Agarwal-byte/moby/libnetwork/ipamutils"
	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
)

const (
	all = iota
	even
	odd
)

type releaseMode uint

type testContext struct {
	a      *Allocator
	opts   map[string]string
	ipList []*net.IPNet
	ipMap  map[string]bool
	pid    string
	maxIP  int
}

func newTestContext(t *testing.T, mask int, options map[string]string) *testContext {
	a, err := NewAllocator(ipamutils.GetLocalScopeDefaultNetworks(), ipamutils.GetGlobalScopeDefaultNetworks())
	if err != nil {
		t.Fatal(err)
	}
	network := fmt.Sprintf("192.168.100.0/%d", mask)
	// Total number of allocatable IPs: 2^(32-mask) - 2 (the network and broadcast addresses are excluded).
	totalIps := 1<<uint(32-mask) - 2

	pid, _, _, err := a.RequestPool(localAddressSpace, network, "", nil, false)
	if err != nil {
		t.Fatal(err)
	}

	return &testContext{
		a:      a,
		opts:   options,
		ipList: make([]*net.IPNet, 0, totalIps),
		ipMap:  make(map[string]bool),
		pid:    pid,
		maxIP:  totalIps,
	}
}

func TestDebug(t *testing.T) {
	tctx := newTestContext(t, 23, map[string]string{ipamapi.AllocSerialPrefix: "true"})
	tctx.a.RequestAddress(tctx.pid, nil, map[string]string{ipamapi.AllocSerialPrefix: "true"})
	tctx.a.RequestAddress(tctx.pid, nil, map[string]string{ipamapi.AllocSerialPrefix: "true"})
}

type op struct {
	id   int32
	add  bool
	name string
}

func (o *op) String() string {
	return fmt.Sprintf("%+v", *o)
}

func TestRequestPoolParallel(t *testing.T) {
	a, err := NewAllocator(ipamutils.GetLocalScopeDefaultNetworks(), ipamutils.GetGlobalScopeDefaultNetworks())
	if err != nil {
		t.Fatal(err)
	}
	var operationIndex int32
	ch := make(chan *op, 240)

	group := new(errgroup.Group)
	defer func() {
		if err := group.Wait(); err != nil {
			t.Fatal(err)
		}
	}()

	for i := 0; i < 120; i++ {
		group.Go(func() error {
			name, _, _, err := a.RequestPool("GlobalDefault", "", "", nil, false)
			if err != nil {
				t.Log(err) // log so we can see the error in real time rather than at the end when we actually call "Wait"
				return fmt.Errorf("request error %v", err)
			}
			idx := atomic.AddInt32(&operationIndex, 1)
			ch <- &op{idx, true, name}
			time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond)
			idx = atomic.AddInt32(&operationIndex, 1)
			err = a.ReleasePool(name)
			if err != nil {
				t.Log(err) // log so we can see the error in real time rather than at the end when we actually call "Wait"
				return fmt.Errorf("release error %v", err)
			}
			ch <- &op{idx, false, name}
			return nil
		})
	}

	// Collect the 240 events (one request and one release per goroutine), keyed by pool name.
	m := make(map[string][]*op)
	for i := 0; i < 240; i++ {
		x := <-ch
		ops, ok := m[x.name]
		if !ok {
			ops = make([]*op, 0, 10)
		}
		ops = append(ops, x)
		m[x.name] = ops
	}

	// Post-process to compensate for event reordering on the channel: sort each
	// pool's events by their atomically assigned ids, then verify that requests
	// and releases strictly alternate, starting with a request.
	for pool, ops := range m {
		sort.Slice(ops, func(i, j int) bool {
			return ops[i].id < ops[j].id
		})
		expected := true
		for _, op := range ops {
			if op.add != expected {
				t.Fatalf("Operations for %v not valid %v, operations %v", pool, op, ops)
			}
			expected = !expected
		}
	}
}
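
// Illustrative addition, not part of the original suite: the alternation
// invariant checked at the end of TestRequestPoolParallel, exercised on a
// small synthetic per-pool history. Sorting by op id must reconstruct a
// strict request/release alternation regardless of channel delivery order.
func TestOpAlternationInvariantExample(t *testing.T) {
	ops := []*op{
		{id: 3, add: true, name: "pool"},
		{id: 4, add: false, name: "pool"},
		{id: 1, add: true, name: "pool"},
		{id: 2, add: false, name: "pool"},
	}
	sort.Slice(ops, func(i, j int) bool { return ops[i].id < ops[j].id })
	expected := true
	for _, o := range ops {
		if o.add != expected {
			t.Fatalf("operations not alternating: %v in %v", o, ops)
		}
		expected = !expected
	}
}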

func TestFullAllocateRelease(t *testing.T) {
	for _, parallelism := range []int64{2, 4, 8} {
		for _, mask := range []int{29, 25, 24, 21} {
			tctx := newTestContext(t, mask, map[string]string{ipamapi.AllocSerialPrefix: "true"})
			allocate(t, tctx, parallelism)
			release(t, tctx, all, parallelism)
		}
	}
}

func TestOddAllocateRelease(t *testing.T) {
	for _, parallelism := range []int64{2, 4, 8} {
		for _, mask := range []int{29, 25, 24, 21} {
			tctx := newTestContext(t, mask, map[string]string{ipamapi.AllocSerialPrefix: "true"})
			allocate(t, tctx, parallelism)
			release(t, tctx, odd, parallelism)
		}
	}
}

func TestFullAllocateSerialReleaseParallel(t *testing.T) {
	for _, parallelism := range []int64{1, 4, 8} {
		tctx := newTestContext(t, 23, map[string]string{ipamapi.AllocSerialPrefix: "true"})
		allocate(t, tctx, 1)
		release(t, tctx, all, parallelism)
	}
}

func TestOddAllocateSerialReleaseParallel(t *testing.T) {
	for _, parallelism := range []int64{1, 4, 8} {
		tctx := newTestContext(t, 23, map[string]string{ipamapi.AllocSerialPrefix: "true"})
		allocate(t, tctx, 1)
		release(t, tctx, odd, parallelism)
	}
}

func TestEvenAllocateSerialReleaseParallel(t *testing.T) {
	for _, parallelism := range []int64{1, 4, 8} {
		tctx := newTestContext(t, 23, map[string]string{ipamapi.AllocSerialPrefix: "true"})
		allocate(t, tctx, 1)
		release(t, tctx, even, parallelism)
	}
}

func allocate(t *testing.T, tctx *testContext, parallel int64) {
	// Allocate the whole space, spawning a few more goroutines than there are
	// addresses so the last requests are expected to come back empty.
	parallelExec := semaphore.NewWeighted(parallel)
	routineNum := tctx.maxIP + 10
	ch := make(chan *net.IPNet, routineNum)
	var wg sync.WaitGroup
	for i := 0; i < routineNum; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			parallelExec.Acquire(context.Background(), 1)
			ip, _, _ := tctx.a.RequestAddress(tctx.pid, nil, tctx.opts)
			ch <- ip
			parallelExec.Release(1)
		}()
	}

	// wait for all the goroutines to finish
	wg.Wait()

	// Process results: a nil IP means the pool was exhausted for that request.
	for i := 0; i < routineNum; i++ {
		ip := <-ch
		if ip == nil {
			continue
		}
		if there, ok := tctx.ipMap[ip.String()]; ok && there {
			t.Fatalf("Got duplicate IP %s", ip.String())
		}
		tctx.ipList = append(tctx.ipList, ip)
		tctx.ipMap[ip.String()] = true
	}

	assert.Assert(t, is.Len(tctx.ipList, tctx.maxIP))
}
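
// Illustrative addition, not part of the original suite: the bounded-parallelism
// pattern that allocate and release rely on, shown in isolation. A weighted
// semaphore of size limit admits at most limit goroutines into the critical
// section at a time; the in-flight counter verifies that bound. The constants
// are arbitrary.
func TestSemaphoreBoundExample(t *testing.T) {
	const limit, tasks = 4, 64
	sem := semaphore.NewWeighted(limit)
	var inFlight int32
	var wg sync.WaitGroup
	for i := 0; i < tasks; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			sem.Acquire(context.Background(), 1)
			defer sem.Release(1)
			if n := atomic.AddInt32(&inFlight, 1); n > limit {
				t.Errorf("concurrency bound exceeded: %d > %d", n, limit)
			}
			time.Sleep(time.Millisecond)
			atomic.AddInt32(&inFlight, -1)
		}()
	}
	wg.Wait()
}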

func release(t *testing.T, tctx *testContext, mode releaseMode, parallel int64) {
	var startIndex, increment, stopIndex, length int
	switch mode {
	case all:
		startIndex = 0
		increment = 1
		stopIndex = tctx.maxIP - 1
		length = tctx.maxIP
	case odd, even:
		if mode == odd {
			startIndex = 1
		}
		increment = 2
		stopIndex = tctx.maxIP - 1
		length = tctx.maxIP / 2
		if tctx.maxIP%2 > 0 {
			length++
		}
	default:
		t.Fatal("unsupported release mode")
	}

	ipIndex := make([]int, 0, length)
	// Compute the ipList indexes of the entries to release.
	for i := startIndex; ; i += increment {
		ipIndex = append(ipIndex, i)
		if i+increment > stopIndex {
			break
		}
	}

	var id int
	parallelExec := semaphore.NewWeighted(parallel)
	ch := make(chan *net.IPNet, len(ipIndex))
	group := new(errgroup.Group)
	// Iterate over the computed index values (not slice positions) so the
	// odd/even modes release the entries they were meant to.
	for _, index := range ipIndex {
		index, id := index, id // capture per-iteration copies for the closure
		group.Go(func() error {
			parallelExec.Acquire(context.Background(), 1)
			err := tctx.a.ReleaseAddress(tctx.pid, tctx.ipList[index].IP)
			if err != nil {
				return fmt.Errorf("routine %d got %v", id, err)
			}
			ch <- tctx.ipList[index]
			parallelExec.Release(1)
			return nil
		})
		id++
	}

	if err := group.Wait(); err != nil {
		t.Fatal(err)
	}

	for i := 0; i < len(ipIndex); i++ {
		ip := <-ch

		// Check that the address is really free: request it back explicitly,
		// then release it again so the pool state is left unchanged.
		_, _, err := tctx.a.RequestAddress(tctx.pid, ip.IP, nil)
		assert.NilError(t, err, "ip %v not properly released", ip)
		err = tctx.a.ReleaseAddress(tctx.pid, ip.IP)
		assert.NilError(t, err)

		if there, ok := tctx.ipMap[ip.String()]; !ok || !there {
			t.Fatalf("ip %v got double deallocated", ip)
		}
		tctx.ipMap[ip.String()] = false
		for j, v := range tctx.ipList {
			if v == ip {
				tctx.ipList = append(tctx.ipList[:j], tctx.ipList[j+1:]...)
				break
			}
		}
	}

	assert.Check(t, is.Len(tctx.ipList, tctx.maxIP-length))
}
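
// Illustrative addition, not part of the original suite: the index schedule
// computed at the top of release, restated as a pure helper and sanity-checked
// on a small even-sized pool. releaseIndices is a hypothetical helper, not
// used by the tests above.
func releaseIndices(mode releaseMode, maxIP int) []int {
	start, inc := 0, 1
	if mode == odd || mode == even {
		inc = 2
		if mode == odd {
			start = 1
		}
	}
	var indices []int
	for i := start; i < maxIP; i += inc {
		indices = append(indices, i)
	}
	return indices
}

func TestReleaseIndicesExample(t *testing.T) {
	assert.Check(t, is.DeepEqual(releaseIndices(all, 6), []int{0, 1, 2, 3, 4, 5}))
	assert.Check(t, is.DeepEqual(releaseIndices(odd, 6), []int{1, 3, 5}))
	assert.Check(t, is.DeepEqual(releaseIndices(even, 6), []int{0, 2, 4}))
}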