github.com/Heebron/moby@v0.0.0-20221111184709-6eab4f55faf7/libnetwork/ipam/parallel_test.go

package ipam

import (
	"context"
	"fmt"
	"math/rand"
	"net"
	"sort"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/docker/docker/libnetwork/ipamapi"
	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
)

const (
	all releaseMode = iota
	even
	odd
)

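// releaseMode selects which subset of the allocated addresses release frees:
// the whole list, or only the even- or odd-indexed entries.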
type releaseMode uint

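// testContext carries the state shared by the allocate/release helpers: the
// allocator under test, the pool ID, the addresses handed out so far, and the
// number of assignable IPs in the pool.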
type testContext struct {
	a      *Allocator
	opts   map[string]string
	ipList []*net.IPNet
	ipMap  map[string]bool
	pid    string
	maxIP  int
}

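// newTestContext builds an Allocator with an extra "giallo" address space and
// requests a 192.168.100.0/<mask> pool from it.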
func newTestContext(t *testing.T, mask int, options map[string]string) *testContext {
	a, err := getAllocator(false)
	if err != nil {
		t.Fatal(err)
	}
	a.addrSpaces["giallo"] = &addrSpace{
		id:      dsConfigKey + "/" + "giallo",
		ds:      a.addrSpaces[localAddressSpace].ds,
		alloc:   a.addrSpaces[localAddressSpace].alloc,
		scope:   a.addrSpaces[localAddressSpace].scope,
		subnets: map[SubnetKey]*PoolData{},
	}

	network := fmt.Sprintf("192.168.100.0/%d", mask)
	// total IPs: 2^(32-mask) - 2 (minus the network and broadcast addresses)
	totalIps := 1<<uint(32-mask) - 2

	pid, _, _, err := a.RequestPool("giallo", network, "", nil, false)
	if err != nil {
		t.Fatal(err)
	}

	return &testContext{
		a:      a,
		opts:   options,
		ipList: make([]*net.IPNet, 0, totalIps),
		ipMap:  make(map[string]bool),
		pid:    pid,
		maxIP:  totalIps,
	}
}

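// TestDebug is a smoke test: it issues two serial address requests against a
// fresh /23 pool, ignoring the results.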
func TestDebug(t *testing.T) {
	tctx := newTestContext(t, 23, map[string]string{ipamapi.AllocSerialPrefix: "true"})
	tctx.a.RequestAddress(tctx.pid, nil, map[string]string{ipamapi.AllocSerialPrefix: "true"})
	tctx.a.RequestAddress(tctx.pid, nil, map[string]string{ipamapi.AllocSerialPrefix: "true"})
}

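// op records a single pool operation (request or release) together with a
// global ordering index, so that interleavings can be validated after the fact.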
type op struct {
	id   int32
	add  bool
	name string
}

func (o *op) String() string {
	return fmt.Sprintf("%+v", *o)
}

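// TestRequestPoolParallel runs 120 concurrent RequestPool/ReleasePool pairs
// and then checks, per pool, that requests and releases strictly alternate.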
func TestRequestPoolParallel(t *testing.T) {
	a, err := getAllocator(false)
	if err != nil {
		t.Fatal(err)
	}
	var operationIndex int32
	ch := make(chan *op, 240)

	group := new(errgroup.Group)
	defer func() {
		if err := group.Wait(); err != nil {
			t.Fatal(err)
		}
	}()

	for i := 0; i < 120; i++ {
		group.Go(func() error {
			name, _, _, err := a.RequestPool("GlobalDefault", "", "", nil, false)
			if err != nil {
				t.Log(err) // log so we can see the error in real time rather than at the end when we actually call "Wait".
				return fmt.Errorf("request error %v", err)
			}
			idx := atomic.AddInt32(&operationIndex, 1)
			ch <- &op{idx, true, name}
			time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond)
			idx = atomic.AddInt32(&operationIndex, 1)
			err = a.ReleasePool(name)
			if err != nil {
				t.Log(err) // log so we can see the error in real time rather than at the end when we actually call "Wait".
				return fmt.Errorf("release error %v", err)
			}
			ch <- &op{idx, false, name}
			return nil
		})
	}

	// group the events by pool name
	m := make(map[string][]*op)
	for i := 0; i < 240; i++ {
		x := <-ch
		m[x.name] = append(m[x.name], x)
	}

	// Sort each pool's events by id to undo any reordering on the channel,
	// then verify that requests and releases strictly alternate.
	for pool, ops := range m {
		sort.Slice(ops, func(i, j int) bool {
			return ops[i].id < ops[j].id
		})
		expected := true
		for _, op := range ops {
			if op.add != expected {
				t.Fatalf("Operations for %v not valid %v, operations %v", pool, op, ops)
			}
			expected = !expected
		}
	}
}

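// The tests below allocate an entire pool (serially or in parallel) and then
// release either all addresses or only the odd- or even-indexed ones.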
func TestFullAllocateRelease(t *testing.T) {
	for _, parallelism := range []int64{2, 4, 8} {
		for _, mask := range []int{29, 25, 24, 21} {
			tctx := newTestContext(t, mask, map[string]string{ipamapi.AllocSerialPrefix: "true"})
			allocate(t, tctx, parallelism)
			release(t, tctx, all, parallelism)
		}
	}
}

func TestOddAllocateRelease(t *testing.T) {
	for _, parallelism := range []int64{2, 4, 8} {
		for _, mask := range []int{29, 25, 24, 21} {
			tctx := newTestContext(t, mask, map[string]string{ipamapi.AllocSerialPrefix: "true"})
			allocate(t, tctx, parallelism)
			release(t, tctx, odd, parallelism)
		}
	}
}

func TestFullAllocateSerialReleaseParallel(t *testing.T) {
	for _, parallelism := range []int64{1, 4, 8} {
		tctx := newTestContext(t, 23, map[string]string{ipamapi.AllocSerialPrefix: "true"})
		allocate(t, tctx, 1)
		release(t, tctx, all, parallelism)
	}
}

func TestOddAllocateSerialReleaseParallel(t *testing.T) {
	for _, parallelism := range []int64{1, 4, 8} {
		tctx := newTestContext(t, 23, map[string]string{ipamapi.AllocSerialPrefix: "true"})
		allocate(t, tctx, 1)
		release(t, tctx, odd, parallelism)
	}
}

func TestEvenAllocateSerialReleaseParallel(t *testing.T) {
	for _, parallelism := range []int64{1, 4, 8} {
		tctx := newTestContext(t, 23, map[string]string{ipamapi.AllocSerialPrefix: "true"})
		allocate(t, tctx, 1)
		release(t, tctx, even, parallelism)
	}
}

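// allocate requests every address in the pool, using at most parallel
// concurrent requests, and fails the test on duplicate assignments.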
func allocate(t *testing.T, tctx *testContext, parallel int64) {
	// Allocate the whole space
	parallelExec := semaphore.NewWeighted(parallel)
	// Spawn ten more goroutines than there are assignable addresses; the
	// surplus requests are expected to fail once the pool is exhausted.
	routineNum := tctx.maxIP + 10
	ch := make(chan *net.IPNet, routineNum)
	var wg sync.WaitGroup
	for id := 0; id < routineNum; id++ {
		wg.Add(1)
		go func(id int) {
			parallelExec.Acquire(context.Background(), 1) // cannot fail with context.Background()
			// The error is deliberately ignored: on pool exhaustion the
			// returned IP is nil, which is skipped below.
			ip, _, _ := tctx.a.RequestAddress(tctx.pid, nil, tctx.opts)
			ch <- ip
			parallelExec.Release(1)
			wg.Done()
		}(id)
	}

	// wait for all the goroutines to finish
	wg.Wait()

	// process results
	for i := 0; i < routineNum; i++ {
		ip := <-ch
		if ip == nil {
			continue
		}
		if there, ok := tctx.ipMap[ip.String()]; ok && there {
			t.Fatalf("Got duplicate IP %s", ip.String())
		}
		tctx.ipList = append(tctx.ipList, ip)
		tctx.ipMap[ip.String()] = true
	}

	assert.Assert(t, is.Len(tctx.ipList, tctx.maxIP))
}

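// release frees the addresses selected by mode (all, or only the even- or
// odd-indexed entries of tctx.ipList) with at most parallel concurrent
// releases, then verifies each address can be requested again.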
func release(t *testing.T, tctx *testContext, mode releaseMode, parallel int64) {
	var startIndex, increment, stopIndex, length int
	switch mode {
	case all:
		startIndex = 0
		increment = 1
		stopIndex = tctx.maxIP - 1
		length = tctx.maxIP
	case odd, even:
		if mode == odd {
			startIndex = 1
		}
		increment = 2
		stopIndex = tctx.maxIP - 1
		length = tctx.maxIP / 2
		if tctx.maxIP%2 > 0 {
			length++
		}
	default:
		t.Fatalf("unsupported release mode %d", mode)
	}

	ipIndex := make([]int, 0, length)
	// calculate the indices to release from the ipList
	for i := startIndex; ; i += increment {
		ipIndex = append(ipIndex, i)
		if i+increment > stopIndex {
			break
		}
	}

	parallelExec := semaphore.NewWeighted(parallel)
	ch := make(chan *net.IPNet, len(ipIndex))
	group := new(errgroup.Group)
	for index := range ipIndex {
		index := index
		group.Go(func() error {
			parallelExec.Acquire(context.Background(), 1)
			err := tctx.a.ReleaseAddress(tctx.pid, tctx.ipList[index].IP)
			if err != nil {
				return fmt.Errorf("routine %d got %v", index, err)
			}
			ch <- tctx.ipList[index]
			parallelExec.Release(1)
			return nil
		})
	}

	if err := group.Wait(); err != nil {
		t.Fatal(err)
	}

	for i := 0; i < len(ipIndex); i++ {
		ip := <-ch

		// check that the address is really free by requesting it back
		_, _, err := tctx.a.RequestAddress(tctx.pid, ip.IP, nil)
		assert.NilError(t, err, "ip %v not properly released", ip)
		err = tctx.a.ReleaseAddress(tctx.pid, ip.IP)
		assert.NilError(t, err)

		if there, ok := tctx.ipMap[ip.String()]; !ok || !there {
			t.Fatalf("ip %v was released twice, or was never allocated", ip)
		}
		tctx.ipMap[ip.String()] = false
		for j, v := range tctx.ipList {
			if v == ip {
				tctx.ipList = append(tctx.ipList[:j], tctx.ipList[j+1:]...)
				break
			}
		}
	}

	assert.Check(t, is.Len(tctx.ipList, tctx.maxIP-length))
}