github.com/docker/engine@v22.0.0-20211208180946-d456264580cf+incompatible/libnetwork/ipam/parallel_test.go

package ipam

import (
	"context"
	"fmt"
	"math/rand"
	"net"
	"sort"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/docker/docker/libnetwork/ipamapi"
	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
)

// releaseMode selects which subset of the allocated addresses release frees.
type releaseMode uint

const (
	all  releaseMode = iota // release every allocated address
	even                    // release only the addresses at even indices of ipList
	odd                     // release only the addresses at odd indices of ipList
)

type testContext struct {
	a      *Allocator
	opts   map[string]string
	ipList []*net.IPNet
	ipMap  map[string]bool
	pid    string
	maxIP  int
}

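// newTestContext creates an allocator with a test-only "giallo" address
// space, requests a 192.168.100.0/<mask> pool from it, and returns the
// bookkeeping state the tests use to detect duplicate or lost addresses.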
func newTestContext(t *testing.T, mask int, options map[string]string) *testContext {
	a, err := getAllocator(false)
	if err != nil {
		t.Fatal(err)
	}
	a.addrSpaces["giallo"] = &addrSpace{
		id:      dsConfigKey + "/" + "giallo",
		ds:      a.addrSpaces[localAddressSpace].ds,
		alloc:   a.addrSpaces[localAddressSpace].alloc,
		scope:   a.addrSpaces[localAddressSpace].scope,
		subnets: map[SubnetKey]*PoolData{},
	}

	network := fmt.Sprintf("192.168.100.0/%d", mask)
	// Total usable IPs: 2^(32-mask) minus the network and broadcast addresses.
	totalIps := 1<<uint(32-mask) - 2

	pid, _, _, err := a.RequestPool("giallo", network, "", nil, false)
	if err != nil {
		t.Fatal(err)
	}

	return &testContext{
		a:      a,
		opts:   options,
		ipList: make([]*net.IPNet, 0, totalIps),
		ipMap:  make(map[string]bool),
		pid:    pid,
		maxIP:  totalIps,
	}
}

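// TestDebug is a smoke test: two serial address requests against a fresh /23 pool.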
func TestDebug(t *testing.T) {
	tctx := newTestContext(t, 23, map[string]string{ipamapi.AllocSerialPrefix: "true"})
	_, _, err := tctx.a.RequestAddress(tctx.pid, nil, map[string]string{ipamapi.AllocSerialPrefix: "true"})
	assert.NilError(t, err)
	_, _, err = tctx.a.RequestAddress(tctx.pid, nil, map[string]string{ipamapi.AllocSerialPrefix: "true"})
	assert.NilError(t, err)
}

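// op records a single pool operation (a request when add is true, a release
// otherwise) together with a globally ordered index, so interleavings can be
// validated after the fact.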
type op struct {
	id   int32
	add  bool
	name string
}

func (o *op) String() string {
	return fmt.Sprintf("%+v", *o)
}

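// TestRequestPoolParallel runs 120 concurrent RequestPool/ReleasePool pairs,
// then replays the recorded events per pool name: operations on any given
// pool must strictly alternate, starting with a request, so a pool is never
// released before being requested or released twice in a row.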
func TestRequestPoolParallel(t *testing.T) {
	a, err := getAllocator(false)
	if err != nil {
		t.Fatal(err)
	}
	var operationIndex int32
	ch := make(chan *op, 240)

	group := new(errgroup.Group)
	defer func() {
		if err := group.Wait(); err != nil {
			t.Fatal(err)
		}
	}()

	for i := 0; i < 120; i++ {
		group.Go(func() error {
			name, _, _, err := a.RequestPool("GlobalDefault", "", "", nil, false)
			if err != nil {
				t.Log(err) // log immediately, so the error is visible in real time rather than only when Wait returns
				return fmt.Errorf("request error %v", err)
			}
			idx := atomic.AddInt32(&operationIndex, 1)
			ch <- &op{idx, true, name}
			time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond)
			idx = atomic.AddInt32(&operationIndex, 1)
			err = a.ReleasePool(name)
			if err != nil {
				t.Log(err) // log immediately, so the error is visible in real time rather than only when Wait returns
				return fmt.Errorf("release error %v", err)
			}
			ch <- &op{idx, false, name}
			return nil
		})
	}

	// Collect the 240 events (one request and one release per goroutine),
	// grouped by pool name.
	m := make(map[string][]*op)
	for i := 0; i < 240; i++ {
		x := <-ch
		m[x.name] = append(m[x.name], x)
	}

	// Events may arrive on the channel out of order, so sort each pool's
	// operations by their global index before validating the alternation.
	for pool, ops := range m {
		sort.Slice(ops, func(i, j int) bool {
			return ops[i].id < ops[j].id
		})
		expected := true
		for _, op := range ops {
			if op.add != expected {
				t.Fatalf("Operations for %v not valid %v, operations %v", pool, op, ops)
			}
			expected = !expected
		}
	}
}

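// TestFullAllocateRelease and TestOddAllocateRelease fill pools of several
// sizes with varying degrees of parallelism, then release either all of the
// addresses or every other one.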
func TestFullAllocateRelease(t *testing.T) {
	for _, parallelism := range []int64{2, 4, 8} {
		for _, mask := range []int{29, 25, 24, 21} {
			tctx := newTestContext(t, mask, map[string]string{ipamapi.AllocSerialPrefix: "true"})
			allocate(t, tctx, parallelism)
			release(t, tctx, all, parallelism)
		}
	}
}

func TestOddAllocateRelease(t *testing.T) {
	for _, parallelism := range []int64{2, 4, 8} {
		for _, mask := range []int{29, 25, 24, 21} {
			tctx := newTestContext(t, mask, map[string]string{ipamapi.AllocSerialPrefix: "true"})
			allocate(t, tctx, parallelism)
			release(t, tctx, odd, parallelism)
		}
	}
}

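// The three *SerialReleaseParallel tests below allocate the whole pool
// serially, then release all, odd, or even addresses with increasing degrees
// of parallelism.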
func TestFullAllocateSerialReleaseParallel(t *testing.T) {
	for _, parallelism := range []int64{1, 4, 8} {
		tctx := newTestContext(t, 23, map[string]string{ipamapi.AllocSerialPrefix: "true"})
		allocate(t, tctx, 1)
		release(t, tctx, all, parallelism)
	}
}

func TestOddAllocateSerialReleaseParallel(t *testing.T) {
	for _, parallelism := range []int64{1, 4, 8} {
		tctx := newTestContext(t, 23, map[string]string{ipamapi.AllocSerialPrefix: "true"})
		allocate(t, tctx, 1)
		release(t, tctx, odd, parallelism)
	}
}

func TestEvenAllocateSerialReleaseParallel(t *testing.T) {
	for _, parallelism := range []int64{1, 4, 8} {
		tctx := newTestContext(t, 23, map[string]string{ipamapi.AllocSerialPrefix: "true"})
		allocate(t, tctx, 1)
		release(t, tctx, even, parallelism)
	}
}

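// allocate requests more addresses than the pool can hold, bounding the
// concurrency with a weighted semaphore, and records every address that was
// successfully handed out. The surplus requests are expected to fail.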
func allocate(t *testing.T, tctx *testContext, parallel int64) {
	// Try to allocate the whole address space, plus 10 extra requests that
	// must fail once the pool is exhausted.
	parallelExec := semaphore.NewWeighted(parallel)
	routineNum := tctx.maxIP + 10
	ch := make(chan *net.IPNet, routineNum)
	var wg sync.WaitGroup
	for i := 0; i < routineNum; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Acquire cannot fail with a background context.
			_ = parallelExec.Acquire(context.Background(), 1)
			// A nil ip signals an expected failure (pool exhausted).
			ip, _, _ := tctx.a.RequestAddress(tctx.pid, nil, tctx.opts)
			ch <- ip
			parallelExec.Release(1)
		}()
	}

	// Wait for all the goroutines to finish.
	wg.Wait()

	// Process the results: every non-nil address must be unique.
	for i := 0; i < routineNum; i++ {
		ip := <-ch
		if ip == nil {
			continue
		}
		if there, ok := tctx.ipMap[ip.String()]; ok && there {
			t.Fatalf("Got duplicate IP %s", ip.String())
		}
		tctx.ipList = append(tctx.ipList, ip)
		tctx.ipMap[ip.String()] = true
	}

	assert.Assert(t, is.Len(tctx.ipList, tctx.maxIP))
}

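// release frees the subset of allocated addresses selected by mode, again
// bounding the concurrency with a weighted semaphore, and verifies that each
// released address can be re-requested exactly once.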
func release(t *testing.T, tctx *testContext, mode releaseMode, parallel int64) {
	var startIndex, increment, stopIndex, length int
	switch mode {
	case all:
		startIndex = 0
		increment = 1
		stopIndex = tctx.maxIP - 1
		length = tctx.maxIP
	case odd, even:
		if mode == odd {
			startIndex = 1
		}
		increment = 2
		stopIndex = tctx.maxIP - 1
		length = tctx.maxIP / 2
		if tctx.maxIP%2 > 0 {
			length++
		}
	default:
		t.Fatal("unsupported release mode")
	}

	// Compute the ipList indices to release.
	ipIndex := make([]int, 0, length)
	for i := startIndex; ; i += increment {
		ipIndex = append(ipIndex, i)
		if i+increment > stopIndex {
			break
		}
	}

	parallelExec := semaphore.NewWeighted(parallel)
	ch := make(chan *net.IPNet, len(ipIndex))
	group := new(errgroup.Group)
	for index := range ipIndex {
		index := index // capture the loop variable for the closure
		group.Go(func() error {
			// Acquire cannot fail with a background context; release the
			// semaphore on every return path, including errors.
			_ = parallelExec.Acquire(context.Background(), 1)
			defer parallelExec.Release(1)
			if err := tctx.a.ReleaseAddress(tctx.pid, tctx.ipList[index].IP); err != nil {
				return fmt.Errorf("routine %d got %v", index, err)
			}
			ch <- tctx.ipList[index]
			return nil
		})
	}

	if err := group.Wait(); err != nil {
		t.Fatal(err)
	}

	for i := 0; i < len(ipIndex); i++ {
		ip := <-ch

		// Check that the address is really free: re-requesting it must
		// succeed, after which it is released again.
		_, _, err := tctx.a.RequestAddress(tctx.pid, ip.IP, nil)
		assert.NilError(t, err, "ip %v not properly released", ip)
		err = tctx.a.ReleaseAddress(tctx.pid, ip.IP)
		assert.NilError(t, err)

		if there, ok := tctx.ipMap[ip.String()]; !ok || !there {
			t.Fatalf("ip %v got double deallocated", ip)
		}
		tctx.ipMap[ip.String()] = false
		// Remove the released address from ipList; the comparison is by
		// pointer identity, matching the values sent on the channel.
		for j, v := range tctx.ipList {
			if v == ip {
				tctx.ipList = append(tctx.ipList[:j], tctx.ipList[j+1:]...)
				break
			}
		}
	}

	assert.Check(t, is.Len(tctx.ipList, tctx.maxIP-length))
}