github.com/kaisenlinux/docker.io@v0.0.0-20230510090727-ea55db55fac7/libnetwork/ipam/parallel_test.go

package ipam

import (
	"context"
	"fmt"
	"math/rand"
	"net"
	"sort"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/docker/libnetwork/ipamapi"
	"golang.org/x/sync/semaphore"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
)

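// releaseMode selects which subset of the addresses recorded during allocation
// the release helper frees: all of them, only the even-indexed ones, or only
// the odd-indexed ones.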
type releaseMode uint

const (
	all releaseMode = iota
	even
	odd
)

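// testContext bundles the allocator under test with the pool requested for the
// test and bookkeeping for every address handed out from it.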
type testContext struct {
	a      *Allocator
	opts   map[string]string
	ipList []*net.IPNet
	ipMap  map[string]bool
	pid    string
	maxIP  int
}

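// newTestContext returns an allocator with an extra "giallo" address space and
// a 192.168.100.0/<mask> pool requested from it; maxIP is the number of usable
// host addresses in that pool.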
func newTestContext(t *testing.T, mask int, options map[string]string) *testContext {
	a, err := getAllocator(false)
	if err != nil {
		t.Fatal(err)
	}
	a.addrSpaces["giallo"] = &addrSpace{
		id:      dsConfigKey + "/" + "giallo",
		ds:      a.addrSpaces[localAddressSpace].ds,
		alloc:   a.addrSpaces[localAddressSpace].alloc,
		scope:   a.addrSpaces[localAddressSpace].scope,
		subnets: map[SubnetKey]*PoolData{},
	}

	network := fmt.Sprintf("192.168.100.0/%d", mask)
	// total IPs: 2^(32-mask) - 2 (network and broadcast addresses)
	totalIPs := 1<<uint(32-mask) - 2

	pid, _, _, err := a.RequestPool("giallo", network, "", nil, false)
	if err != nil {
		t.Fatal(err)
	}

	return &testContext{
		a:      a,
		opts:   options,
		ipList: make([]*net.IPNet, 0, totalIPs),
		ipMap:  make(map[string]bool),
		pid:    pid,
		maxIP:  totalIPs,
	}
}

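// TestDebug is a smoke test: it serially allocates a couple of addresses from a
// fresh /23 pool.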
func TestDebug(t *testing.T) {
	tctx := newTestContext(t, 23, map[string]string{ipamapi.AllocSerialPrefix: "true"})
	_, _, err := tctx.a.RequestAddress(tctx.pid, nil, map[string]string{ipamapi.AllocSerialPrefix: "true"})
	assert.NilError(t, err)
	_, _, err = tctx.a.RequestAddress(tctx.pid, nil, map[string]string{ipamapi.AllocSerialPrefix: "true"})
	assert.NilError(t, err)
}

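// op records a single pool request (add=true) or release (add=false), tagged
// with a global operation index so the per-pool ordering can be reconstructed
// after the goroutines finish.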
type op struct {
	id   int32
	add  bool
	name string
}

func (o *op) String() string {
	return fmt.Sprintf("%+v", *o)
}

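// TestRequestPoolParallel requests and releases pools from 120 concurrent
// goroutines, then verifies that the recorded operations for every pool
// strictly alternate between request and release.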
func TestRequestPoolParallel(t *testing.T) {
	a, err := getAllocator(false)
	if err != nil {
		t.Fatal(err)
	}
	const pools = 120
	var operationIndex int32
	ch := make(chan *op, 2*pools)
	for i := 0; i < pools; i++ {
		go func(t *testing.T, a *Allocator, ch chan *op) {
			name, _, _, err := a.RequestPool("GlobalDefault", "", "", nil, false)
			if err != nil {
				t.Errorf("request error %v", err)
			}
			idx := atomic.AddInt32(&operationIndex, 1)
			ch <- &op{idx, true, name}
			time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond)
			idx = atomic.AddInt32(&operationIndex, 1)
			err = a.ReleasePool(name)
			if err != nil {
				t.Errorf("release error %v", err)
			}
			ch <- &op{idx, false, name}
		}(t, a, ch)
	}

	// collect the events emitted by the goroutines, grouped by pool name
	m := make(map[string][]*op)
	for i := 0; i < 2*pools; i++ {
		x := <-ch
		m[x.name] = append(m[x.name], x)
	}

	// Sort by operation index to undo any reordering introduced by the channel,
	// then verify that every pool strictly alternates request/release.
	for pool, ops := range m {
		sort.Slice(ops, func(i, j int) bool {
			return ops[i].id < ops[j].id
		})
		expected := true
		for _, op := range ops {
			if op.add != expected {
				t.Fatalf("operations for pool %v are not alternating: %v, full sequence %v", pool, op, ops)
			}
			expected = !expected
		}
	}
}

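// The tests below run allocate/release cycles over a matrix of pool sizes and
// parallelism levels; the *SerialReleaseParallel variants allocate with a
// single goroutine and only parallelize the release phase.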
func TestFullAllocateRelease(t *testing.T) {
	for _, parallelism := range []int64{2, 4, 8} {
		for _, mask := range []int{29, 25, 24, 21} {
			tctx := newTestContext(t, mask, map[string]string{ipamapi.AllocSerialPrefix: "true"})
			allocate(t, tctx, parallelism)
			release(t, tctx, all, parallelism)
		}
	}
}

func TestOddAllocateRelease(t *testing.T) {
	for _, parallelism := range []int64{2, 4, 8} {
		for _, mask := range []int{29, 25, 24, 21} {
			tctx := newTestContext(t, mask, map[string]string{ipamapi.AllocSerialPrefix: "true"})
			allocate(t, tctx, parallelism)
			release(t, tctx, odd, parallelism)
		}
	}
}

func TestFullAllocateSerialReleaseParallel(t *testing.T) {
	for _, parallelism := range []int64{1, 4, 8} {
		tctx := newTestContext(t, 23, map[string]string{ipamapi.AllocSerialPrefix: "true"})
		allocate(t, tctx, 1)
		release(t, tctx, all, parallelism)
	}
}

func TestOddAllocateSerialReleaseParallel(t *testing.T) {
	for _, parallelism := range []int64{1, 4, 8} {
		tctx := newTestContext(t, 23, map[string]string{ipamapi.AllocSerialPrefix: "true"})
		allocate(t, tctx, 1)
		release(t, tctx, odd, parallelism)
	}
}

func TestEvenAllocateSerialReleaseParallel(t *testing.T) {
	for _, parallelism := range []int64{1, 4, 8} {
		tctx := newTestContext(t, 23, map[string]string{ipamapi.AllocSerialPrefix: "true"})
		allocate(t, tctx, 1)
		release(t, tctx, even, parallelism)
	}
}

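// allocate exhausts the test pool with at most `parallel` concurrent requests
// and records every address that was successfully handed out.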
func allocate(t *testing.T, tctx *testContext, parallel int64) {
	// Allocate the whole address space, with at most `parallel` requests in flight.
	parallelExec := semaphore.NewWeighted(parallel)
	// Launch a few more goroutines than there are addresses; the surplus
	// requests are expected to fail and yield a nil IP.
	routineNum := tctx.maxIP + 10
	ch := make(chan *net.IPNet, routineNum)
	var wg sync.WaitGroup
	for i := 0; i < routineNum; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			parallelExec.Acquire(context.Background(), 1)
			ip, _, _ := tctx.a.RequestAddress(tctx.pid, nil, tctx.opts)
			ch <- ip
			parallelExec.Release(1)
		}()
	}

	// wait for all the goroutines to finish
	wg.Wait()

	// collect the results, rejecting duplicate allocations
	for i := 0; i < routineNum; i++ {
		ip := <-ch
		if ip == nil {
			continue
		}
		if there, ok := tctx.ipMap[ip.String()]; ok && there {
			t.Fatalf("got duplicate IP %s", ip.String())
		}
		tctx.ipList = append(tctx.ipList, ip)
		tctx.ipMap[ip.String()] = true
	}

	// every address in the pool must have been allocated exactly once
	assert.Assert(t, is.Len(tctx.ipList, tctx.maxIP), "mismatched number of allocations")
}

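// release frees the addresses selected by mode (all, odd-indexed or
// even-indexed entries of tctx.ipList) with at most `parallel` concurrent
// releases, and verifies that each released address can be re-allocated.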
func release(t *testing.T, tctx *testContext, mode releaseMode, parallel int64) {
	var startIndex, increment, stopIndex, length int
	switch mode {
	case all:
		startIndex = 0
		increment = 1
		stopIndex = tctx.maxIP - 1
		length = tctx.maxIP
	case odd, even:
		if mode == odd {
			startIndex = 1
		}
		increment = 2
		stopIndex = tctx.maxIP - 1
		length = tctx.maxIP / 2
		if tctx.maxIP%2 > 0 {
			length++
		}
	default:
		t.Fatal("unsupported release mode")
	}

	// compute the ipList indexes to release
	ipIndex := make([]int, 0, length)
	for i := startIndex; ; i += increment {
		ipIndex = append(ipIndex, i)
		if i+increment > stopIndex {
			break
		}
	}

	var id int
	parallelExec := semaphore.NewWeighted(parallel)
	ch := make(chan *net.IPNet, len(ipIndex))
	wg := sync.WaitGroup{}
	for _, index := range ipIndex {
		wg.Add(1)
		go func(id, index int) {
			defer wg.Done()
			parallelExec.Acquire(context.Background(), 1)
			err := tctx.a.ReleaseAddress(tctx.pid, tctx.ipList[index].IP)
			if err != nil {
				t.Errorf("routine %d got %v", id, err)
			}
			ch <- tctx.ipList[index]
			parallelExec.Release(1)
		}(id, index)
		id++
	}
	wg.Wait()

	for i := 0; i < len(ipIndex); i++ {
		ip := <-ch

		// check that the address is really free by re-allocating it explicitly
		_, _, err := tctx.a.RequestAddress(tctx.pid, ip.IP, nil)
		assert.NilError(t, err, "ip %v not properly released", ip)
		err = tctx.a.ReleaseAddress(tctx.pid, ip.IP)
		assert.NilError(t, err)

		if there, ok := tctx.ipMap[ip.String()]; !ok || !there {
			t.Fatalf("ip %v was released but never allocated, or released twice", ip)
		}
		tctx.ipMap[ip.String()] = false
		// drop the released address from ipList; pointer comparison is enough
		// because the same *net.IPNet was stored at allocation time
		for j, v := range tctx.ipList {
			if v == ip {
				tctx.ipList = append(tctx.ipList[:j], tctx.ipList[j+1:]...)
				break
			}
		}
	}

	assert.Check(t, is.Len(tctx.ipList, tctx.maxIP-length))
}