k8s.io/kubernetes@v1.29.3/pkg/controller/nodeipam/ipam/range_allocator_test.go

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ipam

import (
	"context"
	"net"
	"testing"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/klog/v2/ktesting"
	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test"
	"k8s.io/kubernetes/pkg/controller/testutil"
	netutils "k8s.io/utils/net"
)

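// testCase describes one scenario shared by the allocation tests below: the
// fake node handler seeds the cluster with pre-existing nodes, allocatorParams
// configures the range allocator, and the CIDR maps are keyed by the index of
// the cluster CIDR they refer to.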
type testCase struct {
	description     string
	fakeNodeHandler *testutil.FakeNodeHandler
	allocatorParams CIDRAllocatorParams
	// expectedAllocatedCIDR and allocatedCIDRs are keyed by the index of the
	// cluster CIDR the entry refers to.
	expectedAllocatedCIDR map[int]string
	allocatedCIDRs        map[int][]string
	// ctrlCreateFail marks cases where controller creation is expected to fail.
	ctrlCreateFail bool
}

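// TestOccupyPreExistingCIDR verifies that the range allocator can be
// constructed when nodes already carry PodCIDRs that fall inside the
// configured cluster CIDRs, and that construction fails when a pre-existing
// PodCIDR lies outside of them.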
func TestOccupyPreExistingCIDR(t *testing.T) {
	// all tests operate on a single node
	testCases := []testCase{
		{
			description: "success, single stack no node allocation",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			allocatorParams: CIDRAllocatorParams{
				ClusterCIDRs: func() []*net.IPNet {
					_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
					return []*net.IPNet{clusterCIDRv4}
				}(),
				ServiceCIDR:          nil,
				SecondaryServiceCIDR: nil,
				NodeCIDRMaskSizes:    []int{24},
			},
			allocatedCIDRs:        nil,
			expectedAllocatedCIDR: nil,
			ctrlCreateFail:        false,
		},
		{
			description: "success, dual stack no node allocation",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			allocatorParams: CIDRAllocatorParams{
				ClusterCIDRs: func() []*net.IPNet {
					_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
					_, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/8")
					return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
				}(),
				ServiceCIDR:          nil,
				SecondaryServiceCIDR: nil,
				NodeCIDRMaskSizes:    []int{24, 24},
			},
			allocatedCIDRs:        nil,
			expectedAllocatedCIDR: nil,
			ctrlCreateFail:        false,
		},
		{
			description: "success, single stack correct node allocation",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
						Spec: v1.NodeSpec{
							PodCIDRs: []string{"10.10.0.1/24"},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			allocatorParams: CIDRAllocatorParams{
				ClusterCIDRs: func() []*net.IPNet {
					_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
					return []*net.IPNet{clusterCIDRv4}
				}(),
				ServiceCIDR:          nil,
				SecondaryServiceCIDR: nil,
				NodeCIDRMaskSizes:    []int{24},
			},
			allocatedCIDRs:        nil,
			expectedAllocatedCIDR: nil,
			ctrlCreateFail:        false,
		},
		{
			description: "success, dual stack both allocated correctly",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
						Spec: v1.NodeSpec{
							PodCIDRs: []string{"10.10.0.1/24", "a00::/86"},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			allocatorParams: CIDRAllocatorParams{
				ClusterCIDRs: func() []*net.IPNet {
					_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
					_, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/8")
					return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
				}(),
				ServiceCIDR:          nil,
				SecondaryServiceCIDR: nil,
				NodeCIDRMaskSizes:    []int{24, 24},
			},
			allocatedCIDRs:        nil,
			expectedAllocatedCIDR: nil,
			ctrlCreateFail:        false,
		},
		// failure cases
		{
			description: "fail, single stack incorrect node allocation",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
						Spec: v1.NodeSpec{
							PodCIDRs: []string{"172.10.0.1/24"},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			allocatorParams: CIDRAllocatorParams{
				ClusterCIDRs: func() []*net.IPNet {
					_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
					return []*net.IPNet{clusterCIDRv4}
				}(),
				ServiceCIDR:          nil,
				SecondaryServiceCIDR: nil,
				NodeCIDRMaskSizes:    []int{24},
			},
			allocatedCIDRs:        nil,
			expectedAllocatedCIDR: nil,
			ctrlCreateFail:        true,
		},
		{
			description: "fail, dualstack node allocating from non existing cidr",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
						Spec: v1.NodeSpec{
							PodCIDRs: []string{"10.10.0.1/24", "a00::/86"},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			allocatorParams: CIDRAllocatorParams{
				ClusterCIDRs: func() []*net.IPNet {
					_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
					return []*net.IPNet{clusterCIDRv4}
				}(),
				ServiceCIDR:          nil,
				SecondaryServiceCIDR: nil,
				NodeCIDRMaskSizes:    []int{24},
			},
			allocatedCIDRs:        nil,
			expectedAllocatedCIDR: nil,
			ctrlCreateFail:        true,
		},
		{
			description: "fail, dualstack node allocating bad v4",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
						Spec: v1.NodeSpec{
							PodCIDRs: []string{"172.10.0.1/24", "a00::/86"},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			allocatorParams: CIDRAllocatorParams{
				ClusterCIDRs: func() []*net.IPNet {
					_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
					_, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/8")
					return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
				}(),
				ServiceCIDR:          nil,
				SecondaryServiceCIDR: nil,
				NodeCIDRMaskSizes:    []int{24, 24},
			},
			allocatedCIDRs:        nil,
			expectedAllocatedCIDR: nil,
			ctrlCreateFail:        true,
		},
		{
			description: "fail, dualstack node allocating bad v6",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
						Spec: v1.NodeSpec{
							PodCIDRs: []string{"10.10.0.1/24", "cdd::/86"},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			allocatorParams: CIDRAllocatorParams{
				ClusterCIDRs: func() []*net.IPNet {
					_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
					_, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/8")
					return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
				}(),
				ServiceCIDR:          nil,
				SecondaryServiceCIDR: nil,
				NodeCIDRMaskSizes:    []int{24, 24},
			},
			allocatedCIDRs:        nil,
			expectedAllocatedCIDR: nil,
			ctrlCreateFail:        true,
		},
	}

	// test function
	logger, _ := ktesting.NewTestContext(t)
	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			// Initialize the range allocator.
			fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler)
			nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{})
			_, err := NewCIDRRangeAllocator(logger, tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList)
			if err == nil && tc.ctrlCreateFail {
				t.Fatalf("creating range allocator was expected to fail, but it did not")
			}
			if err != nil && !tc.ctrlCreateFail {
				t.Fatalf("creating range allocator was expected to succeed, but it failed: %v", err)
			}
		})
	}
}

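// TestAllocateOrOccupyCIDRSuccess covers the happy paths of
// AllocateOrOccupyCIDR: plain allocation, skipping the service CIDR, skipping
// CIDRs that were pre-occupied in the cidrSets, and dual-stack configurations.
// Every case must update exactly one node.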
func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
	// Non-parallel test (overrides global var)
	oldNodePollInterval := nodePollInterval
	nodePollInterval = test.NodePollInterval
	defer func() {
		nodePollInterval = oldNodePollInterval
	}()

	// all tests operate on a single node
	testCases := []testCase{
		{
			description: "When there's no ServiceCIDR return first CIDR in range",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			allocatorParams: CIDRAllocatorParams{
				ClusterCIDRs: func() []*net.IPNet {
					_, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/24")
					return []*net.IPNet{clusterCIDR}
				}(),
				ServiceCIDR:          nil,
				SecondaryServiceCIDR: nil,
				NodeCIDRMaskSizes:    []int{30},
			},
			expectedAllocatedCIDR: map[int]string{
				0: "127.123.234.0/30",
			},
		},
		{
			description: "Correctly filter out ServiceCIDR",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			allocatorParams: CIDRAllocatorParams{
				ClusterCIDRs: func() []*net.IPNet {
					_, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/24")
					return []*net.IPNet{clusterCIDR}
				}(),
				ServiceCIDR: func() *net.IPNet {
					_, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26")
					return serviceCIDR
				}(),
				SecondaryServiceCIDR: nil,
				NodeCIDRMaskSizes:    []int{30},
			},
			// it should return the first /30 CIDR after the service range
			expectedAllocatedCIDR: map[int]string{
				0: "127.123.234.64/30",
			},
		},
		{
			description: "Correctly ignore already allocated CIDRs",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			allocatorParams: CIDRAllocatorParams{
				ClusterCIDRs: func() []*net.IPNet {
					_, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/24")
					return []*net.IPNet{clusterCIDR}
				}(),
				ServiceCIDR: func() *net.IPNet {
					_, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26")
					return serviceCIDR
				}(),
				SecondaryServiceCIDR: nil,
				NodeCIDRMaskSizes:    []int{30},
			},
			allocatedCIDRs: map[int][]string{
				0: {"127.123.234.64/30", "127.123.234.68/30", "127.123.234.72/30", "127.123.234.80/30"},
			},
			expectedAllocatedCIDR: map[int]string{
				0: "127.123.234.76/30",
			},
		},
		// The dual-stack cases set no expectedAllocatedCIDR; they only verify
		// that allocation succeeds and exactly one node gets updated.
		{
			description: "Dualstack CIDRs v4,v6",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			allocatorParams: CIDRAllocatorParams{
				ClusterCIDRs: func() []*net.IPNet {
					_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("127.123.234.0/8")
					_, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/84")
					return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
				}(),
				ServiceCIDR: func() *net.IPNet {
					_, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26")
					return serviceCIDR
				}(),
				SecondaryServiceCIDR: nil,
				NodeCIDRMaskSizes:    []int{24, 98},
			},
		},
		{
			description: "Dualstack CIDRs v6,v4",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			allocatorParams: CIDRAllocatorParams{
				ClusterCIDRs: func() []*net.IPNet {
					_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("127.123.234.0/8")
					_, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/84")
					return []*net.IPNet{clusterCIDRv6, clusterCIDRv4}
				}(),
				ServiceCIDR: func() *net.IPNet {
					_, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26")
					return serviceCIDR
				}(),
				SecondaryServiceCIDR: nil,
				NodeCIDRMaskSizes:    []int{98, 24},
			},
		},
		{
			description: "Dualstack CIDRs, more than two",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			allocatorParams: CIDRAllocatorParams{
				ClusterCIDRs: func() []*net.IPNet {
					_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("127.123.234.0/8")
					_, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/84")
					_, clusterCIDRv4_2, _ := netutils.ParseCIDRSloppy("10.0.0.0/8")
					return []*net.IPNet{clusterCIDRv4, clusterCIDRv6, clusterCIDRv4_2}
				}(),
				ServiceCIDR: func() *net.IPNet {
					_, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26")
					return serviceCIDR
				}(),
				SecondaryServiceCIDR: nil,
				NodeCIDRMaskSizes:    []int{24, 98, 24},
			},
		},
		{
			description: "no double counting",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
						Spec: v1.NodeSpec{
							PodCIDRs: []string{"10.10.0.0/24"},
						},
					},
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node1",
						},
						Spec: v1.NodeSpec{
							PodCIDRs: []string{"10.10.2.0/24"},
						},
					},
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node2",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			allocatorParams: CIDRAllocatorParams{
				ClusterCIDRs: func() []*net.IPNet {
					_, clusterCIDR, _ := netutils.ParseCIDRSloppy("10.10.0.0/22")
					return []*net.IPNet{clusterCIDR}
				}(),
				ServiceCIDR:          nil,
				SecondaryServiceCIDR: nil,
				NodeCIDRMaskSizes:    []int{24},
			},
			// 10.10.0.0/24 and 10.10.2.0/24 are already taken by node0 and node1,
			// so node2 must get 10.10.1.0/24.
			expectedAllocatedCIDR: map[int]string{
				0: "10.10.1.0/24",
			},
		},
	}

	// test function
	logger, ctx := ktesting.NewTestContext(t)
	testFunc := func(tc testCase) {
		fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler)
		nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{})
		// Initialize the range allocator.
		allocator, err := NewCIDRRangeAllocator(logger, tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList)
		if err != nil {
			t.Errorf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err)
			return
		}
		rangeAllocator, ok := allocator.(*rangeAllocator)
		if !ok {
			t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
			return
		}
		rangeAllocator.nodesSynced = test.AlwaysReady
		rangeAllocator.recorder = testutil.NewFakeRecorder()
		go allocator.Run(ctx)

		// This is a bit of white-box testing: pre-occupy the CIDRs listed in the test case.
		for idx, allocatedList := range tc.allocatedCIDRs {
			for _, allocated := range allocatedList {
				_, cidr, err := netutils.ParseCIDRSloppy(allocated)
				if err != nil {
					t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
				}
				if err = rangeAllocator.cidrSets[idx].Occupy(cidr); err != nil {
					t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err)
				}
			}
		}

		updateCount := 0
		for _, node := range tc.fakeNodeHandler.Existing {
			if node.Spec.PodCIDRs == nil {
				updateCount++
			}
			if err := allocator.AllocateOrOccupyCIDR(logger, node); err != nil {
				t.Errorf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
			}
		}
		if updateCount != 1 {
			t.Fatalf("test error: all tests must update exactly one node")
		}
		if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, updateCount, wait.ForeverTestTimeout); err != nil {
			t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
		}

		if len(tc.expectedAllocatedCIDR) == 0 {
			// nothing further expected
			return
		}
		for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() {
			if len(updatedNode.Spec.PodCIDRs) == 0 {
				continue // not assigned yet
			}
			// match the assigned PodCIDRs against the expected CIDRs by index
			for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDR {
				if updatedNode.Spec.PodCIDRs[podCIDRIdx] != expectedPodCIDR {
					t.Errorf("%v: Unable to find allocated CIDR %v, found updated Nodes with CIDRs: %v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs)
					break
				}
			}
		}
	}

	// run the test cases
	for _, tc := range testCases {
		testFunc(tc)
	}
}

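// TestAllocateOrOccupyCIDRFailure pre-occupies the entire cluster CIDR and
// then checks that AllocateOrOccupyCIDR returns an error and leaves the node
// unchanged.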
func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
	testCases := []testCase{
		{
			description: "fail to allocate when the whole cluster CIDR is already occupied",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			allocatorParams: CIDRAllocatorParams{
				ClusterCIDRs: func() []*net.IPNet {
					_, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/28")
					return []*net.IPNet{clusterCIDR}
				}(),
				ServiceCIDR:          nil,
				SecondaryServiceCIDR: nil,
				NodeCIDRMaskSizes:    []int{30},
			},
			allocatedCIDRs: map[int][]string{
				0: {"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
			},
		},
	}
	logger, ctx := ktesting.NewTestContext(t)
	testFunc := func(tc testCase) {
		// Initialize the range allocator.
		allocator, err := NewCIDRRangeAllocator(logger, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil)
		if err != nil {
			t.Logf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err)
		}
		rangeAllocator, ok := allocator.(*rangeAllocator)
		if !ok {
			t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
			return
		}
		rangeAllocator.nodesSynced = test.AlwaysReady
		rangeAllocator.recorder = testutil.NewFakeRecorder()
		go allocator.Run(ctx)

		// This is a bit of white-box testing: pre-occupy the CIDRs listed in the test case.
		for setIdx, allocatedList := range tc.allocatedCIDRs {
			for _, allocated := range allocatedList {
				_, cidr, err := netutils.ParseCIDRSloppy(allocated)
				if err != nil {
					t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
				}
				err = rangeAllocator.cidrSets[setIdx].Occupy(cidr)
				if err != nil {
					t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, cidr, err)
				}
			}
		}
		if err := allocator.AllocateOrOccupyCIDR(logger, tc.fakeNodeHandler.Existing[0]); err == nil {
			t.Errorf("%v: unexpected success in AllocateOrOccupyCIDR", tc.description)
		}
		// We don't expect any updates, so just sleep for some time
		time.Sleep(time.Second)
		if len(tc.fakeNodeHandler.GetUpdatedNodesCopy()) != 0 {
			t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.GetUpdatedNodesCopy())
		}
		if len(tc.expectedAllocatedCIDR) == 0 {
			// nothing further expected
			return
		}
		for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() {
			if len(updatedNode.Spec.PodCIDRs) == 0 {
				continue // not assigned yet
			}
			// match: none of the expected CIDRs may have been assigned
			for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDR {
				if updatedNode.Spec.PodCIDRs[podCIDRIdx] == expectedPodCIDR {
					t.Errorf("%v: found cidr %v that should not be allocated on node with CIDRs: %v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs)
					break
				}
			}
		}
	}
	for _, tc := range testCases {
		testFunc(tc)
	}
}

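// releaseTestCase extends the allocation scenarios with a release step:
// the CIDRs listed in cidrsToRelease are handed back via ReleaseCIDR between
// the first and second allocation rounds.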
type releaseTestCase struct {
	description                      string
	fakeNodeHandler                  *testutil.FakeNodeHandler
	allocatorParams                  CIDRAllocatorParams
	expectedAllocatedCIDRFirstRound  map[int]string
	expectedAllocatedCIDRSecondRound map[int]string
	allocatedCIDRs                   map[int][]string
	cidrsToRelease                   [][]string
}

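// TestReleaseCIDRSuccess checks that a released CIDR becomes allocatable
// again: the first round runs against a partially or fully occupied range,
// the listed CIDRs are released, and the second round must hand out the
// expected CIDR.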
func TestReleaseCIDRSuccess(t *testing.T) {
	// Non-parallel test (overrides global var)
	oldNodePollInterval := nodePollInterval
	nodePollInterval = test.NodePollInterval
	defer func() {
		nodePollInterval = oldNodePollInterval
	}()

	testCases := []releaseTestCase{
		{
			description: "Correctly release preallocated CIDR",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			allocatorParams: CIDRAllocatorParams{
				ClusterCIDRs: func() []*net.IPNet {
					_, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/28")
					return []*net.IPNet{clusterCIDR}
				}(),
				ServiceCIDR:          nil,
				SecondaryServiceCIDR: nil,
				NodeCIDRMaskSizes:    []int{30},
			},
			allocatedCIDRs: map[int][]string{
				0: {"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
			},
			expectedAllocatedCIDRFirstRound: nil,
			cidrsToRelease: [][]string{
				{"127.123.234.4/30"},
			},
			expectedAllocatedCIDRSecondRound: map[int]string{
				0: "127.123.234.4/30",
			},
		},
		{
			description: "Correctly recycle CIDR",
			fakeNodeHandler: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(),
			},
			allocatorParams: CIDRAllocatorParams{
				ClusterCIDRs: func() []*net.IPNet {
					_, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/28")
					return []*net.IPNet{clusterCIDR}
				}(),
				ServiceCIDR:          nil,
				SecondaryServiceCIDR: nil,
				NodeCIDRMaskSizes:    []int{30},
			},
			allocatedCIDRs: map[int][]string{
				0: {"127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
			},
			expectedAllocatedCIDRFirstRound: map[int]string{
				0: "127.123.234.0/30",
			},
			cidrsToRelease: [][]string{
				{"127.123.234.0/30"},
			},
			expectedAllocatedCIDRSecondRound: map[int]string{
				0: "127.123.234.0/30",
			},
		},
	}
	logger, ctx := ktesting.NewTestContext(t)
	testFunc := func(tc releaseTestCase) {
		// Initialize the range allocator.
		allocator, _ := NewCIDRRangeAllocator(logger, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil)
		rangeAllocator, ok := allocator.(*rangeAllocator)
		if !ok {
			t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
			return
		}
		rangeAllocator.nodesSynced = test.AlwaysReady
		rangeAllocator.recorder = testutil.NewFakeRecorder()
		go allocator.Run(ctx)

		// This is a bit of white-box testing: pre-occupy the CIDRs listed in the test case.
		for setIdx, allocatedList := range tc.allocatedCIDRs {
			for _, allocated := range allocatedList {
				_, cidr, err := netutils.ParseCIDRSloppy(allocated)
				if err != nil {
					t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
				}
				err = rangeAllocator.cidrSets[setIdx].Occupy(cidr)
				if err != nil {
					t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err)
				}
			}
		}

		err := allocator.AllocateOrOccupyCIDR(logger, tc.fakeNodeHandler.Existing[0])
		if len(tc.expectedAllocatedCIDRFirstRound) != 0 {
			if err != nil {
				t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
			}
			if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
				t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
			}
		} else {
			if err == nil {
				t.Fatalf("%v: unexpected success in AllocateOrOccupyCIDR", tc.description)
			}
			// We don't expect any updates here
			time.Sleep(time.Second)
			if len(tc.fakeNodeHandler.GetUpdatedNodesCopy()) != 0 {
				t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.GetUpdatedNodesCopy())
			}
		}
		for _, cidrToRelease := range tc.cidrsToRelease {
			nodeToRelease := v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: "node0",
				},
			}
			nodeToRelease.Spec.PodCIDRs = cidrToRelease
			err = allocator.ReleaseCIDR(logger, &nodeToRelease)
			if err != nil {
				t.Fatalf("%v: unexpected error in ReleaseCIDR: %v", tc.description, err)
			}
		}
		if err = allocator.AllocateOrOccupyCIDR(logger, tc.fakeNodeHandler.Existing[0]); err != nil {
			t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
		}
		if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
			t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
		}

		if len(tc.expectedAllocatedCIDRSecondRound) == 0 {
			// nothing further expected
			return
		}
		for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() {
			if len(updatedNode.Spec.PodCIDRs) == 0 {
				continue // not assigned yet
			}
			// match the second-round allocations against the expected CIDRs by index
			for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDRSecondRound {
				if updatedNode.Spec.PodCIDRs[podCIDRIdx] != expectedPodCIDR {
					t.Errorf("%v: unable to find allocated CIDR %v, found updated Nodes with CIDRs: %v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs)
					break
				}
			}
		}
	}

	for _, tc := range testCases {
		testFunc(tc)
	}
}