k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/pkg/controller/nodeipam/ipam/range_allocator_test.go (about) 1 /* 2 Copyright 2016 The Kubernetes Authors. 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 package ipam 18 19 import ( 20 "net" 21 "testing" 22 "time" 23 24 v1 "k8s.io/api/core/v1" 25 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 26 "k8s.io/apimachinery/pkg/util/wait" 27 "k8s.io/client-go/kubernetes/fake" 28 "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test" 29 "k8s.io/kubernetes/pkg/controller/testutil" 30 "k8s.io/kubernetes/test/utils/ktesting" 31 netutils "k8s.io/utils/net" 32 ) 33 34 type testCase struct { 35 description string 36 fakeNodeHandler *testutil.FakeNodeHandler 37 allocatorParams CIDRAllocatorParams 38 // key is index of the cidr allocated 39 expectedAllocatedCIDR map[int]string 40 allocatedCIDRs map[int][]string 41 // should controller creation fail? 42 ctrlCreateFail bool 43 } 44 45 func TestOccupyPreExistingCIDR(t *testing.T) { 46 // all tests operate on a single node 47 testCases := []testCase{ 48 { 49 description: "success, single stack no node allocation", 50 fakeNodeHandler: &testutil.FakeNodeHandler{ 51 Existing: []*v1.Node{ 52 { 53 ObjectMeta: metav1.ObjectMeta{ 54 Name: "node0", 55 }, 56 }, 57 }, 58 Clientset: fake.NewSimpleClientset(), 59 }, 60 allocatorParams: CIDRAllocatorParams{ 61 ClusterCIDRs: func() []*net.IPNet { 62 _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16") 63 return []*net.IPNet{clusterCIDRv4} 64 }(), 65 ServiceCIDR: nil, 66 SecondaryServiceCIDR: nil, 67 NodeCIDRMaskSizes: []int{24}, 68 }, 69 allocatedCIDRs: nil, 70 expectedAllocatedCIDR: nil, 71 ctrlCreateFail: false, 72 }, 73 { 74 description: "success, dual stack no node allocation", 75 fakeNodeHandler: &testutil.FakeNodeHandler{ 76 Existing: []*v1.Node{ 77 { 78 ObjectMeta: metav1.ObjectMeta{ 79 Name: "node0", 80 }, 81 }, 82 }, 83 Clientset: fake.NewSimpleClientset(), 84 }, 85 allocatorParams: CIDRAllocatorParams{ 86 ClusterCIDRs: func() []*net.IPNet { 87 _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16") 88 _, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/8") 89 return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} 90 }(), 91 ServiceCIDR: nil, 92 SecondaryServiceCIDR: nil, 93 NodeCIDRMaskSizes: []int{24, 24}, 94 }, 95 allocatedCIDRs: nil, 96 expectedAllocatedCIDR: nil, 97 ctrlCreateFail: false, 98 }, 99 { 100 description: "success, single stack correct node allocation", 101 fakeNodeHandler: &testutil.FakeNodeHandler{ 102 Existing: []*v1.Node{ 103 { 104 ObjectMeta: metav1.ObjectMeta{ 105 Name: "node0", 106 }, 107 Spec: v1.NodeSpec{ 108 PodCIDRs: []string{"10.10.0.1/24"}, 109 }, 110 }, 111 }, 112 Clientset: fake.NewSimpleClientset(), 113 }, 114 allocatorParams: CIDRAllocatorParams{ 115 ClusterCIDRs: func() []*net.IPNet { 116 _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16") 117 return []*net.IPNet{clusterCIDRv4} 118 }(), 119 ServiceCIDR: nil, 120 SecondaryServiceCIDR: nil, 121 NodeCIDRMaskSizes: []int{24}, 122 }, 123 allocatedCIDRs: nil, 124 expectedAllocatedCIDR: nil, 125 ctrlCreateFail: false, 126 }, 127 { 128 description: "success, dual stack both allocated correctly", 129 fakeNodeHandler: &testutil.FakeNodeHandler{ 130 Existing: []*v1.Node{ 131 { 132 ObjectMeta: metav1.ObjectMeta{ 133 Name: "node0", 134 }, 135 Spec: v1.NodeSpec{ 136 PodCIDRs: []string{"10.10.0.1/24", "a00::/86"}, 137 }, 138 }, 139 }, 140 Clientset: fake.NewSimpleClientset(), 141 }, 142 allocatorParams: CIDRAllocatorParams{ 143 ClusterCIDRs: func() []*net.IPNet { 144 _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16") 145 _, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/8") 146 return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} 147 }(), 148 ServiceCIDR: nil, 149 SecondaryServiceCIDR: nil, 150 NodeCIDRMaskSizes: []int{24, 24}, 151 }, 152 allocatedCIDRs: nil, 153 expectedAllocatedCIDR: nil, 154 ctrlCreateFail: false, 155 }, 156 // failure cases 157 { 158 description: "fail, single stack incorrect node allocation", 159 fakeNodeHandler: &testutil.FakeNodeHandler{ 160 Existing: []*v1.Node{ 161 { 162 ObjectMeta: metav1.ObjectMeta{ 163 Name: "node0", 164 }, 165 Spec: v1.NodeSpec{ 166 PodCIDRs: []string{"172.10.0.1/24"}, 167 }, 168 }, 169 }, 170 Clientset: fake.NewSimpleClientset(), 171 }, 172 allocatorParams: CIDRAllocatorParams{ 173 ClusterCIDRs: func() []*net.IPNet { 174 _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16") 175 return []*net.IPNet{clusterCIDRv4} 176 }(), 177 ServiceCIDR: nil, 178 SecondaryServiceCIDR: nil, 179 NodeCIDRMaskSizes: []int{24}, 180 }, 181 allocatedCIDRs: nil, 182 expectedAllocatedCIDR: nil, 183 ctrlCreateFail: true, 184 }, 185 { 186 description: "fail, dualstack node allocating from non existing cidr", 187 188 fakeNodeHandler: &testutil.FakeNodeHandler{ 189 Existing: []*v1.Node{ 190 { 191 ObjectMeta: metav1.ObjectMeta{ 192 Name: "node0", 193 }, 194 Spec: v1.NodeSpec{ 195 PodCIDRs: []string{"10.10.0.1/24", "a00::/86"}, 196 }, 197 }, 198 }, 199 Clientset: fake.NewSimpleClientset(), 200 }, 201 allocatorParams: CIDRAllocatorParams{ 202 ClusterCIDRs: func() []*net.IPNet { 203 _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16") 204 return []*net.IPNet{clusterCIDRv4} 205 }(), 206 ServiceCIDR: nil, 207 SecondaryServiceCIDR: nil, 208 NodeCIDRMaskSizes: []int{24}, 209 }, 210 allocatedCIDRs: nil, 211 expectedAllocatedCIDR: nil, 212 ctrlCreateFail: true, 213 }, 214 { 215 description: "fail, dualstack node allocating bad v4", 216 217 fakeNodeHandler: &testutil.FakeNodeHandler{ 218 Existing: []*v1.Node{ 219 { 220 ObjectMeta: metav1.ObjectMeta{ 221 Name: "node0", 222 }, 223 Spec: v1.NodeSpec{ 224 PodCIDRs: []string{"172.10.0.1/24", "a00::/86"}, 225 }, 226 }, 227 }, 228 Clientset: fake.NewSimpleClientset(), 229 }, 230 allocatorParams: CIDRAllocatorParams{ 231 ClusterCIDRs: func() []*net.IPNet { 232 _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16") 233 _, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/8") 234 return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} 235 }(), 236 ServiceCIDR: nil, 237 SecondaryServiceCIDR: nil, 238 NodeCIDRMaskSizes: []int{24, 24}, 239 }, 240 allocatedCIDRs: nil, 241 expectedAllocatedCIDR: nil, 242 ctrlCreateFail: true, 243 }, 244 { 245 description: "fail, dualstack node allocating bad v6", 246 247 fakeNodeHandler: &testutil.FakeNodeHandler{ 248 Existing: []*v1.Node{ 249 { 250 ObjectMeta: metav1.ObjectMeta{ 251 Name: "node0", 252 }, 253 Spec: v1.NodeSpec{ 254 PodCIDRs: []string{"10.10.0.1/24", "cdd::/86"}, 255 }, 256 }, 257 }, 258 Clientset: fake.NewSimpleClientset(), 259 }, 260 allocatorParams: CIDRAllocatorParams{ 261 ClusterCIDRs: func() []*net.IPNet { 262 _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16") 263 _, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/8") 264 return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} 265 }(), 266 ServiceCIDR: nil, 267 SecondaryServiceCIDR: nil, 268 NodeCIDRMaskSizes: []int{24, 24}, 269 }, 270 allocatedCIDRs: nil, 271 expectedAllocatedCIDR: nil, 272 ctrlCreateFail: true, 273 }, 274 } 275 276 // test function 277 tCtx := ktesting.Init(t) 278 for _, tc := range testCases { 279 t.Run(tc.description, func(t *testing.T) { 280 // Initialize the range allocator. 281 fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler) 282 nodeList, _ := tc.fakeNodeHandler.List(tCtx, metav1.ListOptions{}) 283 _, err := NewCIDRRangeAllocator(tCtx, tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList) 284 if err == nil && tc.ctrlCreateFail { 285 t.Fatalf("creating range allocator was expected to fail, but it did not") 286 } 287 if err != nil && !tc.ctrlCreateFail { 288 t.Fatalf("creating range allocator was expected to succeed, but it did not") 289 } 290 }) 291 } 292 } 293 294 func TestAllocateOrOccupyCIDRSuccess(t *testing.T) { 295 // Non-parallel test (overrides global var) 296 oldNodePollInterval := nodePollInterval 297 nodePollInterval = test.NodePollInterval 298 defer func() { 299 nodePollInterval = oldNodePollInterval 300 }() 301 302 // all tests operate on a single node 303 testCases := []testCase{ 304 { 305 description: "When there's no ServiceCIDR return first CIDR in range", 306 fakeNodeHandler: &testutil.FakeNodeHandler{ 307 Existing: []*v1.Node{ 308 { 309 ObjectMeta: metav1.ObjectMeta{ 310 Name: "node0", 311 }, 312 }, 313 }, 314 Clientset: fake.NewSimpleClientset(), 315 }, 316 allocatorParams: CIDRAllocatorParams{ 317 ClusterCIDRs: func() []*net.IPNet { 318 _, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/24") 319 return []*net.IPNet{clusterCIDR} 320 }(), 321 ServiceCIDR: nil, 322 SecondaryServiceCIDR: nil, 323 NodeCIDRMaskSizes: []int{30}, 324 }, 325 expectedAllocatedCIDR: map[int]string{ 326 0: "127.123.234.0/30", 327 }, 328 }, 329 { 330 description: "Correctly filter out ServiceCIDR", 331 fakeNodeHandler: &testutil.FakeNodeHandler{ 332 Existing: []*v1.Node{ 333 { 334 ObjectMeta: metav1.ObjectMeta{ 335 Name: "node0", 336 }, 337 }, 338 }, 339 Clientset: fake.NewSimpleClientset(), 340 }, 341 allocatorParams: CIDRAllocatorParams{ 342 ClusterCIDRs: func() []*net.IPNet { 343 _, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/24") 344 return []*net.IPNet{clusterCIDR} 345 }(), 346 ServiceCIDR: func() *net.IPNet { 347 _, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26") 348 return serviceCIDR 349 }(), 350 SecondaryServiceCIDR: nil, 351 NodeCIDRMaskSizes: []int{30}, 352 }, 353 // it should return first /30 CIDR after service range 354 expectedAllocatedCIDR: map[int]string{ 355 0: "127.123.234.64/30", 356 }, 357 }, 358 { 359 description: "Correctly ignore already allocated CIDRs", 360 fakeNodeHandler: &testutil.FakeNodeHandler{ 361 Existing: []*v1.Node{ 362 { 363 ObjectMeta: metav1.ObjectMeta{ 364 Name: "node0", 365 }, 366 }, 367 }, 368 Clientset: fake.NewSimpleClientset(), 369 }, 370 allocatorParams: CIDRAllocatorParams{ 371 ClusterCIDRs: func() []*net.IPNet { 372 _, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/24") 373 return []*net.IPNet{clusterCIDR} 374 }(), 375 ServiceCIDR: func() *net.IPNet { 376 _, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26") 377 return serviceCIDR 378 }(), 379 SecondaryServiceCIDR: nil, 380 NodeCIDRMaskSizes: []int{30}, 381 }, 382 allocatedCIDRs: map[int][]string{ 383 0: {"127.123.234.64/30", "127.123.234.68/30", "127.123.234.72/30", "127.123.234.80/30"}, 384 }, 385 expectedAllocatedCIDR: map[int]string{ 386 0: "127.123.234.76/30", 387 }, 388 }, 389 { 390 description: "Dualstack CIDRs v4,v6", 391 fakeNodeHandler: &testutil.FakeNodeHandler{ 392 Existing: []*v1.Node{ 393 { 394 ObjectMeta: metav1.ObjectMeta{ 395 Name: "node0", 396 }, 397 }, 398 }, 399 Clientset: fake.NewSimpleClientset(), 400 }, 401 allocatorParams: CIDRAllocatorParams{ 402 ClusterCIDRs: func() []*net.IPNet { 403 _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("127.123.234.0/8") 404 _, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/84") 405 return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} 406 }(), 407 ServiceCIDR: func() *net.IPNet { 408 _, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26") 409 return serviceCIDR 410 }(), 411 SecondaryServiceCIDR: nil, 412 NodeCIDRMaskSizes: []int{24, 98}, 413 }, 414 }, 415 { 416 description: "Dualstack CIDRs v6,v4", 417 fakeNodeHandler: &testutil.FakeNodeHandler{ 418 Existing: []*v1.Node{ 419 { 420 ObjectMeta: metav1.ObjectMeta{ 421 Name: "node0", 422 }, 423 }, 424 }, 425 Clientset: fake.NewSimpleClientset(), 426 }, 427 allocatorParams: CIDRAllocatorParams{ 428 ClusterCIDRs: func() []*net.IPNet { 429 _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("127.123.234.0/8") 430 _, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/84") 431 return []*net.IPNet{clusterCIDRv6, clusterCIDRv4} 432 }(), 433 ServiceCIDR: func() *net.IPNet { 434 _, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26") 435 return serviceCIDR 436 }(), 437 SecondaryServiceCIDR: nil, 438 NodeCIDRMaskSizes: []int{98, 24}, 439 }, 440 }, 441 { 442 description: "Dualstack CIDRs, more than two", 443 fakeNodeHandler: &testutil.FakeNodeHandler{ 444 Existing: []*v1.Node{ 445 { 446 ObjectMeta: metav1.ObjectMeta{ 447 Name: "node0", 448 }, 449 }, 450 }, 451 Clientset: fake.NewSimpleClientset(), 452 }, 453 allocatorParams: CIDRAllocatorParams{ 454 ClusterCIDRs: func() []*net.IPNet { 455 _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("127.123.234.0/8") 456 _, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/84") 457 _, clusterCIDRv4_2, _ := netutils.ParseCIDRSloppy("10.0.0.0/8") 458 return []*net.IPNet{clusterCIDRv4, clusterCIDRv6, clusterCIDRv4_2} 459 }(), 460 ServiceCIDR: func() *net.IPNet { 461 _, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26") 462 return serviceCIDR 463 }(), 464 SecondaryServiceCIDR: nil, 465 NodeCIDRMaskSizes: []int{24, 98, 24}, 466 }, 467 }, 468 { 469 description: "no double counting", 470 fakeNodeHandler: &testutil.FakeNodeHandler{ 471 Existing: []*v1.Node{ 472 { 473 ObjectMeta: metav1.ObjectMeta{ 474 Name: "node0", 475 }, 476 Spec: v1.NodeSpec{ 477 PodCIDRs: []string{"10.10.0.0/24"}, 478 }, 479 }, 480 { 481 ObjectMeta: metav1.ObjectMeta{ 482 Name: "node1", 483 }, 484 Spec: v1.NodeSpec{ 485 PodCIDRs: []string{"10.10.2.0/24"}, 486 }, 487 }, 488 { 489 ObjectMeta: metav1.ObjectMeta{ 490 Name: "node2", 491 }, 492 }, 493 }, 494 Clientset: fake.NewSimpleClientset(), 495 }, 496 allocatorParams: CIDRAllocatorParams{ 497 ClusterCIDRs: func() []*net.IPNet { 498 _, clusterCIDR, _ := netutils.ParseCIDRSloppy("10.10.0.0/22") 499 return []*net.IPNet{clusterCIDR} 500 }(), 501 ServiceCIDR: nil, 502 SecondaryServiceCIDR: nil, 503 NodeCIDRMaskSizes: []int{24}, 504 }, 505 expectedAllocatedCIDR: map[int]string{ 506 0: "10.10.1.0/24", 507 }, 508 }, 509 } 510 511 // test function 512 _, tCtx := ktesting.NewTestContext(t) 513 testFunc := func(tc testCase) { 514 fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler) 515 nodeList, _ := tc.fakeNodeHandler.List(tCtx, metav1.ListOptions{}) 516 // Initialize the range allocator. 517 allocator, err := NewCIDRRangeAllocator(tCtx, tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList) 518 if err != nil { 519 t.Errorf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err) 520 return 521 } 522 rangeAllocator, ok := allocator.(*rangeAllocator) 523 if !ok { 524 t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) 525 return 526 } 527 rangeAllocator.nodesSynced = test.AlwaysReady 528 rangeAllocator.recorder = testutil.NewFakeRecorder() 529 go allocator.Run(tCtx) 530 531 // this is a bit of white box testing 532 // pre allocate the cidrs as per the test 533 for idx, allocatedList := range tc.allocatedCIDRs { 534 for _, allocated := range allocatedList { 535 _, cidr, err := netutils.ParseCIDRSloppy(allocated) 536 if err != nil { 537 t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err) 538 } 539 if err = rangeAllocator.cidrSets[idx].Occupy(cidr); err != nil { 540 t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err) 541 } 542 } 543 } 544 545 updateCount := 0 546 for _, node := range tc.fakeNodeHandler.Existing { 547 if node.Spec.PodCIDRs == nil { 548 updateCount++ 549 } 550 if err := allocator.AllocateOrOccupyCIDR(tCtx, node); err != nil { 551 t.Errorf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err) 552 } 553 } 554 if updateCount != 1 { 555 t.Fatalf("test error: all tests must update exactly one node") 556 } 557 if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, updateCount, wait.ForeverTestTimeout); err != nil { 558 t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err) 559 } 560 561 if len(tc.expectedAllocatedCIDR) == 0 { 562 // nothing further expected 563 return 564 } 565 for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() { 566 if len(updatedNode.Spec.PodCIDRs) == 0 { 567 continue // not assigned yet 568 } 569 //match 570 for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDR { 571 if updatedNode.Spec.PodCIDRs[podCIDRIdx] != expectedPodCIDR { 572 t.Errorf("%v: Unable to find allocated CIDR %v, found updated Nodes with CIDRs: %v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs) 573 break 574 } 575 } 576 } 577 } 578 579 // run the test cases 580 for _, tc := range testCases { 581 testFunc(tc) 582 } 583 } 584 585 func TestAllocateOrOccupyCIDRFailure(t *testing.T) { 586 testCases := []testCase{ 587 { 588 description: "When there's no ServiceCIDR return first CIDR in range", 589 fakeNodeHandler: &testutil.FakeNodeHandler{ 590 Existing: []*v1.Node{ 591 { 592 ObjectMeta: metav1.ObjectMeta{ 593 Name: "node0", 594 }, 595 }, 596 }, 597 Clientset: fake.NewSimpleClientset(), 598 }, 599 allocatorParams: CIDRAllocatorParams{ 600 ClusterCIDRs: func() []*net.IPNet { 601 _, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/28") 602 return []*net.IPNet{clusterCIDR} 603 }(), 604 ServiceCIDR: nil, 605 SecondaryServiceCIDR: nil, 606 NodeCIDRMaskSizes: []int{30}, 607 }, 608 allocatedCIDRs: map[int][]string{ 609 0: {"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"}, 610 }, 611 }, 612 } 613 _, tCtx := ktesting.NewTestContext(t) 614 testFunc := func(tc testCase) { 615 // Initialize the range allocator. 616 allocator, err := NewCIDRRangeAllocator(tCtx, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil) 617 if err != nil { 618 t.Logf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err) 619 } 620 rangeAllocator, ok := allocator.(*rangeAllocator) 621 if !ok { 622 t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) 623 return 624 } 625 rangeAllocator.nodesSynced = test.AlwaysReady 626 rangeAllocator.recorder = testutil.NewFakeRecorder() 627 go allocator.Run(tCtx) 628 629 // this is a bit of white box testing 630 for setIdx, allocatedList := range tc.allocatedCIDRs { 631 for _, allocated := range allocatedList { 632 _, cidr, err := netutils.ParseCIDRSloppy(allocated) 633 if err != nil { 634 t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, cidr, err) 635 } 636 err = rangeAllocator.cidrSets[setIdx].Occupy(cidr) 637 if err != nil { 638 t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, cidr, err) 639 } 640 } 641 } 642 if err := allocator.AllocateOrOccupyCIDR(tCtx, tc.fakeNodeHandler.Existing[0]); err == nil { 643 t.Errorf("%v: unexpected success in AllocateOrOccupyCIDR: %v", tc.description, err) 644 } 645 // We don't expect any updates, so just sleep for some time 646 time.Sleep(time.Second) 647 if len(tc.fakeNodeHandler.GetUpdatedNodesCopy()) != 0 { 648 t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.GetUpdatedNodesCopy()) 649 } 650 if len(tc.expectedAllocatedCIDR) == 0 { 651 // nothing further expected 652 return 653 } 654 for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() { 655 if len(updatedNode.Spec.PodCIDRs) == 0 { 656 continue // not assigned yet 657 } 658 //match 659 for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDR { 660 if updatedNode.Spec.PodCIDRs[podCIDRIdx] == expectedPodCIDR { 661 t.Errorf("%v: found cidr %v that should not be allocated on node with CIDRs:%v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs) 662 break 663 } 664 } 665 } 666 } 667 for _, tc := range testCases { 668 testFunc(tc) 669 } 670 } 671 672 type releaseTestCase struct { 673 description string 674 fakeNodeHandler *testutil.FakeNodeHandler 675 allocatorParams CIDRAllocatorParams 676 expectedAllocatedCIDRFirstRound map[int]string 677 expectedAllocatedCIDRSecondRound map[int]string 678 allocatedCIDRs map[int][]string 679 cidrsToRelease [][]string 680 } 681 682 func TestReleaseCIDRSuccess(t *testing.T) { 683 // Non-parallel test (overrides global var) 684 oldNodePollInterval := nodePollInterval 685 nodePollInterval = test.NodePollInterval 686 defer func() { 687 nodePollInterval = oldNodePollInterval 688 }() 689 690 testCases := []releaseTestCase{ 691 { 692 description: "Correctly release preallocated CIDR", 693 fakeNodeHandler: &testutil.FakeNodeHandler{ 694 Existing: []*v1.Node{ 695 { 696 ObjectMeta: metav1.ObjectMeta{ 697 Name: "node0", 698 }, 699 }, 700 }, 701 Clientset: fake.NewSimpleClientset(), 702 }, 703 allocatorParams: CIDRAllocatorParams{ 704 ClusterCIDRs: func() []*net.IPNet { 705 _, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/28") 706 return []*net.IPNet{clusterCIDR} 707 }(), 708 ServiceCIDR: nil, 709 SecondaryServiceCIDR: nil, 710 NodeCIDRMaskSizes: []int{30}, 711 }, 712 allocatedCIDRs: map[int][]string{ 713 0: {"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"}, 714 }, 715 expectedAllocatedCIDRFirstRound: nil, 716 cidrsToRelease: [][]string{ 717 {"127.123.234.4/30"}, 718 }, 719 expectedAllocatedCIDRSecondRound: map[int]string{ 720 0: "127.123.234.4/30", 721 }, 722 }, 723 { 724 description: "Correctly recycle CIDR", 725 fakeNodeHandler: &testutil.FakeNodeHandler{ 726 Existing: []*v1.Node{ 727 { 728 ObjectMeta: metav1.ObjectMeta{ 729 Name: "node0", 730 }, 731 }, 732 }, 733 Clientset: fake.NewSimpleClientset(), 734 }, 735 allocatorParams: CIDRAllocatorParams{ 736 ClusterCIDRs: func() []*net.IPNet { 737 _, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/28") 738 return []*net.IPNet{clusterCIDR} 739 }(), 740 ServiceCIDR: nil, 741 SecondaryServiceCIDR: nil, 742 NodeCIDRMaskSizes: []int{30}, 743 }, 744 allocatedCIDRs: map[int][]string{ 745 0: {"127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"}, 746 }, 747 expectedAllocatedCIDRFirstRound: map[int]string{ 748 0: "127.123.234.0/30", 749 }, 750 cidrsToRelease: [][]string{ 751 {"127.123.234.0/30"}, 752 }, 753 expectedAllocatedCIDRSecondRound: map[int]string{ 754 0: "127.123.234.0/30", 755 }, 756 }, 757 } 758 logger, tCtx := ktesting.NewTestContext(t) 759 testFunc := func(tc releaseTestCase) { 760 // Initialize the range allocator. 761 allocator, _ := NewCIDRRangeAllocator(tCtx, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil) 762 rangeAllocator, ok := allocator.(*rangeAllocator) 763 if !ok { 764 t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) 765 return 766 } 767 rangeAllocator.nodesSynced = test.AlwaysReady 768 rangeAllocator.recorder = testutil.NewFakeRecorder() 769 go allocator.Run(tCtx) 770 771 // this is a bit of white box testing 772 for setIdx, allocatedList := range tc.allocatedCIDRs { 773 for _, allocated := range allocatedList { 774 _, cidr, err := netutils.ParseCIDRSloppy(allocated) 775 if err != nil { 776 t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err) 777 } 778 err = rangeAllocator.cidrSets[setIdx].Occupy(cidr) 779 if err != nil { 780 t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err) 781 } 782 } 783 } 784 785 err := allocator.AllocateOrOccupyCIDR(tCtx, tc.fakeNodeHandler.Existing[0]) 786 if len(tc.expectedAllocatedCIDRFirstRound) != 0 { 787 if err != nil { 788 t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err) 789 } 790 if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil { 791 t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err) 792 } 793 } else { 794 if err == nil { 795 t.Fatalf("%v: unexpected success in AllocateOrOccupyCIDR: %v", tc.description, err) 796 } 797 // We don't expect any updates here 798 time.Sleep(time.Second) 799 if len(tc.fakeNodeHandler.GetUpdatedNodesCopy()) != 0 { 800 t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.GetUpdatedNodesCopy()) 801 } 802 } 803 for _, cidrToRelease := range tc.cidrsToRelease { 804 nodeToRelease := v1.Node{ 805 ObjectMeta: metav1.ObjectMeta{ 806 Name: "node0", 807 }, 808 } 809 nodeToRelease.Spec.PodCIDRs = cidrToRelease 810 err = allocator.ReleaseCIDR(logger, &nodeToRelease) 811 if err != nil { 812 t.Fatalf("%v: unexpected error in ReleaseCIDR: %v", tc.description, err) 813 } 814 } 815 if err = allocator.AllocateOrOccupyCIDR(tCtx, tc.fakeNodeHandler.Existing[0]); err != nil { 816 t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err) 817 } 818 if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil { 819 t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err) 820 } 821 822 if len(tc.expectedAllocatedCIDRSecondRound) == 0 { 823 // nothing further expected 824 return 825 } 826 for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() { 827 if len(updatedNode.Spec.PodCIDRs) == 0 { 828 continue // not assigned yet 829 } 830 //match 831 for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDRSecondRound { 832 if updatedNode.Spec.PodCIDRs[podCIDRIdx] != expectedPodCIDR { 833 t.Errorf("%v: found cidr %v that should not be allocated on node with CIDRs:%v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs) 834 break 835 } 836 } 837 } 838 } 839 840 for _, tc := range testCases { 841 testFunc(tc) 842 } 843 }