sigs.k8s.io/cluster-api@v1.7.1/controlplane/kubeadm/internal/workload_cluster_test.go (about) 1 /* 2 Copyright 2020 The Kubernetes Authors. 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 package internal 18 19 import ( 20 "context" 21 "errors" 22 "testing" 23 24 "github.com/blang/semver/v4" 25 "github.com/google/go-cmp/cmp" 26 . "github.com/onsi/gomega" 27 appsv1 "k8s.io/api/apps/v1" 28 corev1 "k8s.io/api/core/v1" 29 apierrors "k8s.io/apimachinery/pkg/api/errors" 30 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 31 "sigs.k8s.io/controller-runtime/pkg/client" 32 "sigs.k8s.io/controller-runtime/pkg/client/fake" 33 "sigs.k8s.io/yaml" 34 35 clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" 36 bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" 37 controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" 38 "sigs.k8s.io/cluster-api/util/version" 39 utilyaml "sigs.k8s.io/cluster-api/util/yaml" 40 ) 41 42 func TestGetControlPlaneNodes(t *testing.T) { 43 tests := []struct { 44 name string 45 nodes []corev1.Node 46 expectedNodes []string 47 }{ 48 { 49 name: "Return control plane nodes", 50 nodes: []corev1.Node{ 51 { 52 ObjectMeta: metav1.ObjectMeta{ 53 Name: "control-plane-node-with-old-label", 54 Labels: map[string]string{ 55 labelNodeRoleOldControlPlane: "", 56 }, 57 }, 58 }, 59 { 60 ObjectMeta: metav1.ObjectMeta{ 61 Name: "control-plane-node-with-both-labels", 62 Labels: map[string]string{ 63 labelNodeRoleOldControlPlane: "", 64 labelNodeRoleControlPlane: "", 65 }, 66 }, 67 }, 68 { 69 ObjectMeta: metav1.ObjectMeta{ 70 Name: "control-plane-node-with-new-label", 71 Labels: map[string]string{ 72 labelNodeRoleControlPlane: "", 73 }, 74 }, 75 }, 76 { 77 ObjectMeta: metav1.ObjectMeta{ 78 Name: "worker-node", 79 Labels: map[string]string{}, 80 }, 81 }, 82 }, 83 expectedNodes: []string{ 84 "control-plane-node-with-both-labels", 85 "control-plane-node-with-old-label", 86 "control-plane-node-with-new-label", 87 }, 88 }, 89 } 90 91 for _, tt := range tests { 92 t.Run(tt.name, func(t *testing.T) { 93 g := NewWithT(t) 94 objs := []client.Object{} 95 for i := range tt.nodes { 96 objs = append(objs, &tt.nodes[i]) 97 } 98 fakeClient := fake.NewClientBuilder().WithObjects(objs...).Build() 99 100 w := &Workload{ 101 Client: fakeClient, 102 } 103 nodes, err := w.getControlPlaneNodes(ctx) 104 g.Expect(err).ToNot(HaveOccurred()) 105 var actualNodes []string 106 for _, n := range nodes.Items { 107 actualNodes = append(actualNodes, n.Name) 108 } 109 g.Expect(actualNodes).To(Equal(tt.expectedNodes)) 110 }) 111 } 112 } 113 114 func TestUpdateKubeProxyImageInfo(t *testing.T) { 115 tests := []struct { 116 name string 117 ds appsv1.DaemonSet 118 expectErr bool 119 expectImage string 120 clientGet map[string]interface{} 121 patchErr error 122 KCP *controlplanev1.KubeadmControlPlane 123 }{ 124 { 125 name: "succeeds if patch correctly", 126 ds: newKubeProxyDS(), 127 expectErr: false, 128 expectImage: "k8s.gcr.io/kube-proxy:v1.16.3", 129 KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{Version: "v1.16.3"}}, 130 }, 131 { 132 name: "returns error if image in kube-proxy ds was in digest format", 133 ds: newKubeProxyDSWithImage("k8s.gcr.io/kube-proxy@sha256:47bfd"), 134 expectErr: true, 135 expectImage: "k8s.gcr.io/kube-proxy@sha256:47bfd", 136 KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{Version: "v1.16.3"}}, 137 }, 138 { 139 name: "expects OCI compatible format of tag", 140 ds: newKubeProxyDS(), 141 expectErr: false, 142 expectImage: "k8s.gcr.io/kube-proxy:v1.16.3_build1", 143 KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{Version: "v1.16.3+build1"}}, 144 }, 145 { 146 name: "returns error if image in kube-proxy ds was in wrong format", 147 ds: newKubeProxyDSWithImage(""), 148 expectErr: true, 149 KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{Version: "v1.16.3"}}, 150 }, 151 { 152 name: "updates image repository if one has been set on the control plane", 153 ds: newKubeProxyDS(), 154 expectErr: false, 155 expectImage: "foo.bar.example/baz/qux/kube-proxy:v1.16.3", 156 KCP: &controlplanev1.KubeadmControlPlane{ 157 Spec: controlplanev1.KubeadmControlPlaneSpec{ 158 Version: "v1.16.3", 159 KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ 160 ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ 161 ImageRepository: "foo.bar.example/baz/qux", 162 }, 163 }, 164 }}, 165 }, 166 { 167 name: "does not update image repository if it is blank", 168 ds: newKubeProxyDS(), 169 expectErr: false, 170 expectImage: "k8s.gcr.io/kube-proxy:v1.16.3", 171 KCP: &controlplanev1.KubeadmControlPlane{ 172 Spec: controlplanev1.KubeadmControlPlaneSpec{ 173 Version: "v1.16.3", 174 KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ 175 ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ 176 ImageRepository: "", 177 }, 178 }, 179 }}, 180 }, 181 { 182 name: "does update image repository to new default registry for v1.25 updates", 183 ds: newKubeProxyDSWithImage("k8s.gcr.io/kube-proxy:v1.24.0"), 184 expectErr: false, 185 expectImage: "registry.k8s.io/kube-proxy:v1.25.0-alpha.1", 186 KCP: &controlplanev1.KubeadmControlPlane{ 187 Spec: controlplanev1.KubeadmControlPlaneSpec{ 188 Version: "v1.25.0-alpha.1", 189 }}, 190 }, 191 { 192 name: "returns error if image repository is invalid", 193 ds: newKubeProxyDS(), 194 expectErr: true, 195 KCP: &controlplanev1.KubeadmControlPlane{ 196 Spec: controlplanev1.KubeadmControlPlaneSpec{ 197 Version: "v1.16.3", 198 KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ 199 ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ 200 ImageRepository: "%%%", 201 }, 202 }, 203 }}, 204 }, 205 { 206 name: "does not update image repository when no kube-proxy update is requested", 207 ds: newKubeProxyDSWithImage(""), // Using the same image name that would otherwise lead to an error 208 expectErr: false, 209 expectImage: "", 210 KCP: &controlplanev1.KubeadmControlPlane{ 211 ObjectMeta: metav1.ObjectMeta{ 212 Annotations: map[string]string{ 213 controlplanev1.SkipKubeProxyAnnotation: "", 214 }, 215 }, 216 Spec: controlplanev1.KubeadmControlPlaneSpec{ 217 Version: "v1.16.3", 218 }}, 219 }, 220 } 221 222 for i := range tests { 223 tt := tests[i] 224 t.Run(tt.name, func(t *testing.T) { 225 gs := NewWithT(t) 226 227 objects := []client.Object{ 228 &tt.ds, 229 } 230 fakeClient := fake.NewClientBuilder().WithObjects(objects...).Build() 231 w := &Workload{ 232 Client: fakeClient, 233 } 234 kubernetesVersion, err := version.ParseMajorMinorPatchTolerant(tt.KCP.Spec.Version) 235 gs.Expect(err).ToNot(HaveOccurred()) 236 err = w.UpdateKubeProxyImageInfo(ctx, tt.KCP, kubernetesVersion) 237 if tt.expectErr { 238 gs.Expect(err).To(HaveOccurred()) 239 } else { 240 gs.Expect(err).ToNot(HaveOccurred()) 241 } 242 243 proxyImage, err := getProxyImageInfo(ctx, w.Client) 244 gs.Expect(err).ToNot(HaveOccurred()) 245 if tt.expectImage != "" { 246 gs.Expect(proxyImage).To(Equal(tt.expectImage)) 247 } 248 }) 249 } 250 } 251 252 func TestRemoveMachineFromKubeadmConfigMap(t *testing.T) { 253 machine := &clusterv1.Machine{ 254 Status: clusterv1.MachineStatus{ 255 NodeRef: &corev1.ObjectReference{ 256 Name: "ip-10-0-0-1.ec2.internal", 257 }, 258 }, 259 } 260 kubeadmConfig := &corev1.ConfigMap{ 261 ObjectMeta: metav1.ObjectMeta{ 262 Name: kubeadmConfigKey, 263 Namespace: metav1.NamespaceSystem, 264 }, 265 Data: map[string]string{ 266 clusterStatusKey: utilyaml.Raw(` 267 apiEndpoints: 268 ip-10-0-0-1.ec2.internal: 269 advertiseAddress: 10.0.0.1 270 bindPort: 6443 271 ip-10-0-0-2.ec2.internal: 272 advertiseAddress: 10.0.0.2 273 bindPort: 6443 274 apiVersion: kubeadm.k8s.io/v1beta2 275 kind: ClusterStatus 276 `), 277 }, 278 BinaryData: map[string][]byte{ 279 "": nil, 280 }, 281 } 282 kubeadmConfigWithoutClusterStatus := kubeadmConfig.DeepCopy() 283 delete(kubeadmConfigWithoutClusterStatus.Data, clusterStatusKey) 284 285 tests := []struct { 286 name string 287 kubernetesVersion semver.Version 288 machine *clusterv1.Machine 289 objs []client.Object 290 expectErr bool 291 expectedEndpoints string 292 }{ 293 { 294 name: "does not panic if machine is nil", 295 expectErr: false, 296 }, 297 { 298 name: "does not panic if machine noderef is nil", 299 machine: &clusterv1.Machine{ 300 Status: clusterv1.MachineStatus{ 301 NodeRef: nil, 302 }, 303 }, 304 expectErr: false, 305 }, 306 { 307 name: "returns error if unable to find kubeadm-config", 308 machine: machine, 309 expectErr: true, 310 }, 311 { 312 name: "returns error if unable to find kubeadm-config for Kubernetes version < 1.22.0", 313 kubernetesVersion: semver.MustParse("1.19.1"), 314 machine: machine, 315 objs: []client.Object{kubeadmConfigWithoutClusterStatus}, 316 expectErr: true, 317 }, 318 { 319 name: "returns error if unable to remove api endpoint for Kubernetes version < 1.22.0", 320 kubernetesVersion: semver.MustParse("1.19.1"), // Kubernetes version < 1.22.0 has ClusterStatus 321 machine: machine, 322 objs: []client.Object{kubeadmConfigWithoutClusterStatus}, 323 expectErr: true, 324 }, 325 { 326 name: "removes the machine node ref from kubeadm config for Kubernetes version < 1.22.0", 327 kubernetesVersion: semver.MustParse("1.19.1"), // Kubernetes version < 1.22.0 has ClusterStatus 328 machine: machine, 329 objs: []client.Object{kubeadmConfig}, 330 expectErr: false, 331 expectedEndpoints: utilyaml.Raw(` 332 apiEndpoints: 333 ip-10-0-0-2.ec2.internal: 334 advertiseAddress: 10.0.0.2 335 bindPort: 6443 336 apiVersion: kubeadm.k8s.io/v1beta2 337 kind: ClusterStatus 338 `), 339 }, 340 { 341 name: "no op for Kubernetes version 1.22.0 alpha", 342 kubernetesVersion: semver.MustParse("1.22.0-alpha.0.734+ba502ee555924a"), // Kubernetes version >= 1.22.0 should not manage ClusterStatus 343 machine: machine, 344 objs: []client.Object{kubeadmConfigWithoutClusterStatus}, 345 expectErr: false, 346 }, 347 { 348 name: "no op for Kubernetes version >= 1.22.0", 349 kubernetesVersion: minKubernetesVersionWithoutClusterStatus, // Kubernetes version >= 1.22.0 should not manage ClusterStatus 350 machine: machine, 351 objs: []client.Object{kubeadmConfigWithoutClusterStatus}, 352 expectErr: false, 353 }, 354 } 355 356 for _, tt := range tests { 357 t.Run(tt.name, func(t *testing.T) { 358 g := NewWithT(t) 359 fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build() 360 w := &Workload{ 361 Client: fakeClient, 362 } 363 err := w.RemoveMachineFromKubeadmConfigMap(ctx, tt.machine, tt.kubernetesVersion) 364 if tt.expectErr { 365 g.Expect(err).To(HaveOccurred()) 366 return 367 } 368 g.Expect(err).ToNot(HaveOccurred()) 369 if tt.expectedEndpoints != "" { 370 var actualConfig corev1.ConfigMap 371 g.Expect(w.Client.Get( 372 ctx, 373 client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, 374 &actualConfig, 375 )).To(Succeed()) 376 g.Expect(actualConfig.Data[clusterStatusKey]).To(Equal(tt.expectedEndpoints), cmp.Diff(tt.expectedEndpoints, actualConfig.Data[clusterStatusKey])) 377 } 378 }) 379 } 380 } 381 382 func TestUpdateKubeletConfigMap(t *testing.T) { 383 tests := []struct { 384 name string 385 version semver.Version 386 objs []client.Object 387 expectErr bool 388 expectCgroupDriver string 389 expectNewConfigMap bool 390 }{ 391 { 392 name: "create new config map for 1.19 --> 1.20 (anything < 1.24); config map for previous version is copied", 393 version: semver.Version{Major: 1, Minor: 20}, 394 objs: []client.Object{&corev1.ConfigMap{ 395 ObjectMeta: metav1.ObjectMeta{ 396 Name: "kubelet-config-1.19", 397 Namespace: metav1.NamespaceSystem, 398 ResourceVersion: "some-resource-version", 399 }, 400 Data: map[string]string{ 401 kubeletConfigKey: utilyaml.Raw(` 402 apiVersion: kubelet.config.k8s.io/v1beta1 403 kind: KubeletConfiguration 404 foo: bar 405 `), 406 }, 407 }}, 408 expectNewConfigMap: true, 409 }, 410 { 411 name: "create new config map 1.23 --> 1.24; config map for previous version is copied", 412 version: semver.Version{Major: 1, Minor: 24}, 413 objs: []client.Object{&corev1.ConfigMap{ 414 ObjectMeta: metav1.ObjectMeta{ 415 Name: "kubelet-config-1.23", 416 Namespace: metav1.NamespaceSystem, 417 ResourceVersion: "some-resource-version", 418 }, 419 Data: map[string]string{ 420 kubeletConfigKey: utilyaml.Raw(` 421 apiVersion: kubelet.config.k8s.io/v1beta1 422 kind: KubeletConfiguration 423 foo: bar 424 `), 425 }, 426 }}, 427 expectNewConfigMap: true, 428 }, 429 { 430 name: "create new config map >=1.24 --> next; no op", 431 version: semver.Version{Major: 1, Minor: 25}, 432 objs: []client.Object{&corev1.ConfigMap{ 433 ObjectMeta: metav1.ObjectMeta{ 434 Name: "kubelet-config", 435 Namespace: metav1.NamespaceSystem, 436 ResourceVersion: "some-resource-version", 437 }, 438 Data: map[string]string{ 439 kubeletConfigKey: utilyaml.Raw(` 440 apiVersion: kubelet.config.k8s.io/v1beta1 441 kind: KubeletConfiguration 442 foo: bar 443 `), 444 }, 445 }}, 446 }, 447 { 448 name: "1.20 --> 1.21 sets the cgroupDriver if empty", 449 version: semver.Version{Major: 1, Minor: 21}, 450 objs: []client.Object{&corev1.ConfigMap{ 451 ObjectMeta: metav1.ObjectMeta{ 452 Name: "kubelet-config-1.20", 453 Namespace: metav1.NamespaceSystem, 454 ResourceVersion: "some-resource-version", 455 }, 456 Data: map[string]string{ 457 kubeletConfigKey: utilyaml.Raw(` 458 apiVersion: kubelet.config.k8s.io/v1beta1 459 kind: KubeletConfiguration 460 foo: bar 461 `), 462 }, 463 }}, 464 expectCgroupDriver: "systemd", 465 expectNewConfigMap: true, 466 }, 467 { 468 name: "1.20 --> 1.21 preserves cgroupDriver if already set", 469 version: semver.Version{Major: 1, Minor: 21}, 470 objs: []client.Object{&corev1.ConfigMap{ 471 ObjectMeta: metav1.ObjectMeta{ 472 Name: "kubelet-config-1.20", 473 Namespace: metav1.NamespaceSystem, 474 ResourceVersion: "some-resource-version", 475 }, 476 Data: map[string]string{ 477 kubeletConfigKey: utilyaml.Raw(` 478 apiVersion: kubelet.config.k8s.io/v1beta1 479 kind: KubeletConfiguration 480 cgroupDriver: cgroupfs 481 foo: bar 482 `), 483 }, 484 }}, 485 expectCgroupDriver: "cgroupfs", 486 expectNewConfigMap: true, 487 }, 488 { 489 name: "returns error if cannot find previous config map", 490 version: semver.Version{Major: 1, Minor: 21}, 491 expectErr: true, 492 }, 493 } 494 495 for _, tt := range tests { 496 t.Run(tt.name, func(t *testing.T) { 497 g := NewWithT(t) 498 fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build() 499 w := &Workload{ 500 Client: fakeClient, 501 } 502 err := w.UpdateKubeletConfigMap(ctx, tt.version) 503 if tt.expectErr { 504 g.Expect(err).To(HaveOccurred()) 505 return 506 } 507 g.Expect(err).ToNot(HaveOccurred()) 508 509 // Check if the resulting ConfigMap exists 510 var actualConfig corev1.ConfigMap 511 g.Expect(w.Client.Get( 512 ctx, 513 client.ObjectKey{Name: generateKubeletConfigName(tt.version), Namespace: metav1.NamespaceSystem}, 514 &actualConfig, 515 )).To(Succeed()) 516 // Check other values are carried over for previous config map 517 g.Expect(actualConfig.Data[kubeletConfigKey]).To(ContainSubstring("foo")) 518 // Check the cgroupvalue has the expected value 519 g.Expect(actualConfig.Data[kubeletConfigKey]).To(ContainSubstring(tt.expectCgroupDriver)) 520 // check if the config map is new 521 if tt.expectNewConfigMap { 522 g.Expect(actualConfig.ResourceVersion).ToNot(Equal("some-resource-version")) 523 } else { 524 g.Expect(actualConfig.ResourceVersion).To(Equal("some-resource-version")) 525 } 526 }) 527 } 528 } 529 530 func TestUpdateUpdateClusterConfigurationInKubeadmConfigMap(t *testing.T) { 531 tests := []struct { 532 name string 533 version semver.Version 534 objs []client.Object 535 mutator func(*bootstrapv1.ClusterConfiguration) 536 wantConfigMap *corev1.ConfigMap 537 wantErr bool 538 }{ 539 { 540 name: "fails if missing config map", 541 version: semver.MustParse("1.17.2"), 542 objs: nil, 543 wantErr: true, 544 }, 545 { 546 name: "fail if config map without ClusterConfiguration data", 547 version: semver.MustParse("1.17.2"), 548 objs: []client.Object{&corev1.ConfigMap{ 549 ObjectMeta: metav1.ObjectMeta{ 550 Name: kubeadmConfigKey, 551 Namespace: metav1.NamespaceSystem, 552 }, 553 Data: map[string]string{}, 554 }}, 555 wantErr: true, 556 }, 557 { 558 name: "fail if config map with invalid ClusterConfiguration data", 559 version: semver.MustParse("1.17.2"), 560 objs: []client.Object{&corev1.ConfigMap{ 561 ObjectMeta: metav1.ObjectMeta{ 562 Name: kubeadmConfigKey, 563 Namespace: metav1.NamespaceSystem, 564 }, 565 Data: map[string]string{ 566 clusterConfigurationKey: "foo", 567 }, 568 }}, 569 wantErr: true, 570 }, 571 { 572 name: "no op if mutator does not apply changes", 573 version: semver.MustParse("1.17.2"), 574 objs: []client.Object{&corev1.ConfigMap{ 575 ObjectMeta: metav1.ObjectMeta{ 576 Name: kubeadmConfigKey, 577 Namespace: metav1.NamespaceSystem, 578 }, 579 Data: map[string]string{ 580 clusterConfigurationKey: utilyaml.Raw(` 581 apiVersion: kubeadm.k8s.io/v1beta2 582 kind: ClusterConfiguration 583 kubernetesVersion: v1.16.1 584 `), 585 }, 586 }}, 587 mutator: func(*bootstrapv1.ClusterConfiguration) {}, 588 wantConfigMap: &corev1.ConfigMap{ 589 ObjectMeta: metav1.ObjectMeta{ 590 Name: kubeadmConfigKey, 591 Namespace: metav1.NamespaceSystem, 592 }, 593 Data: map[string]string{ 594 clusterConfigurationKey: utilyaml.Raw(` 595 apiVersion: kubeadm.k8s.io/v1beta2 596 kind: ClusterConfiguration 597 kubernetesVersion: v1.16.1 598 `), 599 }, 600 }, 601 }, 602 { 603 name: "apply changes", 604 version: semver.MustParse("1.17.2"), 605 objs: []client.Object{&corev1.ConfigMap{ 606 ObjectMeta: metav1.ObjectMeta{ 607 Name: kubeadmConfigKey, 608 Namespace: metav1.NamespaceSystem, 609 }, 610 Data: map[string]string{ 611 clusterConfigurationKey: utilyaml.Raw(` 612 apiVersion: kubeadm.k8s.io/v1beta2 613 kind: ClusterConfiguration 614 kubernetesVersion: v1.16.1 615 `), 616 }, 617 }}, 618 mutator: func(c *bootstrapv1.ClusterConfiguration) { 619 c.KubernetesVersion = "v1.17.2" 620 }, 621 wantConfigMap: &corev1.ConfigMap{ 622 ObjectMeta: metav1.ObjectMeta{ 623 Name: kubeadmConfigKey, 624 Namespace: metav1.NamespaceSystem, 625 }, 626 Data: map[string]string{ 627 clusterConfigurationKey: utilyaml.Raw(` 628 apiServer: {} 629 apiVersion: kubeadm.k8s.io/v1beta2 630 controllerManager: {} 631 dns: {} 632 etcd: {} 633 kind: ClusterConfiguration 634 kubernetesVersion: v1.17.2 635 networking: {} 636 scheduler: {} 637 `), 638 }, 639 }, 640 }, 641 { 642 name: "converts kubeadm api version during mutation if required", 643 version: semver.MustParse("1.28.0"), 644 objs: []client.Object{&corev1.ConfigMap{ 645 ObjectMeta: metav1.ObjectMeta{ 646 Name: kubeadmConfigKey, 647 Namespace: metav1.NamespaceSystem, 648 }, 649 Data: map[string]string{ 650 clusterConfigurationKey: utilyaml.Raw(` 651 apiVersion: kubeadm.k8s.io/v1beta2 652 kind: ClusterConfiguration 653 kubernetesVersion: v1.16.1 654 `), 655 }, 656 }}, 657 mutator: func(c *bootstrapv1.ClusterConfiguration) { 658 c.KubernetesVersion = "v1.28.0" 659 }, 660 wantConfigMap: &corev1.ConfigMap{ 661 ObjectMeta: metav1.ObjectMeta{ 662 Name: kubeadmConfigKey, 663 Namespace: metav1.NamespaceSystem, 664 }, 665 Data: map[string]string{ 666 clusterConfigurationKey: utilyaml.Raw(` 667 apiServer: {} 668 apiVersion: kubeadm.k8s.io/v1beta3 669 controllerManager: {} 670 dns: {} 671 etcd: {} 672 kind: ClusterConfiguration 673 kubernetesVersion: v1.28.0 674 networking: {} 675 scheduler: {} 676 `), 677 }, 678 }, 679 }, 680 } 681 682 for _, tt := range tests { 683 t.Run(tt.name, func(t *testing.T) { 684 g := NewWithT(t) 685 fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build() 686 687 w := &Workload{ 688 Client: fakeClient, 689 } 690 err := w.UpdateClusterConfiguration(ctx, tt.version, tt.mutator) 691 if tt.wantErr { 692 g.Expect(err).To(HaveOccurred()) 693 return 694 } 695 g.Expect(err).ToNot(HaveOccurred()) 696 697 var actualConfig corev1.ConfigMap 698 g.Expect(w.Client.Get( 699 ctx, 700 client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, 701 &actualConfig, 702 )).To(Succeed()) 703 g.Expect(actualConfig.Data[clusterConfigurationKey]).To(Equal(tt.wantConfigMap.Data[clusterConfigurationKey]), cmp.Diff(tt.wantConfigMap.Data[clusterConfigurationKey], actualConfig.Data[clusterConfigurationKey])) 704 }) 705 } 706 } 707 708 func TestUpdateUpdateClusterStatusInKubeadmConfigMap(t *testing.T) { 709 tests := []struct { 710 name string 711 version semver.Version 712 objs []client.Object 713 mutator func(status *bootstrapv1.ClusterStatus) 714 wantConfigMap *corev1.ConfigMap 715 wantErr bool 716 }{ 717 { 718 name: "fails if missing config map", 719 version: semver.MustParse("1.17.2"), 720 objs: nil, 721 wantErr: true, 722 }, 723 { 724 name: "fail if config map without ClusterStatus data", 725 version: semver.MustParse("1.17.2"), 726 objs: []client.Object{&corev1.ConfigMap{ 727 ObjectMeta: metav1.ObjectMeta{ 728 Name: kubeadmConfigKey, 729 Namespace: metav1.NamespaceSystem, 730 }, 731 Data: map[string]string{}, 732 }}, 733 wantErr: true, 734 }, 735 { 736 name: "fail if config map with invalid ClusterStatus data", 737 version: semver.MustParse("1.17.2"), 738 objs: []client.Object{&corev1.ConfigMap{ 739 ObjectMeta: metav1.ObjectMeta{ 740 Name: kubeadmConfigKey, 741 Namespace: metav1.NamespaceSystem, 742 }, 743 Data: map[string]string{ 744 clusterStatusKey: "foo", 745 }, 746 }}, 747 wantErr: true, 748 }, 749 { 750 name: "no op if mutator does not apply changes", 751 version: semver.MustParse("1.17.2"), 752 objs: []client.Object{&corev1.ConfigMap{ 753 ObjectMeta: metav1.ObjectMeta{ 754 Name: kubeadmConfigKey, 755 Namespace: metav1.NamespaceSystem, 756 }, 757 Data: map[string]string{ 758 clusterStatusKey: utilyaml.Raw(` 759 apiEndpoints: 760 ip-10-0-0-1.ec2.internal: 761 advertiseAddress: 10.0.0.1 762 bindPort: 6443 763 apiVersion: kubeadm.k8s.io/v1beta2 764 kind: ClusterStatus 765 `), 766 }, 767 }}, 768 mutator: func(*bootstrapv1.ClusterStatus) {}, 769 wantConfigMap: &corev1.ConfigMap{ 770 ObjectMeta: metav1.ObjectMeta{ 771 Name: kubeadmConfigKey, 772 Namespace: metav1.NamespaceSystem, 773 }, 774 Data: map[string]string{ 775 clusterStatusKey: utilyaml.Raw(` 776 apiEndpoints: 777 ip-10-0-0-1.ec2.internal: 778 advertiseAddress: 10.0.0.1 779 bindPort: 6443 780 apiVersion: kubeadm.k8s.io/v1beta2 781 kind: ClusterStatus 782 `), 783 }, 784 }, 785 }, 786 { 787 name: "apply changes", 788 version: semver.MustParse("1.17.2"), 789 objs: []client.Object{&corev1.ConfigMap{ 790 ObjectMeta: metav1.ObjectMeta{ 791 Name: kubeadmConfigKey, 792 Namespace: metav1.NamespaceSystem, 793 }, 794 Data: map[string]string{ 795 clusterStatusKey: utilyaml.Raw(` 796 apiEndpoints: 797 ip-10-0-0-1.ec2.internal: 798 advertiseAddress: 10.0.0.1 799 bindPort: 6443 800 apiVersion: kubeadm.k8s.io/v1beta2 801 kind: ClusterStatus 802 `), 803 }, 804 }}, 805 mutator: func(status *bootstrapv1.ClusterStatus) { 806 status.APIEndpoints["ip-10-0-0-2.ec2.internal"] = bootstrapv1.APIEndpoint{} 807 }, 808 wantConfigMap: &corev1.ConfigMap{ 809 ObjectMeta: metav1.ObjectMeta{ 810 Name: kubeadmConfigKey, 811 Namespace: metav1.NamespaceSystem, 812 }, 813 Data: map[string]string{ 814 clusterStatusKey: utilyaml.Raw(` 815 apiEndpoints: 816 ip-10-0-0-1.ec2.internal: 817 advertiseAddress: 10.0.0.1 818 bindPort: 6443 819 ip-10-0-0-2.ec2.internal: {} 820 apiVersion: kubeadm.k8s.io/v1beta2 821 kind: ClusterStatus 822 `), 823 }, 824 }, 825 }, 826 } 827 828 for _, tt := range tests { 829 t.Run(tt.name, func(t *testing.T) { 830 g := NewWithT(t) 831 fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build() 832 833 w := &Workload{ 834 Client: fakeClient, 835 } 836 err := w.updateClusterStatus(ctx, tt.mutator, tt.version) 837 if tt.wantErr { 838 g.Expect(err).To(HaveOccurred()) 839 return 840 } 841 g.Expect(err).ToNot(HaveOccurred()) 842 843 var actualConfig corev1.ConfigMap 844 g.Expect(w.Client.Get( 845 ctx, 846 client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, 847 &actualConfig, 848 )).To(Succeed()) 849 g.Expect(actualConfig.Data[clusterStatusKey]).To(Equal(tt.wantConfigMap.Data[clusterStatusKey]), cmp.Diff(tt.wantConfigMap.Data[clusterStatusKey], actualConfig.Data[clusterStatusKey])) 850 }) 851 } 852 } 853 854 func TestUpdateKubernetesVersionInKubeadmConfigMap(t *testing.T) { 855 tests := []struct { 856 name string 857 version semver.Version 858 clusterConfigurationData string 859 }{ 860 { 861 name: "updates the config map and changes the kubeadm API version", 862 version: semver.MustParse("1.17.2"), 863 clusterConfigurationData: utilyaml.Raw(` 864 apiVersion: kubeadm.k8s.io/v1beta2 865 kind: ClusterConfiguration 866 kubernetesVersion: v1.16.1`), 867 }, 868 } 869 870 for _, tt := range tests { 871 t.Run(tt.name, func(t *testing.T) { 872 g := NewWithT(t) 873 fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{ 874 ObjectMeta: metav1.ObjectMeta{ 875 Name: kubeadmConfigKey, 876 Namespace: metav1.NamespaceSystem, 877 }, 878 Data: map[string]string{ 879 clusterConfigurationKey: tt.clusterConfigurationData, 880 }, 881 }).Build() 882 883 w := &Workload{ 884 Client: fakeClient, 885 } 886 887 err := w.UpdateClusterConfiguration(ctx, tt.version, w.UpdateKubernetesVersionInKubeadmConfigMap(tt.version)) 888 g.Expect(err).ToNot(HaveOccurred()) 889 890 var actualConfig corev1.ConfigMap 891 g.Expect(w.Client.Get( 892 ctx, 893 client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, 894 &actualConfig, 895 )).To(Succeed()) 896 g.Expect(actualConfig.Data[clusterConfigurationKey]).To(ContainSubstring(tt.version.String())) 897 }) 898 } 899 } 900 901 func TestUpdateImageRepositoryInKubeadmConfigMap(t *testing.T) { 902 tests := []struct { 903 name string 904 clusterConfigurationData string 905 newImageRepository string 906 wantImageRepository string 907 }{ 908 { 909 name: "it should set the image repository", 910 clusterConfigurationData: utilyaml.Raw(` 911 apiVersion: kubeadm.k8s.io/v1beta2 912 kind: ClusterConfiguration`), 913 newImageRepository: "example.com/k8s", 914 wantImageRepository: "example.com/k8s", 915 }, 916 { 917 name: "it should preserve the existing image repository if then new value is empty", 918 clusterConfigurationData: utilyaml.Raw(` 919 apiVersion: kubeadm.k8s.io/v1beta2 920 kind: ClusterConfiguration 921 imageRepository: foo.bar/baz.io`), 922 newImageRepository: "", 923 wantImageRepository: "foo.bar/baz.io", 924 }, 925 } 926 927 for _, tt := range tests { 928 t.Run(tt.name, func(t *testing.T) { 929 g := NewWithT(t) 930 fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{ 931 ObjectMeta: metav1.ObjectMeta{ 932 Name: kubeadmConfigKey, 933 Namespace: metav1.NamespaceSystem, 934 }, 935 Data: map[string]string{ 936 clusterConfigurationKey: tt.clusterConfigurationData, 937 }, 938 }).Build() 939 940 w := &Workload{ 941 Client: fakeClient, 942 } 943 err := w.UpdateClusterConfiguration(ctx, semver.MustParse("1.19.1"), w.UpdateImageRepositoryInKubeadmConfigMap(tt.newImageRepository)) 944 g.Expect(err).ToNot(HaveOccurred()) 945 946 var actualConfig corev1.ConfigMap 947 g.Expect(w.Client.Get( 948 ctx, 949 client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, 950 &actualConfig, 951 )).To(Succeed()) 952 g.Expect(actualConfig.Data[clusterConfigurationKey]).To(ContainSubstring(tt.wantImageRepository)) 953 }) 954 } 955 } 956 957 func TestUpdateApiServerInKubeadmConfigMap(t *testing.T) { 958 tests := []struct { 959 name string 960 clusterConfigurationData string 961 newAPIServer bootstrapv1.APIServer 962 wantClusterConfiguration string 963 }{ 964 { 965 name: "it should set the api server config", 966 clusterConfigurationData: utilyaml.Raw(` 967 apiVersion: kubeadm.k8s.io/v1beta2 968 kind: ClusterConfiguration 969 `), 970 newAPIServer: bootstrapv1.APIServer{ 971 ControlPlaneComponent: bootstrapv1.ControlPlaneComponent{ 972 ExtraArgs: map[string]string{ 973 "bar": "baz", 974 "someKey": "someVal", 975 }, 976 ExtraVolumes: []bootstrapv1.HostPathMount{ 977 { 978 Name: "mount2", 979 HostPath: "/bar/baz", 980 MountPath: "/foo/bar", 981 }, 982 }, 983 }, 984 }, 985 wantClusterConfiguration: utilyaml.Raw(` 986 apiServer: 987 extraArgs: 988 bar: baz 989 someKey: someVal 990 extraVolumes: 991 - hostPath: /bar/baz 992 mountPath: /foo/bar 993 name: mount2 994 apiVersion: kubeadm.k8s.io/v1beta2 995 controllerManager: {} 996 dns: {} 997 etcd: {} 998 kind: ClusterConfiguration 999 networking: {} 1000 scheduler: {} 1001 `), 1002 }, 1003 } 1004 1005 for _, tt := range tests { 1006 t.Run(tt.name, func(t *testing.T) { 1007 g := NewWithT(t) 1008 fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{ 1009 ObjectMeta: metav1.ObjectMeta{ 1010 Name: kubeadmConfigKey, 1011 Namespace: metav1.NamespaceSystem, 1012 }, 1013 Data: map[string]string{ 1014 clusterConfigurationKey: tt.clusterConfigurationData, 1015 }, 1016 }).Build() 1017 1018 w := &Workload{ 1019 Client: fakeClient, 1020 } 1021 err := w.UpdateClusterConfiguration(ctx, semver.MustParse("1.19.1"), w.UpdateAPIServerInKubeadmConfigMap(tt.newAPIServer)) 1022 g.Expect(err).ToNot(HaveOccurred()) 1023 1024 var actualConfig corev1.ConfigMap 1025 g.Expect(w.Client.Get( 1026 ctx, 1027 client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, 1028 &actualConfig, 1029 )).To(Succeed()) 1030 g.Expect(actualConfig.Data[clusterConfigurationKey]).Should(Equal(tt.wantClusterConfiguration), cmp.Diff(tt.wantClusterConfiguration, actualConfig.Data[clusterConfigurationKey])) 1031 }) 1032 } 1033 } 1034 1035 func TestUpdateControllerManagerInKubeadmConfigMap(t *testing.T) { 1036 tests := []struct { 1037 name string 1038 clusterConfigurationData string 1039 newControllerManager bootstrapv1.ControlPlaneComponent 1040 wantClusterConfiguration string 1041 }{ 1042 { 1043 name: "it should set the controller manager config", 1044 clusterConfigurationData: utilyaml.Raw(` 1045 apiVersion: kubeadm.k8s.io/v1beta2 1046 kind: ClusterConfiguration 1047 `), 1048 newControllerManager: bootstrapv1.ControlPlaneComponent{ 1049 ExtraArgs: map[string]string{ 1050 "bar": "baz", 1051 "someKey": "someVal", 1052 }, 1053 ExtraVolumes: []bootstrapv1.HostPathMount{ 1054 { 1055 Name: "mount2", 1056 HostPath: "/bar/baz", 1057 MountPath: "/foo/bar", 1058 }, 1059 }, 1060 }, 1061 wantClusterConfiguration: utilyaml.Raw(` 1062 apiServer: {} 1063 apiVersion: kubeadm.k8s.io/v1beta2 1064 controllerManager: 1065 extraArgs: 1066 bar: baz 1067 someKey: someVal 1068 extraVolumes: 1069 - hostPath: /bar/baz 1070 mountPath: /foo/bar 1071 name: mount2 1072 dns: {} 1073 etcd: {} 1074 kind: ClusterConfiguration 1075 networking: {} 1076 scheduler: {} 1077 `), 1078 }, 1079 } 1080 1081 for _, tt := range tests { 1082 t.Run(tt.name, func(t *testing.T) { 1083 g := NewWithT(t) 1084 fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{ 1085 ObjectMeta: metav1.ObjectMeta{ 1086 Name: kubeadmConfigKey, 1087 Namespace: metav1.NamespaceSystem, 1088 }, 1089 Data: map[string]string{ 1090 clusterConfigurationKey: tt.clusterConfigurationData, 1091 }, 1092 }).Build() 1093 1094 w := &Workload{ 1095 Client: fakeClient, 1096 } 1097 err := w.UpdateClusterConfiguration(ctx, semver.MustParse("1.19.1"), w.UpdateControllerManagerInKubeadmConfigMap(tt.newControllerManager)) 1098 g.Expect(err).ToNot(HaveOccurred()) 1099 1100 var actualConfig corev1.ConfigMap 1101 g.Expect(w.Client.Get( 1102 ctx, 1103 client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, 1104 &actualConfig, 1105 )).To(Succeed()) 1106 g.Expect(actualConfig.Data[clusterConfigurationKey]).Should(Equal(tt.wantClusterConfiguration), cmp.Diff(tt.wantClusterConfiguration, actualConfig.Data[clusterConfigurationKey])) 1107 }) 1108 } 1109 } 1110 1111 func TestUpdateSchedulerInKubeadmConfigMap(t *testing.T) { 1112 tests := []struct { 1113 name string 1114 clusterConfigurationData string 1115 newScheduler bootstrapv1.ControlPlaneComponent 1116 wantClusterConfiguration string 1117 }{ 1118 { 1119 name: "it should set the scheduler config", 1120 clusterConfigurationData: utilyaml.Raw(` 1121 apiVersion: kubeadm.k8s.io/v1beta2 1122 kind: ClusterConfiguration 1123 `), 1124 newScheduler: bootstrapv1.ControlPlaneComponent{ 1125 ExtraArgs: map[string]string{ 1126 "bar": "baz", 1127 "someKey": "someVal", 1128 }, 1129 ExtraVolumes: []bootstrapv1.HostPathMount{ 1130 { 1131 Name: "mount2", 1132 HostPath: "/bar/baz", 1133 MountPath: "/foo/bar", 1134 }, 1135 }, 1136 }, 1137 wantClusterConfiguration: utilyaml.Raw(` 1138 apiServer: {} 1139 apiVersion: kubeadm.k8s.io/v1beta2 1140 controllerManager: {} 1141 dns: {} 1142 etcd: {} 1143 kind: ClusterConfiguration 1144 networking: {} 1145 scheduler: 1146 extraArgs: 1147 bar: baz 1148 someKey: someVal 1149 extraVolumes: 1150 - hostPath: /bar/baz 1151 mountPath: /foo/bar 1152 name: mount2 1153 `), 1154 }, 1155 } 1156 for _, tt := range tests { 1157 t.Run(tt.name, func(t *testing.T) { 1158 g := NewWithT(t) 1159 fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{ 1160 ObjectMeta: metav1.ObjectMeta{ 1161 Name: kubeadmConfigKey, 1162 Namespace: metav1.NamespaceSystem, 1163 }, 1164 Data: map[string]string{ 1165 clusterConfigurationKey: tt.clusterConfigurationData, 1166 }, 1167 }).Build() 1168 1169 w := &Workload{ 1170 Client: fakeClient, 1171 } 1172 err := w.UpdateClusterConfiguration(ctx, semver.MustParse("1.19.1"), w.UpdateSchedulerInKubeadmConfigMap(tt.newScheduler)) 1173 g.Expect(err).ToNot(HaveOccurred()) 1174 1175 var actualConfig corev1.ConfigMap 1176 g.Expect(w.Client.Get( 1177 ctx, 1178 client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, 1179 &actualConfig, 1180 )).To(Succeed()) 1181 g.Expect(actualConfig.Data[clusterConfigurationKey]).Should(Equal(tt.wantClusterConfiguration), cmp.Diff(tt.wantClusterConfiguration, actualConfig.Data[clusterConfigurationKey])) 1182 }) 1183 } 1184 } 1185 1186 func TestClusterStatus(t *testing.T) { 1187 node1 := &corev1.Node{ 1188 ObjectMeta: metav1.ObjectMeta{ 1189 Name: "node1", 1190 Labels: map[string]string{ 1191 labelNodeRoleControlPlane: "", 1192 }, 1193 }, 1194 Status: corev1.NodeStatus{ 1195 Conditions: []corev1.NodeCondition{{ 1196 Type: corev1.NodeReady, 1197 Status: corev1.ConditionTrue, 1198 }}, 1199 }, 1200 } 1201 node2 := &corev1.Node{ 1202 ObjectMeta: metav1.ObjectMeta{ 1203 Name: "node2", 1204 Labels: map[string]string{ 1205 labelNodeRoleControlPlane: "", 1206 }, 1207 }, 1208 Status: corev1.NodeStatus{ 1209 Conditions: []corev1.NodeCondition{{ 1210 Type: corev1.NodeReady, 1211 Status: corev1.ConditionFalse, 1212 }}, 1213 }, 1214 } 1215 kconf := &corev1.ConfigMap{ 1216 ObjectMeta: metav1.ObjectMeta{ 1217 Name: kubeadmConfigKey, 1218 Namespace: metav1.NamespaceSystem, 1219 }, 1220 } 1221 tests := []struct { 1222 name string 1223 objs []client.Object 1224 expectErr bool 1225 expectHasConf bool 1226 }{ 1227 { 1228 name: "returns cluster status", 1229 objs: []client.Object{node1, node2}, 1230 expectErr: false, 1231 expectHasConf: false, 1232 }, 1233 { 1234 name: "returns cluster status with kubeadm config", 1235 objs: []client.Object{node1, node2, kconf}, 1236 expectErr: false, 1237 expectHasConf: true, 1238 }, 1239 } 1240 1241 for _, tt := range tests { 1242 t.Run(tt.name, func(t *testing.T) { 1243 g := NewWithT(t) 1244 fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build() 1245 w := &Workload{ 1246 Client: fakeClient, 1247 } 1248 status, err := w.ClusterStatus(ctx) 1249 if tt.expectErr { 1250 g.Expect(err).To(HaveOccurred()) 1251 return 1252 } 1253 g.Expect(err).ToNot(HaveOccurred()) 1254 g.Expect(status.Nodes).To(BeEquivalentTo(2)) 1255 g.Expect(status.ReadyNodes).To(BeEquivalentTo(1)) 1256 if tt.expectHasConf { 1257 g.Expect(status.HasKubeadmConfig).To(BeTrue()) 1258 return 1259 } 1260 g.Expect(status.HasKubeadmConfig).To(BeFalse()) 1261 }) 1262 } 1263 } 1264 1265 func TestUpdateFeatureGatesInKubeadmConfigMap(t *testing.T) { 1266 tests := []struct { 1267 name string 1268 clusterConfigurationData string 1269 newFeatureGates map[string]bool 1270 wantFeatureGates map[string]bool 1271 }{ 1272 { 1273 name: "it updates feature gates", 1274 clusterConfigurationData: utilyaml.Raw(` 1275 apiVersion: kubeadm.k8s.io/v1beta2 1276 kind: ClusterConfiguration`), 1277 newFeatureGates: map[string]bool{"EtcdLearnerMode": true}, 1278 wantFeatureGates: map[string]bool{"EtcdLearnerMode": true}, 1279 }, 1280 { 1281 name: "it should override feature gates even if new value is nil", 1282 clusterConfigurationData: utilyaml.Raw(` 1283 apiVersion: kubeadm.k8s.io/v1beta2 1284 kind: ClusterConfiguration 1285 featureGates: 1286 EtcdLearnerMode: true 1287 `), 1288 newFeatureGates: nil, 1289 wantFeatureGates: nil, 1290 }, 1291 } 1292 1293 for _, tt := range tests { 1294 t.Run(tt.name, func(t *testing.T) { 1295 g := NewWithT(t) 1296 fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{ 1297 ObjectMeta: metav1.ObjectMeta{ 1298 Name: kubeadmConfigKey, 1299 Namespace: metav1.NamespaceSystem, 1300 }, 1301 Data: map[string]string{ 1302 clusterConfigurationKey: tt.clusterConfigurationData, 1303 }, 1304 }).Build() 1305 1306 w := &Workload{ 1307 Client: fakeClient, 1308 } 1309 err := w.UpdateClusterConfiguration(ctx, semver.MustParse("1.19.1"), w.UpdateFeatureGatesInKubeadmConfigMap(tt.newFeatureGates)) 1310 g.Expect(err).ToNot(HaveOccurred()) 1311 1312 var actualConfig corev1.ConfigMap 1313 g.Expect(w.Client.Get( 1314 ctx, 1315 client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, 1316 &actualConfig, 1317 )).To(Succeed()) 1318 1319 actualConfiguration := bootstrapv1.ClusterConfiguration{} 1320 err = yaml.Unmarshal([]byte(actualConfig.Data[clusterConfigurationKey]), &actualConfiguration) 1321 if err != nil { 1322 return 1323 } 1324 g.Expect(actualConfiguration.FeatureGates).Should(Equal(tt.wantFeatureGates)) 1325 }) 1326 } 1327 } 1328 1329 func getProxyImageInfo(ctx context.Context, c client.Client) (string, error) { 1330 ds := &appsv1.DaemonSet{} 1331 1332 if err := c.Get(ctx, client.ObjectKey{Name: kubeProxyKey, Namespace: metav1.NamespaceSystem}, ds); err != nil { 1333 if apierrors.IsNotFound(err) { 1334 return "", errors.New("no image found") 1335 } 1336 return "", errors.New("failed to determine if daemonset already exists") 1337 } 1338 container := findKubeProxyContainer(ds) 1339 if container == nil { 1340 return "", errors.New("unable to find container") 1341 } 1342 return container.Image, nil 1343 } 1344 1345 func newKubeProxyDS() appsv1.DaemonSet { 1346 return appsv1.DaemonSet{ 1347 ObjectMeta: metav1.ObjectMeta{ 1348 Name: kubeProxyKey, 1349 Namespace: metav1.NamespaceSystem, 1350 }, 1351 Spec: appsv1.DaemonSetSpec{ 1352 Template: corev1.PodTemplateSpec{ 1353 Spec: corev1.PodSpec{ 1354 Containers: []corev1.Container{ 1355 { 1356 Image: "k8s.gcr.io/kube-proxy:v1.16.2", 1357 Name: "kube-proxy", 1358 }, 1359 }, 1360 }, 1361 }, 1362 }, 1363 } 1364 } 1365 1366 func newKubeProxyDSWithImage(image string) appsv1.DaemonSet { 1367 ds := newKubeProxyDS() 1368 ds.Spec.Template.Spec.Containers[0].Image = image 1369 return ds 1370 }