/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package dns

import (
	"bytes"
	"context"
	"fmt"
	"reflect"
	"strings"
	"testing"

	"github.com/lithammer/dedent"

	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/intstr"
	clientset "k8s.io/client-go/kubernetes"
	clientsetfake "k8s.io/client-go/kubernetes/fake"
	clientsetscheme "k8s.io/client-go/kubernetes/scheme"

	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
	kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
)

func TestCompileManifests(t *testing.T) {
	replicas := int32(coreDNSReplicas)
	var tests = []struct {
		name     string
		manifest string
		data     interface{}
	}{
		{
			name:     "CoreDNSDeployment manifest",
			manifest: CoreDNSDeployment,
			data: struct {
				DeploymentName, Image, ControlPlaneTaintKey string
				Replicas                                    *int32
			}{
				DeploymentName:       "foo",
				Image:                "foo",
				ControlPlaneTaintKey: "foo",
				Replicas:             &replicas,
			},
		},
		{
			name:     "CoreDNSConfigMap manifest",
			manifest: CoreDNSConfigMap,
			data: struct{ DNSDomain string }{
				DNSDomain: "foo",
			},
		},
	}
	for _, rt := range tests {
		t.Run(rt.name, func(t *testing.T) {
			_, err := kubeadmutil.ParseTemplate(rt.manifest, rt.data)
			if err != nil {
				t.Errorf("unexpected ParseTemplate failure: %+v", err)
			}
		})
	}
}

func TestGetDNSIP(t *testing.T) {
	var tests = []struct {
		name, svcSubnet, expectedDNSIP string
		isDualStack                    bool
	}{
		{
			name:          "subnet mask 12",
			svcSubnet:     "10.96.0.0/12",
			expectedDNSIP: "10.96.0.10",
		},
		{
			name:          "subnet mask 26",
			svcSubnet:     "10.87.116.64/26",
			expectedDNSIP: "10.87.116.74",
		},
		{
			name:          "dual-stack ipv4 primary, subnet mask 26",
			svcSubnet:     "10.87.116.64/26,fd03::/112",
			expectedDNSIP: "10.87.116.74",
		},
		{
			name:          "dual-stack ipv6 primary, subnet mask 112",
			svcSubnet:     "fd03::/112,10.87.116.64/26",
			expectedDNSIP: "fd03::a",
		},
	}
	for _, rt := range tests {
		t.Run(rt.name, func(t *testing.T) {
			dnsIP, err := kubeadmconstants.GetDNSIP(rt.svcSubnet)
			if err != nil {
				t.Fatalf("couldn't get dnsIP: %v", err)
			}

			actualDNSIP := dnsIP.String()
			if actualDNSIP != rt.expectedDNSIP {
				t.Errorf(
					"failed GetDNSIP\n\texpected: %s\n\t  actual: %s",
					rt.expectedDNSIP,
					actualDNSIP,
				)
			}
		})
	}
}

func TestDeploymentsHaveSystemClusterCriticalPriorityClassName(t *testing.T) {
	replicas := int32(coreDNSReplicas)
	testCases := []struct {
		name     string
		manifest string
		data     interface{}
	}{
		{
			name:     "CoreDNSDeployment",
			manifest: CoreDNSDeployment,
			data: struct {
				DeploymentName, Image, ControlPlaneTaintKey, CoreDNSConfigMapName string
				Replicas                                                           *int32
			}{
				DeploymentName:       "foo",
				Image:                "foo",
				ControlPlaneTaintKey: "foo",
				CoreDNSConfigMapName: "foo",
				Replicas:             &replicas,
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			deploymentBytes, err := kubeadmutil.ParseTemplate(testCase.manifest, testCase.data)
			if err != nil {
				t.Fatalf("unexpected ParseTemplate failure: %+v", err)
			}
			deployment := &apps.Deployment{}
			if err := runtime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), deploymentBytes, deployment); err != nil {
				t.Errorf("unexpected error: %v", err)
			}
			if deployment.Spec.Template.Spec.PriorityClassName != "system-cluster-critical" {
				t.Errorf("expected to see system-cluster-critical priority class name. Got %q instead", deployment.Spec.Template.Spec.PriorityClassName)
			}
		})
	}
}

func TestCreateCoreDNSAddon(t *testing.T) {
	tests := []struct {
		name                 string
		initialCorefileData  string
		expectedCorefileData string
		coreDNSVersion       string
	}{
		{
			name:                "Empty Corefile",
			initialCorefileData: "",
			expectedCorefileData: `.:53 {
    errors
    health {
        lameduck 5s
    }
    ready
    kubernetes cluster.local in-addr.arpa ip6.arpa {
        pods insecure
        fallthrough in-addr.arpa ip6.arpa
        ttl 30
    }
    prometheus :9153
    forward . /etc/resolv.conf {
        max_concurrent 1000
    }
    cache 30
    loop
    reload
    loadbalance
}
`,
			coreDNSVersion: "1.6.7",
		},
		{
			name: "Default Corefile",
			initialCorefileData: `.:53 {
    errors
    health {
        lameduck 5s
    }
    ready
    kubernetes cluster.local in-addr.arpa ip6.arpa {
        pods insecure
        fallthrough in-addr.arpa ip6.arpa
        ttl 30
    }
    prometheus :9153
    forward . /etc/resolv.conf
    cache 30
    loop
    reload
    loadbalance
}
`,
			expectedCorefileData: `.:53 {
    errors
    health {
        lameduck 5s
    }
    ready
    kubernetes cluster.local in-addr.arpa ip6.arpa {
        pods insecure
        fallthrough in-addr.arpa ip6.arpa
        ttl 30
    }
    prometheus :9153
    forward . /etc/resolv.conf {
        max_concurrent 1000
    }
    cache 30
    loop
    reload
    loadbalance
}
`,
			coreDNSVersion: "1.6.7",
		},
		{
			name: "Modified Corefile with only newdefaults needed",
			initialCorefileData: `.:53 {
    errors
    log
    health
    ready
    kubernetes cluster.local in-addr.arpa ip6.arpa {
        pods insecure
        fallthrough in-addr.arpa ip6.arpa
        ttl 30
    }
    prometheus :9153
    forward . /etc/resolv.conf
    cache 30
    loop
    reload
    loadbalance
}
`,
			expectedCorefileData: `.:53 {
    errors
    log
    health {
        lameduck 5s
    }
    ready
    kubernetes cluster.local in-addr.arpa ip6.arpa {
        pods insecure
        fallthrough in-addr.arpa ip6.arpa
        ttl 30
    }
    prometheus :9153
    forward . /etc/resolv.conf {
        max_concurrent 1000
    }
    cache 30
    loop
    reload
    loadbalance
}
`,
			coreDNSVersion: "1.6.2",
		},
		{
			name: "Default Corefile with rearranged plugins",
			initialCorefileData: `.:53 {
    errors
    cache 30
    prometheus :9153
    forward . /etc/resolv.conf
    loop
    reload
    loadbalance
    kubernetes cluster.local in-addr.arpa ip6.arpa {
        pods insecure
        upstream
        fallthrough in-addr.arpa ip6.arpa
        ttl 30
    }
    health
}
`,
			expectedCorefileData: `.:53 {
    errors
    health {
        lameduck 5s
    }
    ready
    kubernetes cluster.local in-addr.arpa ip6.arpa {
        pods insecure
        fallthrough in-addr.arpa ip6.arpa
        ttl 30
    }
    prometheus :9153
    forward . /etc/resolv.conf {
        max_concurrent 1000
    }
    cache 30
    loop
    reload
    loadbalance
}
`,
			coreDNSVersion: "1.3.1",
		},
		{
			name: "Remove Deprecated options",
			initialCorefileData: `.:53 {
    errors
    logs
    health
    kubernetes cluster.local in-addr.arpa ip6.arpa {
        pods insecure
        upstream
        fallthrough in-addr.arpa ip6.arpa
        ttl 30
    }
    prometheus :9153
    forward . /etc/resolv.conf
    cache 30
    loop
    reload
    loadbalance
}`,
			expectedCorefileData: `.:53 {
    errors
    logs
    health {
        lameduck 5s
    }
    kubernetes cluster.local in-addr.arpa ip6.arpa {
        pods insecure
        fallthrough in-addr.arpa ip6.arpa
        ttl 30
    }
    prometheus :9153
    forward . /etc/resolv.conf {
        max_concurrent 1000
    }
    cache 30
    loop
    reload
    loadbalance
    ready
}
`,
			coreDNSVersion: "1.3.1",
		},
		{
			name: "Update proxy plugin to forward plugin",
			initialCorefileData: `.:53 {
    errors
    health
    kubernetes cluster.local in-addr.arpa ip6.arpa {
        pods insecure
        upstream
        fallthrough in-addr.arpa ip6.arpa
    }
    prometheus :9153
    proxy . /etc/resolv.conf
    k8s_external example.com
    cache 30
    loop
    reload
    loadbalance
}`,
			expectedCorefileData: `.:53 {
    errors
    health {
        lameduck 5s
    }
    kubernetes cluster.local in-addr.arpa ip6.arpa {
        pods insecure
        fallthrough in-addr.arpa ip6.arpa
    }
    prometheus :9153
    forward . /etc/resolv.conf {
        max_concurrent 1000
    }
    k8s_external example.com
    cache 30
    loop
    reload
    loadbalance
    ready
}
`,
			coreDNSVersion: "1.3.1",
		},
		{
			name: "Modified Corefile with no migration required",
			initialCorefileData: `consul {
    errors
    forward . 10.10.96.16:8600 10.10.96.17:8600 10.10.96.18:8600 {
        max_concurrent 1000
    }
    loadbalance
    cache 5
    reload
}
domain.int {
    errors
    forward . 10.10.0.140 10.10.0.240 10.10.51.40 {
        max_concurrent 1000
    }
    loadbalance
    cache 3600
    reload
}
.:53 {
    errors
    health {
        lameduck 5s
    }
    ready
    kubernetes cluster.local in-addr.arpa ip6.arpa {
        pods insecure
        fallthrough in-addr.arpa ip6.arpa
    }
    prometheus :9153
    forward . /etc/resolv.conf {
        prefer_udp
        max_concurrent 1000
    }
    cache 30
    loop
    reload
    loadbalance
}
`,
			expectedCorefileData: `consul {
    errors
    forward . 10.10.96.16:8600 10.10.96.17:8600 10.10.96.18:8600 {
        max_concurrent 1000
    }
    loadbalance
    cache 5
    reload
}
domain.int {
    errors
    forward . 10.10.0.140 10.10.0.240 10.10.51.40 {
        max_concurrent 1000
    }
    loadbalance
    cache 3600
    reload
}
.:53 {
    errors
    health {
        lameduck 5s
    }
    ready
    kubernetes cluster.local in-addr.arpa ip6.arpa {
        pods insecure
        fallthrough in-addr.arpa ip6.arpa
    }
    prometheus :9153
    forward . /etc/resolv.conf {
        prefer_udp
        max_concurrent 1000
    }
    cache 30
    loop
    reload
    loadbalance
}
`,
			coreDNSVersion: "1.6.7",
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			client := createClientAndCoreDNSManifest(t, tc.initialCorefileData, tc.coreDNSVersion)

			configMapBytes, err := kubeadmutil.ParseTemplate(CoreDNSConfigMap, struct{ DNSDomain, UpstreamNameserver, StubDomain string }{
				DNSDomain:          "cluster.local",
				UpstreamNameserver: "/etc/resolv.conf",
				StubDomain:         "",
			})
			if err != nil {
				t.Errorf("unexpected ParseTemplate failure: %+v", err)
			}

			err = createCoreDNSAddon(nil, nil, configMapBytes, client)
			if err != nil {
				t.Fatalf("error creating the CoreDNS Addon: %v", err)
			}
			migratedConfigMap, _ := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), kubeadmconstants.CoreDNSConfigMap, metav1.GetOptions{})
			if !strings.EqualFold(migratedConfigMap.Data["Corefile"], tc.expectedCorefileData) {
				t.Fatalf("expected to get %v, but got %v", tc.expectedCorefileData, migratedConfigMap.Data["Corefile"])
			}
		})
	}
}

// createClientAndCoreDNSManifest returns a fake clientset pre-populated with a CoreDNS
// ConfigMap holding the given Corefile and a kube-dns labeled Deployment running the
// given CoreDNS version.
func createClientAndCoreDNSManifest(t *testing.T, corefile, coreDNSVersion string) *clientsetfake.Clientset {
	client := clientsetfake.NewSimpleClientset()
	_, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      kubeadmconstants.CoreDNSConfigMap,
			Namespace: metav1.NamespaceSystem,
		},
		Data: map[string]string{
			"Corefile": corefile,
		},
	}, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("error creating ConfigMap: %v", err)
	}
	_, err = client.AppsV1().Deployments(metav1.NamespaceSystem).Create(context.TODO(), &apps.Deployment{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Deployment",
			APIVersion: "apps/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      kubeadmconstants.CoreDNSConfigMap,
			Namespace: metav1.NamespaceSystem,
			Labels: map[string]string{
				"k8s-app": "kube-dns",
			},
		},
		Spec: apps.DeploymentSpec{
			Template: v1.PodTemplateSpec{
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Image: "test:" + coreDNSVersion,
						},
					},
				},
			},
		},
	}, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("error creating deployment: %v", err)
	}
	return client
}

func TestDeployedDNSReplicas(t *testing.T) {
	tests := []struct {
		name           string
		deploymentSize int
		want           int32
		wantErr        bool
	}{
		{
			name:           "one coredns addon deployment",
			deploymentSize: 1,
			want:           2,
			wantErr:        false,
		},
		{
			name:           "no coredns addon deployment",
			deploymentSize: 0,
			want:           5,
			wantErr:        false,
		},
		{
			name:           "multiple coredns addon deployments",
			deploymentSize: 3,
			want:           5,
			wantErr:        true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client := newMockClientForTest(t, 2, tt.deploymentSize, "", "", "")
			got, err := deployedDNSReplicas(client, 5)
			if (err != nil) != tt.wantErr {
				t.Errorf("deployedDNSReplicas() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if *got != tt.want {
				t.Errorf("deployedDNSReplicas() = %v, want %v", *got, tt.want)
			}
		})
	}
}

func TestCoreDNSAddon(t *testing.T) {
	type args struct {
		cfg    *kubeadmapi.ClusterConfiguration
		client clientset.Interface
		printManifest bool
	}
	tests := []struct {
		name    string
		args    args
		wantOut string
		wantErr bool
	}{
		{
			name: "cfg is empty",
			args: args{
				cfg:           &kubeadmapi.ClusterConfiguration{},
				client:        newMockClientForTest(t, 2, 1, "", "", ""),
				printManifest: false,
			},
			wantOut: "",
			wantErr: true,
		},
		{
			name: "cfg is valid and not print Manifest",
			args: args{
				cfg: &kubeadmapi.ClusterConfiguration{
					DNS: kubeadmapi.DNS{
						ImageMeta: kubeadmapi.ImageMeta{
							ImageRepository: "foo.bar.io",
						},
					},
					Networking: kubeadmapi.Networking{
						ServiceSubnet: "10.0.0.0/16",
					},
				},
				client:        newMockClientForTest(t, 2, 1, "", "", ""),
				printManifest: false,
			},
			wantOut: "[addons] Applied essential addon: CoreDNS\n",
			wantErr: false,
		},
		{
			name: "cfg is valid and print Manifest",
			args: args{
				cfg: &kubeadmapi.ClusterConfiguration{
					DNS: kubeadmapi.DNS{
						ImageMeta: kubeadmapi.ImageMeta{
							ImageRepository: "foo.bar.io",
						},
					},
					Networking: kubeadmapi.Networking{
						ServiceSubnet: "10.0.0.0/16",
					},
				},
				client:        newMockClientForTest(t, 2, 1, "", "", ""),
				printManifest: true,
			},
			wantOut: dedent.Dedent(`---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
spec:
  replicas: 3
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: k8s-app
                  operator: In
                  values: ["kube-dns"]
              topologyKey: kubernetes.io/hostname
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - key: node-role.kubernetes.io/control-plane
        effect: NoSchedule
      nodeSelector:
        kubernetes.io/os: linux
      containers:
      - name: coredns
        image: foo.bar.io/coredns:v1.11.1
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - ALL
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
           lameduck 5s
        }
        ready
        kubernetes in-addr.arpa ip6.arpa {
           pods insecure
           fallthrough in-addr.arpa ip6.arpa
           ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf {
           max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  # Without this resourceVersion value, an update of the Service between versions will yield:
  # Service "kube-dns" is invalid: metadata.resourceVersion: Invalid value: "": must be specified for an update
  resourceVersion: "0"
spec:
  clusterIP: 10.0.0.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
    targetPort: 53
  - name: dns-tcp
    port: 53
    protocol: TCP
    targetPort: 53
  - name: metrics
    port: 9153
    protocol: TCP
    targetPort: 9153
  selector:
    k8s-app: kube-dns
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
`),
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			out := &bytes.Buffer{}
			var replicas int32 = 3
			if err := coreDNSAddon(tt.args.cfg, tt.args.client, &replicas, "", out, tt.args.printManifest); (err != nil) != tt.wantErr {
				t.Errorf("coreDNSAddon() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if gotOut := out.String(); gotOut != tt.wantOut {
				t.Errorf("Actual output of coreDNSAddon() does not match expected.\nActual: %v\nExpected: %v\n", gotOut, tt.wantOut)
			}
		})
	}
}

func TestEnsureDNSAddon(t *testing.T) {
	type args struct {
		cfg           *kubeadmapi.ClusterConfiguration
		client        clientset.Interface
		printManifest bool
	}
	tests := []struct {
		name    string
		args    args
		wantOut string
		wantErr bool
	}{
		{
			name: "not print Manifest",
			args: args{
				cfg: &kubeadmapi.ClusterConfiguration{
					DNS: kubeadmapi.DNS{
						ImageMeta: kubeadmapi.ImageMeta{
							ImageRepository: "foo.bar.io",
						},
					},
					Networking: kubeadmapi.Networking{
						ServiceSubnet: "10.0.0.0/16",
					},
				},
				client:        newMockClientForTest(t, 0, 1, "", "", ""),
				printManifest: false,
			},
			wantOut: "[addons] Applied essential addon: CoreDNS\n",
		},
		{
			name: "get dns replicas failed",
			args: args{
				cfg: &kubeadmapi.ClusterConfiguration{
					DNS: kubeadmapi.DNS{
						ImageMeta: kubeadmapi.ImageMeta{
							ImageRepository: "foo.bar.io",
						},
					},
					Networking: kubeadmapi.Networking{
						ServiceSubnet: "10.0.0.0/16",
					},
				},
				client:        newMockClientForTest(t, 0, 2, "", "", ""),
				printManifest: false,
			},
			wantErr: true,
			wantOut: "",
		},
		{
			name: "print Manifest",
			args: args{
				cfg: &kubeadmapi.ClusterConfiguration{
					DNS: kubeadmapi.DNS{
						ImageMeta: kubeadmapi.ImageMeta{
							ImageRepository: "foo.bar.io",
						},
					},
					Networking: kubeadmapi.Networking{
						ServiceSubnet: "10.0.0.0/16",
					},
				},
				client:        newMockClientForTest(t, 0, 1, "", "", ""),
				printManifest: true,
			},
			wantOut: dedent.Dedent(`---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: k8s-app
                  operator: In
                  values: ["kube-dns"]
              topologyKey: kubernetes.io/hostname
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - key: node-role.kubernetes.io/control-plane
        effect: NoSchedule
      nodeSelector:
        kubernetes.io/os: linux
      containers:
      - name: coredns
        image: foo.bar.io/coredns:v1.11.1
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - ALL
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
           lameduck 5s
        }
        ready
        kubernetes in-addr.arpa ip6.arpa {
           pods insecure
           fallthrough in-addr.arpa ip6.arpa
           ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf {
           max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  # Without this resourceVersion value, an update of the Service between versions will yield:
  # Service "kube-dns" is invalid: metadata.resourceVersion: Invalid value: "": must be specified for an update
  resourceVersion: "0"
spec:
  clusterIP: 10.0.0.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
    targetPort: 53
  - name: dns-tcp
    port: 53
    protocol: TCP
    targetPort: 53
  - name: metrics
    port: 9153
    protocol: TCP
    targetPort: 9153
  selector:
    k8s-app: kube-dns
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
`),
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			out := &bytes.Buffer{}
			if err := EnsureDNSAddon(tt.args.cfg, tt.args.client, "", out, tt.args.printManifest); (err != nil) != tt.wantErr {
				t.Errorf("EnsureDNSAddon() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if gotOut := out.String(); gotOut != tt.wantOut {
				t.Errorf("Actual output of EnsureDNSAddon() does not match expected.\nActual: %v\nExpected: %v\n", gotOut, tt.wantOut)
			}
		})
	}
}

func TestCreateDNSService(t *testing.T) {
	coreDNSServiceBytes, _ := kubeadmutil.ParseTemplate(CoreDNSService, struct{ DNSIP string }{
		DNSIP: "10.233.0.3",
	})
	type args struct {
		dnsService   *v1.Service
		serviceBytes []byte
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name: "dnsService and serviceBytes are nil",
			args: args{
				dnsService:   nil,
				serviceBytes: nil,
			},
			wantErr: true,
		},
		{
			name: "invalid dns",
			args: args{
				dnsService:   nil,
				serviceBytes: coreDNSServiceBytes,
			},
			wantErr: true,
		},
		{
			name: "serviceBytes is not valid",
			args: args{
				dnsService: &v1.Service{
					TypeMeta: metav1.TypeMeta{
						Kind:       "Service",
						APIVersion: "v1",
					},
					ObjectMeta: metav1.ObjectMeta{Name: "coredns",
						Labels: map[string]string{"k8s-app": "kube-dns",
							"kubernetes.io/name": "coredns"},
						Namespace: "kube-system",
					},
					Spec: v1.ServiceSpec{
						Ports: []v1.ServicePort{
							{
								Name:     "dns",
								Port:     53,
								Protocol: v1.ProtocolUDP,
								TargetPort: intstr.IntOrString{
									Type:   0,
									IntVal: 53,
								},
							},
							{
								Name:     "dns-tcp",
								Port:     53,
								Protocol: v1.ProtocolTCP,
								TargetPort: intstr.IntOrString{
									Type:   0,
									IntVal: 53,
								},
							},
						},
						Selector: map[string]string{
							"k8s-app": "kube-dns",
						},
					},
				},
				serviceBytes: []byte{
					'f', 'o', 'o',
				},
			},
			wantErr: true,
		},
		{
			name: "dnsService is valid and serviceBytes is nil",
			args: args{
				dnsService: &v1.Service{
					ObjectMeta: metav1.ObjectMeta{Name: "coredns",
						Labels: map[string]string{"k8s-app": "kube-dns",
							"kubernetes.io/name": "coredns"},
						Namespace: "kube-system",
					},
					Spec: v1.ServiceSpec{
						Ports: []v1.ServicePort{
							{
								Name:     "dns",
								Port:     53,
								Protocol: v1.ProtocolUDP,
								TargetPort: intstr.IntOrString{
									Type:   0,
									IntVal: 53,
								},
							},
							{
								Name:     "dns-tcp",
								Port:     53,
								Protocol: v1.ProtocolTCP,
								TargetPort: intstr.IntOrString{
									Type:   0,
									IntVal: 53,
								},
							},
						},
						Selector: map[string]string{
							"k8s-app": "kube-dns",
						},
					},
				},
				serviceBytes: nil,
			},
			wantErr: false,
		},
		{
			name: "dnsService and serviceBytes are not nil and valid",
			args: args{
				dnsService: &v1.Service{
					TypeMeta: metav1.TypeMeta{
						Kind:       "Service",
						APIVersion: "v1",
					},
					ObjectMeta: metav1.ObjectMeta{Name: "coredns",
						Labels: map[string]string{"k8s-app": "kube-dns",
							"kubernetes.io/name": "coredns"},
						Namespace: "kube-system",
					},
					Spec: v1.ServiceSpec{
						ClusterIP: "10.233.0.3",
						Ports: []v1.ServicePort{
							{
								Name:     "dns",
								Port:     53,
								Protocol: v1.ProtocolUDP,
								TargetPort: intstr.IntOrString{
									Type:   0,
									IntVal: 53,
								},
							},
						},
						Selector: map[string]string{
							"k8s-app": "kube-dns",
						},
					},
				},
				serviceBytes: coreDNSServiceBytes,
			},
			wantErr: false,
		},
		{
			name: "the namespace of dnsService is not kube-system",
			args: args{
				dnsService: &v1.Service{
					TypeMeta: metav1.TypeMeta{
						Kind:       "Service",
						APIVersion: "v1",
					},
					ObjectMeta: metav1.ObjectMeta{Name: "coredns",
						Labels: map[string]string{"k8s-app": "kube-dns",
							"kubernetes.io/name": "coredns"},
						Namespace: "kube-system-test",
					},
					Spec: v1.ServiceSpec{
						Ports: []v1.ServicePort{
							{
								Name:     "dns",
								Port:     53,
								Protocol: v1.ProtocolUDP,
								TargetPort: intstr.IntOrString{
									Type:   0,
									IntVal: 53,
								},
							},
							{
								Name:     "dns-tcp",
								Port:     53,
								Protocol: v1.ProtocolTCP,
								TargetPort: intstr.IntOrString{
									Type:   0,
									IntVal: 53,
								},
							},
						},
						Selector: map[string]string{
							"k8s-app": "kube-dns",
						},
					},
				},
				serviceBytes: nil,
			},
			wantErr: true,
		},
		{
			name: "the name of dnsService is not coredns",
			args: args{
				dnsService: &v1.Service{
					TypeMeta: metav1.TypeMeta{
						Kind:       "Service",
						APIVersion: "v1",
					},
					ObjectMeta: metav1.ObjectMeta{Name: "coredns-test",
						Labels: map[string]string{"k8s-app": "kube-dns",
							"kubernetes.io/name": "coredns"},
						Namespace: "kube-system",
					},
					Spec: v1.ServiceSpec{
						Ports: []v1.ServicePort{
							{
								Name:     "dns",
								Port:     53,
								Protocol: v1.ProtocolUDP,
								TargetPort: intstr.IntOrString{
									Type:   0,
									IntVal: 53,
								},
							},
							{
								Name:     "dns-tcp",
								Port:     53,
								Protocol: v1.ProtocolTCP,
								TargetPort: intstr.IntOrString{
									Type:   0,
									IntVal: 53,
								},
							},
						},
						Selector: map[string]string{
							"k8s-app": "kube-dns",
						},
					},
				},
				serviceBytes: nil,
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client := newMockClientForTest(t, 1, 1, "", "", "")
			if err := createDNSService(tt.args.dnsService, tt.args.serviceBytes, client); (err != nil) != tt.wantErr {
				t.Errorf("createDNSService() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

func TestDeployedDNSAddon(t *testing.T) {
	tests := []struct {
		name           string
		image          string
		wantVersion    string
		deploymentSize int
		wantErr        bool
	}{
		{
			name:           "default",
			image:          "registry.k8s.io/coredns/coredns:v1.11.1",
			deploymentSize: 1,
			wantVersion:    "v1.11.1",
		},
		{
			name:           "no dns addon deployment",
			image:          "registry.k8s.io/coredns/coredns:v1.11.1",
			deploymentSize: 0,
			wantVersion:    "",
		},
		{
			name:           "multiple dns addon deployment",
			image:          "registry.k8s.io/coredns/coredns:v1.11.1",
			deploymentSize: 2,
			wantVersion:    "",
			wantErr:        true,
		},
		{
			name:           "with digest",
			image:          "registry.k8s.io/coredns/coredns:v1.11.1@sha256:a0ead06651cf580044aeb0a0feba63591858fb2e43ade8c9dea45a6a89ae7e5e",
			deploymentSize: 1,
			wantVersion:    "v1.11.1",
		},
		{
			name:           "without registry",
			image:          "coredns/coredns:coredns-s390x",
			deploymentSize: 1,
			wantVersion:    "coredns-s390x",
		},
		{
			name:           "without registry and tag",
			image:          "coredns/coredns",
			deploymentSize: 1,
			wantVersion:    "",
		},
		{
			name:           "with explicit port",
			image:          "localhost:4711/coredns/coredns:v1.11.2-pre.1",
			deploymentSize: 1,
			wantVersion:    "v1.11.2-pre.1",
		},
		{
			name:           "with explicit port but without tag",
			image:          "localhost:4711/coredns/coredns@sha256:a0ead06651cf580044aeb0a0feba63591858fb2e43ade8c9dea45a6a89ae7e5e",
			deploymentSize: 1,
			wantVersion:    "",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client := newMockClientForTest(t, 1, tt.deploymentSize, tt.image, "", "")

			version, err := DeployedDNSAddon(client)
			if (err != nil) != tt.wantErr {
				t.Errorf("DeployedDNSAddon() error = %v, wantErr %v", err, tt.wantErr)
			}
			if version != tt.wantVersion {
				t.Errorf("DeployedDNSAddon() for image %q returned %q, want %q", tt.image, version, tt.wantVersion)
			}
		})
	}
}

func TestGetCoreDNSInfo(t *testing.T) {
	tests := []struct {
		name          string
		client        clientset.Interface
		wantConfigMap *v1.ConfigMap
		wantCorefile  string
		wantVersion   string
		wantErr       bool
	}{
		{
			name:          "no coredns configmap",
			client:        newMockClientForTest(t, 1, 1, "localhost:4711/coredns/coredns:v1.11.2-pre.1", "", ""),
			wantConfigMap: nil,
			wantCorefile:  "",
			wantVersion:   "",
			wantErr:       false,
		},
		{
			name:          "the key of coredns configmap data does not contain corefile",
			client:        newMockClientForTest(t, 1, 1, "localhost:4711/coredns/coredns:v1.11.2-pre.1", "coredns", "Corefilefake"),
			wantConfigMap: nil,
			wantCorefile:  "",
			wantVersion:   "",
			wantErr:       true,
		},
		{
			name: "failed to obtain coredns version",
			client:        newMockClientForTest(t, 1, 2, "localhost:4711/coredns/coredns:v1.11.2-pre.1", "coredns", "Corefile"),
			wantConfigMap: nil,
			wantCorefile:  "",
			wantVersion:   "",
			wantErr:       true,
		},
		{
			name:   "coredns information can be obtained normally",
			client: newMockClientForTest(t, 1, 1, "localhost:4711/coredns/coredns:v1.11.2-pre.1", "coredns", "Corefile"),
			wantConfigMap: &v1.ConfigMap{
				TypeMeta: metav1.TypeMeta{
					Kind:       "ConfigMap",
					APIVersion: "v1",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name: "coredns",
					Labels: map[string]string{
						"k8s-app":            "kube-dns",
						"kubernetes.io/name": "coredns",
					},
					Namespace: "kube-system",
				},
				Data: map[string]string{
					"Corefile": dedent.Dedent(`
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            prefer_udp
            max_concurrent 1000
        }
        cache 30

        loop
        reload
        loadbalance
    }
    `),
				},
			},
			wantCorefile: dedent.Dedent(`
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            prefer_udp
            max_concurrent 1000
        }
        cache 30

        loop
        reload
        loadbalance
    }
    `),
			wantVersion: "v1.11.2-pre.1",
			wantErr:     false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, got1, got2, err := GetCoreDNSInfo(tt.client)
			if (err != nil) != tt.wantErr {
				t.Errorf("GetCoreDNSInfo() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.wantConfigMap) {
				t.Errorf("GetCoreDNSInfo() got = %v, want %v", got, tt.wantConfigMap)
			}
			if got1 != tt.wantCorefile {
				t.Errorf("GetCoreDNSInfo() got1 = %v, want %v", got1, tt.wantCorefile)
			}
			if got2 != tt.wantVersion {
				t.Errorf("GetCoreDNSInfo() got2 = %v, want %v", got2, tt.wantVersion)
			}
		})
	}
}

func TestIsCoreDNSConfigMapMigrationRequired(t *testing.T) {
	tests := []struct {
		name                           string
		corefile                       string
		currentInstalledCoreDNSVersion string
		want                           bool
		wantErr                        bool
	}{
		{
			name:                           "currentInstalledCoreDNSVersion is empty",
			corefile:                       "",
			currentInstalledCoreDNSVersion: "",
			want:                           false,
			wantErr:                        false,
		},
		{
			name:                           "currentInstalledCoreDNSVersion is consistent with the standard version",
			corefile:                       "",
			currentInstalledCoreDNSVersion: kubeadmconstants.CoreDNSVersion,
			want:                           false,
			wantErr:                        false,
		},
		{
			name:                           "Coredns Configmap needs to be migrated",
			corefile:                       "Corefile: fake",
			currentInstalledCoreDNSVersion: "v1.2.0",
			want:                           true,
			wantErr:                        false,
		},
		{
			name:                           "currentInstalledCoreDNSVersion is not supported",
			corefile:                       "",
			currentInstalledCoreDNSVersion: "v0.11.1",
			want:                           false,
			wantErr:                        true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := isCoreDNSConfigMapMigrationRequired(tt.corefile, tt.currentInstalledCoreDNSVersion)
			if (err != nil) != tt.wantErr {
				t.Errorf("isCoreDNSConfigMapMigrationRequired() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("isCoreDNSConfigMapMigrationRequired() = %v, want %v", got, tt.want)
			}
		})
	}
}

// newMockClientForTest returns a fake clientset pre-populated with a kube-dns Service and
// CoreDNS Deployments.
// replicas is the replica count of each DNS Deployment.
// deploymentSize is the number of Deployments carrying the `k8s-app=kube-dns` label.
// When configMap is non-empty, a ConfigMap with that name is also created; configData is
// used as its data key and defaults to "Corefile".
func newMockClientForTest(t *testing.T, replicas int32, deploymentSize int, image string, configMap string, configData string) *clientsetfake.Clientset {
	if image == "" {
		image = "registry.k8s.io/coredns/coredns:v1.11.1"
	}
	client := clientsetfake.NewSimpleClientset()
	for i := 0; i < deploymentSize; i++ {
		_, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Create(context.TODO(), &apps.Deployment{
			TypeMeta: metav1.TypeMeta{
				Kind:       "Deployment",
				APIVersion: "apps/v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name:      fmt.Sprintf("coredns-%d", i),
				Namespace: metav1.NamespaceSystem,
				Labels: map[string]string{
					"k8s-app": "kube-dns",
				},
			},
			Spec: apps.DeploymentSpec{
				Replicas: &replicas,
				Template: v1.PodTemplateSpec{
					Spec: v1.PodSpec{
						Containers: []v1.Container{{Image: image}},
					},
				},
			},
		}, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("error creating deployment: %v", err)
		}
	}
	_, err := client.CoreV1().Services(metav1.NamespaceSystem).Create(context.TODO(), &v1.Service{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{Name: "coredns",
			Labels: map[string]string{"k8s-app": "kube-dns",
				"kubernetes.io/name": "coredns"},
			Namespace: "kube-system",
		},
		Spec: v1.ServiceSpec{
			ClusterIP: "10.233.0.3",
			Ports: []v1.ServicePort{
				{
					Name:     "dns",
					Port:     53,
					Protocol: v1.ProtocolUDP,
					TargetPort: intstr.IntOrString{
						Type:   0,
						IntVal: 53,
					},
				},
			},
			Selector: map[string]string{
				"k8s-app": "kube-dns",
			},
		},
	}, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("error creating service: %v", err)
	}

	if configMap != "" {
		if configData == "" {
			configData = "Corefile"
		}
		_, err = client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &v1.ConfigMap{
			TypeMeta: metav1.TypeMeta{
				Kind:       "ConfigMap",
				APIVersion: "v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: configMap,
				Labels: map[string]string{
					"k8s-app":            "kube-dns",
					"kubernetes.io/name": "coredns",
				},
				Namespace: "kube-system",
			},
			Data: map[string]string{
				configData: dedent.Dedent(`
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            prefer_udp
            max_concurrent 1000
        }
        cache 30

        loop
        reload
        loadbalance
    }
    `),
			},
		}, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("error creating ConfigMap: %v", err)
		}
	}
	return client
}