sigs.k8s.io/cluster-api-provider-azure@v1.17.0/templates/test/dev/cluster-template-custom-builds-dra.yaml
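This flavor wires Dynamic Resource Allocation (DRA) into a CAPZ test cluster built from custom Kubernetes binaries: the DynamicResourceAllocation feature gate is set on the API server, controller manager, scheduler, and kubelets; the resource.k8s.io/v1alpha3 API group is enabled via runtime-config; and a pre-kubeadm script turns on the containerd CDI plugin. A minimal, non-authoritative sketch of rendering it, assuming clusterctl is installed and the placeholder variables referenced below (AZURE_SUBSCRIPTION_ID, AZURE_STORAGE_ACCOUNT, KUBE_GIT_VERSION, CI_VERSION, and the rest) are already exported:

    # sketch only: render the template and apply it to the management cluster
    export CLUSTER_NAME=dra-test              # hypothetical cluster name
    export KUBERNETES_VERSION=v1.31.0         # assumption: a build with DRA support
    clusterctl generate cluster "${CLUSTER_NAME}" \
      --from ./cluster-template-custom-builds-dra.yaml | kubectl apply -f -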

     1  apiVersion: cluster.x-k8s.io/v1beta1
     2  kind: Cluster
     3  metadata:
     4    labels:
     5      cloud-provider: ${CLOUD_PROVIDER_AZURE_LABEL:=azure}
     6      cni: calico
     7      cni-windows: ${CLUSTER_NAME}-calico
     8      containerd-logger: enabled
     9      csi-proxy: enabled
    10      metrics-server: enabled
    11    name: ${CLUSTER_NAME}
    12    namespace: default
    13  spec:
    14    clusterNetwork:
    15      pods:
    16        cidrBlocks:
    17        - 192.168.0.0/16
    18    controlPlaneRef:
    19      apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    20      kind: KubeadmControlPlane
    21      name: ${CLUSTER_NAME}-control-plane
    22    infrastructureRef:
    23      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    24      kind: AzureCluster
    25      name: ${CLUSTER_NAME}
    26  ---
    27  apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    28  kind: AzureCluster
    29  metadata:
    30    name: ${CLUSTER_NAME}
    31    namespace: default
    32  spec:
    33    additionalTags:
    34      buildProvenance: ${BUILD_PROVENANCE}
    35      creationTimestamp: ${TIMESTAMP}
    36      jobName: ${JOB_NAME}
    37    identityRef:
    38      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    39      kind: AzureClusterIdentity
    40      name: ${CLUSTER_IDENTITY_NAME}
    41    location: ${AZURE_LOCATION}
    42    networkSpec:
    43      subnets:
    44      - name: control-plane-subnet
    45        role: control-plane
    46      - name: node-subnet
    47        role: node
    48      vnet:
    49        name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet}
    50    resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}}
    51    subscriptionID: ${AZURE_SUBSCRIPTION_ID}
    52  ---
    53  apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    54  kind: KubeadmControlPlane
    55  metadata:
    56    annotations:
    57      controlplane.cluster.x-k8s.io/skip-kube-proxy: "true"
    58    name: ${CLUSTER_NAME}-control-plane
    59    namespace: default
    60  spec:
    61    kubeadmConfigSpec:
    62      clusterConfiguration:
    63        apiServer:
    64          extraArgs:
    65            cloud-provider: external
    66            feature-gates: ${K8S_FEATURE_GATES:-"DynamicResourceAllocation=true"}
    67            runtime-config: resource.k8s.io/v1alpha3=true
    68          timeoutForControlPlane: 20m
    69        controllerManager:
    70          extraArgs:
    71            allocate-node-cidrs: "false"
    72            cloud-provider: external
    73            cluster-name: ${CLUSTER_NAME}
    74            feature-gates: HPAContainerMetrics=true,DynamicResourceAllocation=true
    75            v: "4"
    76        etcd:
    77          local:
    78            dataDir: /var/lib/etcddisk/etcd
    79            extraArgs:
    80              quota-backend-bytes: "8589934592"
    81        kubernetesVersion: ci/${CI_VERSION}
    82        scheduler:
    83          extraArgs:
    84            feature-gates: DynamicResourceAllocation=true
    85      diskSetup:
    86        filesystems:
    87        - device: /dev/disk/azure/scsi1/lun0
    88          extraOpts:
    89          - -E
    90          - lazy_itable_init=1,lazy_journal_init=1
    91          filesystem: ext4
    92          label: etcd_disk
    93        - device: ephemeral0.1
    94          filesystem: ext4
    95          label: ephemeral0
    96          replaceFS: ntfs
    97        partitions:
    98        - device: /dev/disk/azure/scsi1/lun0
    99          layout: true
   100          overwrite: false
   101          tableType: gpt
   102      files:
   103      - contentFrom:
   104          secret:
   105            key: control-plane-azure.json
   106            name: ${CLUSTER_NAME}-control-plane-azure-json
   107        owner: root:root
   108        path: /etc/kubernetes/azure.json
   109        permissions: "0644"
   110      - content: |
   111          #!/bin/bash
   112  
   113          set -o nounset
   114          set -o pipefail
   115          set -o errexit
   116          [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO=""
   117  
   118          echo "Use OOT credential provider"
   119          mkdir -p /var/lib/kubelet/credential-provider
   120          curl --retry 10 --retry-delay 5 -w "response status code is %{http_code}" -Lo /var/lib/kubelet/credential-provider/acr-credential-provider "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider"
   121          chmod 755 /var/lib/kubelet/credential-provider/acr-credential-provider
   122          curl --retry 10 --retry-delay 5 -w "response status code is %{http_code}" -Lo /var/lib/kubelet/credential-provider-config.yaml "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config.yaml"
   123          chmod 644 /var/lib/kubelet/credential-provider-config.yaml
   124        owner: root:root
   125        path: /tmp/oot-cred-provider.sh
   126        permissions: "0744"
   127      - content: |
   128          #!/bin/bash
   129  
   130          set -o nounset
   131          set -o pipefail
   132          set -o errexit
   133  
   134          systemctl stop kubelet
   135          declare -a BINARIES=("kubeadm" "kubectl" "kubelet")
   136          az login --identity
   137          for BINARY in "$${BINARIES[@]}"; do
   138            echo "* installing package: $${BINARY} ${KUBE_GIT_VERSION}"
   139            az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" -f "/usr/bin/$${BINARY}" --auth-mode login
   140          done
   141          systemctl restart kubelet
   142  
    143          # pre-pull images from gcr.io/k8s-staging-ci-images and retag them to
    144          # registry.k8s.io so kubeadm can fetch the correct images no matter what
   145          declare -a IMAGES=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler")
   146          [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO=""
   147          IMAGE_REGISTRY_PREFIX=registry.k8s.io
   148          for IMAGE in "$${IMAGES[@]}"; do
   149            $${SUDO} ctr -n k8s.io images tag $$IMAGE_REGISTRY_PREFIX/$$IMAGE-amd64:"${CI_VERSION//+/_}" $$IMAGE_REGISTRY_PREFIX/$$IMAGE:"${CI_VERSION//+/_}"
   150            $${SUDO} ctr -n k8s.io images tag $$IMAGE_REGISTRY_PREFIX/$$IMAGE-amd64:"${CI_VERSION//+/_}" gcr.io/k8s-staging-ci-images/$$IMAGE:"${CI_VERSION//+/_}"
   151          done
   152  
   153          echo "kubeadm version: $(kubeadm version -o=short)"
   154          echo "kubectl version: $(kubectl version --client=true)"
   155          echo "kubelet version: $(kubelet --version)"
   156        owner: root:root
   157        path: /tmp/replace-k8s-binaries.sh
   158        permissions: "0744"
   159      - content: |
   160          #!/bin/bash
   161  
   162          set -o nounset
   163          set -o pipefail
   164          set -o errexit
   165  
   166          curl -L --retry 10 --retry-delay 5 https://github.com/mikefarah/yq/releases/download/v4.6.1/yq_linux_amd64.tar.gz --output /tmp/yq_linux_amd64.tar.gz
   167          tar -xzvf /tmp/yq_linux_amd64.tar.gz -C /tmp && mv /tmp/yq_linux_amd64 /usr/bin/yq
   168          rm /tmp/yq_linux_amd64.tar.gz
   169  
   170          export KUBECONFIG=/etc/kubernetes/admin.conf
   171          kubectl -n kube-system set image daemonset/kube-proxy kube-proxy="${REGISTRY}/kube-proxy:${KUBE_IMAGE_TAG}"
   172          systemctl stop kubelet
   173          yq e '.spec.containers[0].image = "${REGISTRY}/kube-apiserver:${KUBE_IMAGE_TAG}"' -i /etc/kubernetes/manifests/kube-apiserver.yaml
   174          yq e '.spec.containers[0].image = "${REGISTRY}/kube-controller-manager:${KUBE_IMAGE_TAG}"' -i /etc/kubernetes/manifests/kube-controller-manager.yaml
   175          yq e '.spec.containers[0].image = "${REGISTRY}/kube-scheduler:${KUBE_IMAGE_TAG}"' -i /etc/kubernetes/manifests/kube-scheduler.yaml
   176          systemctl restart kubelet
   177        owner: root:root
   178        path: /tmp/replace-k8s-components.sh
   179        permissions: "0744"
   180      - content: |
   181          #!/bin/bash
   182  
   183          echo "enabling containerd CDI plugin"
   184          sed -i '/\[plugins."io.containerd.grpc.v1.cri"\]/a\    enable_cdi = true' /etc/containerd/config.toml
   185          systemctl restart containerd
   186        owner: root:root
   187        path: /tmp/containerd-config.sh
   188        permissions: "0744"
   189      initConfiguration:
   190        nodeRegistration:
   191          kubeletExtraArgs:
   192            cloud-provider: external
   193            feature-gates: DynamicResourceAllocation=true
   194            image-credential-provider-bin-dir: /var/lib/kubelet/credential-provider
   195            image-credential-provider-config: /var/lib/kubelet/credential-provider-config.yaml
   196          name: '{{ ds.meta_data["local_hostname"] }}'
   197      joinConfiguration:
   198        nodeRegistration:
   199          kubeletExtraArgs:
   200            cloud-provider: external
   201            feature-gates: DynamicResourceAllocation=true
   202            image-credential-provider-bin-dir: /var/lib/kubelet/credential-provider
   203            image-credential-provider-config: /var/lib/kubelet/credential-provider-config.yaml
   204          name: '{{ ds.meta_data["local_hostname"] }}'
   205      mounts:
   206      - - LABEL=etcd_disk
   207        - /var/lib/etcddisk
   208      postKubeadmCommands:
   209      - bash -c /tmp/replace-k8s-components.sh
   210      preKubeadmCommands:
   211      - bash -c /tmp/containerd-config.sh
   212      - bash -c /tmp/oot-cred-provider.sh
   213      - bash -c /tmp/replace-k8s-binaries.sh
   214      verbosity: 5
   215    machineTemplate:
   216      infrastructureRef:
   217        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
   218        kind: AzureMachineTemplate
   219        name: ${CLUSTER_NAME}-control-plane
   220    replicas: ${CONTROL_PLANE_MACHINE_COUNT:=1}
   221    version: ${KUBERNETES_VERSION}
   222  ---
   223  apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
   224  kind: AzureMachineTemplate
   225  metadata:
   226    name: ${CLUSTER_NAME}-control-plane
   227    namespace: default
   228  spec:
   229    template:
   230      spec:
   231        dataDisks:
   232        - diskSizeGB: 256
   233          lun: 0
   234          nameSuffix: etcddisk
   235        identity: UserAssigned
   236        image:
   237          marketplace:
   238            offer: capi
   239            publisher: cncf-upstream
   240            sku: ubuntu-2204-gen1
   241            version: latest
   242        osDisk:
   243          diskSizeGB: 128
   244          osType: Linux
   245        sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""}
   246        userAssignedIdentities:
   247        - providerID: /subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${CI_RG}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/${USER_IDENTITY}
   248        vmSize: ${AZURE_CONTROL_PLANE_MACHINE_TYPE}
   249  ---
   250  apiVersion: cluster.x-k8s.io/v1beta1
   251  kind: MachineDeployment
   252  metadata:
   253    name: ${CLUSTER_NAME}-md-0
   254    namespace: default
   255  spec:
   256    clusterName: ${CLUSTER_NAME}
   257    replicas: ${WORKER_MACHINE_COUNT:=2}
   258    selector: {}
   259    template:
   260      metadata:
   261        labels:
   262          nodepool: pool1
   263      spec:
   264        bootstrap:
   265          configRef:
   266            apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
   267            kind: KubeadmConfigTemplate
   268            name: ${CLUSTER_NAME}-md-0
   269        clusterName: ${CLUSTER_NAME}
   270        infrastructureRef:
   271          apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
   272          kind: AzureMachineTemplate
   273          name: ${CLUSTER_NAME}-md-0
   274        version: ${KUBERNETES_VERSION}
   275  ---
   276  apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
   277  kind: AzureMachineTemplate
   278  metadata:
   279    name: ${CLUSTER_NAME}-md-0
   280    namespace: default
   281  spec:
   282    template:
   283      spec:
   284        identity: UserAssigned
   285        image:
   286          marketplace:
   287            offer: capi
   288            publisher: cncf-upstream
   289            sku: ubuntu-2204-gen1
   290            version: latest
   291        osDisk:
   292          diskSizeGB: 128
   293          osType: Linux
   294        sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""}
   295        userAssignedIdentities:
   296        - providerID: /subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${CI_RG}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/${USER_IDENTITY}
   297        vmExtensions:
   298        - name: CustomScript
   299          protectedSettings:
   300            commandToExecute: |
   301              #!/bin/sh
   302              echo "This script is a no-op used for extension testing purposes ..."
   303              touch test_file
   304          publisher: Microsoft.Azure.Extensions
   305          version: "2.1"
   306        vmSize: ${AZURE_NODE_MACHINE_TYPE}
   307  ---
   308  apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
   309  kind: KubeadmConfigTemplate
   310  metadata:
   311    name: ${CLUSTER_NAME}-md-0
   312    namespace: default
   313  spec:
   314    template:
   315      spec:
   316        files:
   317        - contentFrom:
   318            secret:
   319              key: worker-node-azure.json
   320              name: ${CLUSTER_NAME}-md-0-azure-json
   321          owner: root:root
   322          path: /etc/kubernetes/azure.json
   323          permissions: "0644"
   324        - content: |
   325            #!/bin/bash
   326  
   327            set -o nounset
   328            set -o pipefail
   329            set -o errexit
   330            [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO=""
   331  
   332            echo "Use OOT credential provider"
   333            mkdir -p /var/lib/kubelet/credential-provider
   334            curl --retry 10 --retry-delay 5 -w "response status code is %{http_code}" -Lo /var/lib/kubelet/credential-provider/acr-credential-provider "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider"
   335            chmod 755 /var/lib/kubelet/credential-provider/acr-credential-provider
   336            curl --retry 10 --retry-delay 5 -w "response status code is %{http_code}" -Lo /var/lib/kubelet/credential-provider-config.yaml "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config.yaml"
   337            chmod 644 /var/lib/kubelet/credential-provider-config.yaml
   338          owner: root:root
   339          path: /tmp/oot-cred-provider.sh
   340          permissions: "0744"
   341        - content: |
   342            #!/bin/bash
   343  
   344            set -o nounset
   345            set -o pipefail
   346            set -o errexit
   347  
   348            systemctl stop kubelet
   349            declare -a BINARIES=("kubeadm" "kubectl" "kubelet")
   350            az login --identity
   351            for BINARY in "$${BINARIES[@]}"; do
   352              echo "* installing package: $${BINARY} ${KUBE_GIT_VERSION}"
   353              az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" -f "/usr/bin/$${BINARY}" --auth-mode login
   354            done
   355            systemctl restart kubelet
   356  
   357            echo "kubeadm version: $(kubeadm version -o=short)"
   358            echo "kubectl version: $(kubectl version --client=true)"
   359            echo "kubelet version: $(kubelet --version)"
   360          owner: root:root
   361          path: /tmp/replace-k8s-binaries.sh
   362          permissions: "0744"
   363        - content: |
   364            #!/bin/bash
   365  
   366            echo "enabling containerd CDI plugin"
   367            sed -i '/\[plugins."io.containerd.grpc.v1.cri"\]/a\    enable_cdi = true' /etc/containerd/config.toml
   368            systemctl restart containerd
   369          owner: root:root
   370          path: /tmp/containerd-config.sh
   371          permissions: "0744"
   372        joinConfiguration:
   373          nodeRegistration:
   374            kubeletExtraArgs:
   375              cloud-provider: external
   376              feature-gates: ${NODE_FEATURE_GATES:-"DynamicResourceAllocation=true"}
   377              image-credential-provider-bin-dir: /var/lib/kubelet/credential-provider
   378              image-credential-provider-config: /var/lib/kubelet/credential-provider-config.yaml
   379            name: '{{ ds.meta_data["local_hostname"] }}'
   380        preKubeadmCommands:
   381        - bash -c /tmp/containerd-config.sh
   382        - bash -c /tmp/oot-cred-provider.sh
   383        - bash -c /tmp/replace-k8s-binaries.sh
   384  ---
   385  apiVersion: cluster.x-k8s.io/v1beta1
   386  kind: MachineHealthCheck
   387  metadata:
   388    name: ${CLUSTER_NAME}-mhc-0
   389    namespace: default
   390  spec:
   391    clusterName: ${CLUSTER_NAME}
   392    maxUnhealthy: 100%
   393    selector:
   394      matchLabels:
   395        nodepool: pool1
   396    unhealthyConditions:
   397    - status: "True"
   398      timeout: 30s
   399      type: E2ENodeUnhealthy
   400  ---
   401  apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
   402  kind: AzureClusterIdentity
   403  metadata:
   404    labels:
   405      clusterctl.cluster.x-k8s.io/move-hierarchy: "true"
   406    name: ${CLUSTER_IDENTITY_NAME}
   407    namespace: default
   408  spec:
   409    allowedNamespaces: {}
   410    clientID: ${AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY}
   411    tenantID: ${AZURE_TENANT_ID}
   412    type: ${CLUSTER_IDENTITY_TYPE:=WorkloadIdentity}
   413  ---
   414  apiVersion: addons.cluster.x-k8s.io/v1beta1
   415  kind: ClusterResourceSet
   416  metadata:
   417    name: csi-proxy
   418    namespace: default
   419  spec:
   420    clusterSelector:
   421      matchLabels:
   422        csi-proxy: enabled
   423    resources:
   424    - kind: ConfigMap
   425      name: csi-proxy-addon
   426    strategy: ApplyOnce
   427  ---
   428  apiVersion: addons.cluster.x-k8s.io/v1beta1
   429  kind: ClusterResourceSet
   430  metadata:
   431    name: containerd-logger-${CLUSTER_NAME}
   432    namespace: default
   433  spec:
   434    clusterSelector:
   435      matchLabels:
   436        containerd-logger: enabled
   437    resources:
   438    - kind: ConfigMap
   439      name: containerd-logger-${CLUSTER_NAME}
   440    strategy: ApplyOnce
   441  ---
   442  apiVersion: addons.cluster.x-k8s.io/v1alpha1
   443  kind: HelmChartProxy
   444  metadata:
   445    name: calico
   446    namespace: default
   447  spec:
   448    chartName: tigera-operator
   449    clusterSelector:
   450      matchLabels:
   451        cni: calico
   452    namespace: tigera-operator
   453    releaseName: projectcalico
   454    repoURL: https://docs.tigera.io/calico/charts
   455    valuesTemplate: |-
   456      installation:
   457        cni:
   458          type: Calico
   459        calicoNetwork:
   460          bgp: Disabled
   461          mtu: 1350
    463          ipPools:{{range $i, $cidr := .Cluster.spec.clusterNetwork.pods.cidrBlocks }}
   464          - cidr: {{ $cidr }}
   465            encapsulation: VXLAN{{end}}
   466        registry: mcr.microsoft.com/oss
   467      # Image and registry configuration for the tigera/operator pod.
   468      tigeraOperator:
   469        image: tigera/operator
   470        registry: mcr.microsoft.com/oss
   471      calicoctl:
   472        image: mcr.microsoft.com/oss/calico/ctl
   473    version: ${CALICO_VERSION}
   474  ---
   475  apiVersion: addons.cluster.x-k8s.io/v1alpha1
   476  kind: HelmChartProxy
   477  metadata:
   478    name: azuredisk-csi-driver-chart
   479    namespace: default
   480  spec:
   481    chartName: azuredisk-csi-driver
   482    clusterSelector:
   483      matchLabels:
   484        azuredisk-csi: "true"
   485    namespace: kube-system
   486    releaseName: azuredisk-csi-driver-oot
   487    repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts
   488    valuesTemplate: |-
   489      controller:
   490        replicas: 1
   491        runOnControlPlane: true
   492      windows:
   493        useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }}
   494  ---
   495  apiVersion: addons.cluster.x-k8s.io/v1alpha1
   496  kind: HelmChartProxy
   497  metadata:
   498    name: cloud-provider-azure-chart
   499    namespace: default
   500  spec:
   501    chartName: cloud-provider-azure
   502    clusterSelector:
   503      matchLabels:
   504        cloud-provider: azure
   505    releaseName: cloud-provider-azure-oot
   506    repoURL: https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo
   507    valuesTemplate: |
   508      infra:
   509        clusterName: {{ .Cluster.metadata.name }}
   510      cloudControllerManager:
   511        clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }}
   512        logVerbosity: 4
   513  ---
   514  apiVersion: addons.cluster.x-k8s.io/v1alpha1
   515  kind: HelmChartProxy
   516  metadata:
   517    name: cloud-provider-azure-chart-ci
   518    namespace: default
   519  spec:
   520    chartName: cloud-provider-azure
   521    clusterSelector:
   522      matchLabels:
   523        cloud-provider: azure-ci
   524    releaseName: cloud-provider-azure-oot
   525    repoURL: https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo
   526    valuesTemplate: |
   527      infra:
   528        clusterName: {{ .Cluster.metadata.name }}
   529      cloudControllerManager:
   530        cloudConfig: ${CLOUD_CONFIG:-"/etc/kubernetes/azure.json"}
   531        cloudConfigSecretName: ${CONFIG_SECRET_NAME:-""}
   532        clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }}
   533        imageName: "${CCM_IMAGE_NAME:-""}"
   534        imageRepository: "${IMAGE_REGISTRY:-""}"
   535        imageTag: "${IMAGE_TAG_CCM:-""}"
   536        logVerbosity: ${CCM_LOG_VERBOSITY:-4}
   537        replicas: ${CCM_COUNT:-1}
   538        enableDynamicReloading: ${ENABLE_DYNAMIC_RELOADING:-false}
   539      cloudNodeManager:
   540        imageName: "${CNM_IMAGE_NAME:-""}"
   541        imageRepository: "${IMAGE_REGISTRY:-""}"
   542        imageTag: "${IMAGE_TAG_CNM:-""}"
   543  ---
   544  apiVersion: v1
   545  data:
   546    csi-proxy: |
   547      apiVersion: apps/v1
   548      kind: DaemonSet
   549      metadata:
   550        labels:
   551          k8s-app: csi-proxy
   552        name: csi-proxy
   553        namespace: kube-system
   554      spec:
   555        selector:
   556          matchLabels:
   557            k8s-app: csi-proxy
   558        template:
   559          metadata:
   560            labels:
   561              k8s-app: csi-proxy
   562          spec:
   563            nodeSelector:
   564              "kubernetes.io/os": windows
   565            securityContext:
   566              windowsOptions:
   567                hostProcess: true
   568                runAsUserName: "NT AUTHORITY\\SYSTEM"
   569            hostNetwork: true
   570            containers:
   571              - name: csi-proxy
   572                image: ghcr.io/kubernetes-sigs/sig-windows/csi-proxy:v1.0.2
   573  kind: ConfigMap
   574  metadata:
   575    annotations:
   576      note: generated
   577    labels:
   578      type: generated
   579    name: csi-proxy-addon
   580    namespace: default
   581  ---
   582  apiVersion: v1
   583  data:
   584    containerd-windows-logger: |
   585      apiVersion: apps/v1
   586      kind: DaemonSet
   587      metadata:
   588        labels:
   589          k8s-app: containerd-logger
   590        name: containerd-logger
   591        namespace: kube-system
   592      spec:
   593        selector:
   594          matchLabels:
   595            k8s-app: containerd-logger
   596        template:
   597          metadata:
   598            labels:
   599              k8s-app: containerd-logger
   600          spec:
   601            securityContext:
   602              windowsOptions:
   603                hostProcess: true
   604                runAsUserName: "NT AUTHORITY\\system"
   605            hostNetwork: true
   606            containers:
   607            - image: ghcr.io/kubernetes-sigs/sig-windows/eventflow-logger:v0.1.0
   608              args: [ "config.json" ]
   609              name: containerd-logger
   610              imagePullPolicy: Always
   611              volumeMounts:
   612              - name: containerd-logger-config
   613                mountPath: /config.json
   614                subPath: config.json
   615            nodeSelector:
   616              kubernetes.io/os: windows
   617            tolerations:
   618            - key: CriticalAddonsOnly
   619              operator: Exists
   620            - operator: Exists
   621            volumes:
   622            - configMap:
   623                name: containerd-logger-config
   624              name: containerd-logger-config
   625        updateStrategy:
   626          type: RollingUpdate
   627      ---
   628      kind: ConfigMap
   629      apiVersion: v1
   630      metadata:
   631        name: containerd-logger-config
   632        namespace: kube-system
   633      data:
   634        config.json: |
   635          {
   636            "inputs": [
   637              {
   638                "type": "ETW",
   639                "sessionNamePrefix": "containerd",
   640                "cleanupOldSessions": true,
   641                "reuseExistingSession": true,
   642                "providers": [
   643                  {
   644                    "providerName": "Microsoft.Virtualization.RunHCS",
   645                    "providerGuid": "0B52781F-B24D-5685-DDF6-69830ED40EC3",
   646                    "level": "Verbose"
   647                  },
   648                  {
   649                    "providerName": "ContainerD",
   650                    "providerGuid": "2acb92c0-eb9b-571a-69cf-8f3410f383ad",
   651                    "level": "Verbose"
   652                  }
   653                ]
   654              }
   655            ],
   656            "filters": [
   657              {
   658                  "type": "drop",
   659                  "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == Stats && hasnoproperty error"
   660              },
   661              {
   662                  "type": "drop",
   663                  "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == hcsshim::LayerID && hasnoproperty error"
   664              },
   665              {
   666                  "type": "drop",
   667                  "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == hcsshim::NameToGuid && hasnoproperty error"
   668              },
   669              {
   670                  "type": "drop",
   671                  "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == containerd.task.v2.Task.Stats && hasnoproperty error"
   672              },
   673              {
   674                  "type": "drop",
   675                  "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == containerd.task.v2.Task.State && hasnoproperty error"
   676              },
   677              {
   678                  "type": "drop",
   679                  "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == HcsGetProcessProperties && hasnoproperty error"
   680              },
   681              {
   682                  "type": "drop",
   683                  "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == HcsGetComputeSystemProperties && hasnoproperty error"
   684              }
   685            ],
   686            "outputs": [
   687              {
   688                "type": "StdOutput"
   689              }
   690            ],
   691            "schemaVersion": "2016-08-11"
   692          }
   693  kind: ConfigMap
   694  metadata:
   695    annotations:
   696      note: generated
   697    labels:
   698      type: generated
   699    name: containerd-logger-${CLUSTER_NAME}
   700    namespace: default
   701  ---
   702  apiVersion: addons.cluster.x-k8s.io/v1beta1
   703  kind: ClusterResourceSet
   704  metadata:
   705    name: metrics-server-${CLUSTER_NAME}
   706    namespace: default
   707  spec:
   708    clusterSelector:
   709      matchLabels:
   710        metrics-server: enabled
   711    resources:
   712    - kind: ConfigMap
   713      name: metrics-server-${CLUSTER_NAME}
   714    strategy: ApplyOnce
   715  ---
   716  apiVersion: v1
   717  data:
   718    metrics-server: |
   719      apiVersion: v1
   720      kind: ServiceAccount
   721      metadata:
   722        labels:
   723          k8s-app: metrics-server
   724        name: metrics-server
   725        namespace: kube-system
   726      ---
   727      apiVersion: rbac.authorization.k8s.io/v1
   728      kind: ClusterRole
   729      metadata:
   730        labels:
   731          k8s-app: metrics-server
   732          rbac.authorization.k8s.io/aggregate-to-admin: "true"
   733          rbac.authorization.k8s.io/aggregate-to-edit: "true"
   734          rbac.authorization.k8s.io/aggregate-to-view: "true"
   735        name: system:aggregated-metrics-reader
   736      rules:
   737      - apiGroups:
   738        - metrics.k8s.io
   739        resources:
   740        - pods
   741        - nodes
   742        verbs:
   743        - get
   744        - list
   745        - watch
   746      ---
   747      apiVersion: rbac.authorization.k8s.io/v1
   748      kind: ClusterRole
   749      metadata:
   750        labels:
   751          k8s-app: metrics-server
   752        name: system:metrics-server
   753      rules:
   754      - apiGroups:
   755        - ""
   756        resources:
   757        - nodes/metrics
   758        verbs:
   759        - get
   760      - apiGroups:
   761        - ""
   762        resources:
   763        - pods
   764        - nodes
   765        verbs:
   766        - get
   767        - list
   768        - watch
   769      ---
   770      apiVersion: rbac.authorization.k8s.io/v1
   771      kind: RoleBinding
   772      metadata:
   773        labels:
   774          k8s-app: metrics-server
   775        name: metrics-server-auth-reader
   776        namespace: kube-system
   777      roleRef:
   778        apiGroup: rbac.authorization.k8s.io
   779        kind: Role
   780        name: extension-apiserver-authentication-reader
   781      subjects:
   782      - kind: ServiceAccount
   783        name: metrics-server
   784        namespace: kube-system
   785      ---
   786      apiVersion: rbac.authorization.k8s.io/v1
   787      kind: ClusterRoleBinding
   788      metadata:
   789        labels:
   790          k8s-app: metrics-server
   791        name: metrics-server:system:auth-delegator
   792      roleRef:
   793        apiGroup: rbac.authorization.k8s.io
   794        kind: ClusterRole
   795        name: system:auth-delegator
   796      subjects:
   797      - kind: ServiceAccount
   798        name: metrics-server
   799        namespace: kube-system
   800      ---
   801      apiVersion: rbac.authorization.k8s.io/v1
   802      kind: ClusterRoleBinding
   803      metadata:
   804        labels:
   805          k8s-app: metrics-server
   806        name: system:metrics-server
   807      roleRef:
   808        apiGroup: rbac.authorization.k8s.io
   809        kind: ClusterRole
   810        name: system:metrics-server
   811      subjects:
   812      - kind: ServiceAccount
   813        name: metrics-server
   814        namespace: kube-system
   815      ---
   816      apiVersion: v1
   817      kind: Service
   818      metadata:
   819        labels:
   820          k8s-app: metrics-server
   821        name: metrics-server
   822        namespace: kube-system
   823      spec:
   824        ports:
   825        - name: https
   826          port: 443
   827          protocol: TCP
   828          targetPort: https
   829        selector:
   830          k8s-app: metrics-server
   831      ---
   832      apiVersion: apps/v1
   833      kind: Deployment
   834      metadata:
   835        labels:
   836          k8s-app: metrics-server
   837        name: metrics-server
   838        namespace: kube-system
   839      spec:
   840        selector:
   841          matchLabels:
   842            k8s-app: metrics-server
   843        strategy:
   844          rollingUpdate:
   845            maxUnavailable: 0
   846        template:
   847          metadata:
   848            labels:
   849              k8s-app: metrics-server
   850          spec:
   851            containers:
   852            - args:
   853              - --cert-dir=/tmp
   854              - --secure-port=4443
   855              - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
   856              - --kubelet-use-node-status-port
   857              - --metric-resolution=15s
   858              - --kubelet-insecure-tls
   859              image: registry.k8s.io/metrics-server/metrics-server:v0.6.3
   860              imagePullPolicy: IfNotPresent
   861              livenessProbe:
   862                failureThreshold: 3
   863                httpGet:
   864                  path: /livez
   865                  port: https
   866                  scheme: HTTPS
   867                periodSeconds: 10
   868              name: metrics-server
   869              ports:
   870              - containerPort: 4443
   871                name: https
   872                protocol: TCP
   873              readinessProbe:
   874                failureThreshold: 3
   875                httpGet:
   876                  path: /readyz
   877                  port: https
   878                  scheme: HTTPS
   879                initialDelaySeconds: 20
   880                periodSeconds: 10
   881              resources:
   882                requests:
   883                  cpu: 100m
   884                  memory: 200Mi
   885              securityContext:
   886                allowPrivilegeEscalation: false
   887                readOnlyRootFilesystem: true
   888                runAsNonRoot: true
   889                runAsUser: 1000
   890              volumeMounts:
   891              - mountPath: /tmp
   892                name: tmp-dir
   893            nodeSelector:
   894              kubernetes.io/os: linux
   895            priorityClassName: system-cluster-critical
   896            serviceAccountName: metrics-server
   897            tolerations:
   898            - effect: NoSchedule
   899              key: node-role.kubernetes.io/master
   900              operator: Exists
   901            - effect: NoSchedule
   902              key: node-role.kubernetes.io/control-plane
   903              operator: Exists
   904            volumes:
   905            - emptyDir: {}
   906              name: tmp-dir
   907      ---
   908      apiVersion: apiregistration.k8s.io/v1
   909      kind: APIService
   910      metadata:
   911        labels:
   912          k8s-app: metrics-server
   913        name: v1beta1.metrics.k8s.io
   914      spec:
   915        group: metrics.k8s.io
   916        groupPriorityMinimum: 100
   917        insecureSkipTLSVerify: true
   918        service:
   919          name: metrics-server
   920          namespace: kube-system
   921        version: v1beta1
   922        versionPriority: 100
   923  kind: ConfigMap
   924  metadata:
   925    annotations:
   926      note: generated
   927    labels:
   928      type: generated
   929    name: metrics-server-${CLUSTER_NAME}
   930    namespace: default