# sigs.k8s.io/cluster-api-provider-azure@v1.14.3/templates/test/ci/cluster-template-prow-nvidia-gpu.yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  labels:
    cloud-provider: ${CLOUD_PROVIDER_AZURE_LABEL:=azure}
    cni: calico
  name: ${CLUSTER_NAME}
  namespace: default
spec:
  clusterNetwork:
    pods:
      cidrBlocks:
      - 192.168.0.0/16
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    kind: KubeadmControlPlane
    name: ${CLUSTER_NAME}-control-plane
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    kind: AzureCluster
    name: ${CLUSTER_NAME}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AzureCluster
metadata:
  name: ${CLUSTER_NAME}
  namespace: default
spec:
  additionalTags:
    buildProvenance: ${BUILD_PROVENANCE}
    creationTimestamp: ${TIMESTAMP}
    jobName: ${JOB_NAME}
  identityRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    kind: AzureClusterIdentity
    name: ${CLUSTER_IDENTITY_NAME}
  location: ${AZURE_LOCATION_GPU}
  networkSpec:
    subnets:
    - name: control-plane-subnet
      role: control-plane
    - name: node-subnet
      role: node
    vnet:
      name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet}
  resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}}
  subscriptionID: ${AZURE_SUBSCRIPTION_ID}
---
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
  name: ${CLUSTER_NAME}-control-plane
  namespace: default
spec:
  kubeadmConfigSpec:
    clusterConfiguration:
      apiServer:
        extraArgs:
          cloud-provider: external
        timeoutForControlPlane: 20m
      controllerManager:
        extraArgs:
          allocate-node-cidrs: "false"
          cloud-provider: external
          cluster-name: ${CLUSTER_NAME}
          v: "4"
      etcd:
        local:
          dataDir: /var/lib/etcddisk/etcd
          extraArgs:
            quota-backend-bytes: "8589934592"
    diskSetup:
      filesystems:
      - device: /dev/disk/azure/scsi1/lun0
        extraOpts:
        - -E
        - lazy_itable_init=1,lazy_journal_init=1
        filesystem: ext4
        label: etcd_disk
      - device: ephemeral0.1
        filesystem: ext4
        label: ephemeral0
        replaceFS: ntfs
      partitions:
      - device: /dev/disk/azure/scsi1/lun0
        layout: true
        overwrite: false
        tableType: gpt
    files:
    - contentFrom:
        secret:
          key: control-plane-azure.json
          name: ${CLUSTER_NAME}-control-plane-azure-json
      owner: root:root
      path: /etc/kubernetes/azure.json
      permissions: "0644"
    initConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          cloud-provider: external
        name: '{{ ds.meta_data["local_hostname"] }}'
    joinConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          cloud-provider: external
        name: '{{ ds.meta_data["local_hostname"] }}'
    mounts:
    - - LABEL=etcd_disk
      - /var/lib/etcddisk
    postKubeadmCommands: []
    preKubeadmCommands: []
  machineTemplate:
    infrastructureRef:
      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
      kind: AzureMachineTemplate
      name: ${CLUSTER_NAME}-control-plane
  replicas: ${CONTROL_PLANE_MACHINE_COUNT:=1}
  version: ${KUBERNETES_VERSION}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AzureMachineTemplate
metadata:
  name: ${CLUSTER_NAME}-control-plane
  namespace: default
spec:
  template:
    spec:
      dataDisks:
      - diskSizeGB: 256
        lun: 0
        nameSuffix: etcddisk
      osDisk:
        diskSizeGB: 128
        osType: Linux
      sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""}
      vmSize: ${AZURE_CONTROL_PLANE_MACHINE_TYPE}
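      # Note: only the md-0 worker pool below uses a GPU-capable VM size; the
      # control plane runs on the generic ${AZURE_CONTROL_PLANE_MACHINE_TYPE}.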
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AzureClusterIdentity
metadata:
  labels:
    clusterctl.cluster.x-k8s.io/move-hierarchy: "true"
  name: ${CLUSTER_IDENTITY_NAME}
  namespace: default
spec:
  allowedNamespaces: {}
  clientID: ${AZURE_CLIENT_ID}
  clientSecret:
    name: ${AZURE_CLUSTER_IDENTITY_SECRET_NAME}
    namespace: ${AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE}
  tenantID: ${AZURE_TENANT_ID}
  type: ServicePrincipal
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  name: ${CLUSTER_NAME}-md-0
  namespace: default
spec:
  clusterName: ${CLUSTER_NAME}
  replicas: ${WORKER_MACHINE_COUNT:=2}
  selector:
    matchLabels: null
  template:
    spec:
      bootstrap:
        configRef:
          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
          kind: KubeadmConfigTemplate
          name: ${CLUSTER_NAME}-md-0
      clusterName: ${CLUSTER_NAME}
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
        kind: AzureMachineTemplate
        name: ${CLUSTER_NAME}-md-0
      version: ${KUBERNETES_VERSION}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AzureMachineTemplate
metadata:
  name: ${CLUSTER_NAME}-md-0
  namespace: default
spec:
  template:
    spec:
      osDisk:
        diskSizeGB: 128
        managedDisk:
          storageAccountType: ${AZURE_GPU_NODE_STORAGE_TYPE:=Premium_LRS}
        osType: Linux
      sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""}
      vmSize: ${AZURE_GPU_NODE_MACHINE_TYPE:=Standard_NV12s_v3}
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
  name: ${CLUSTER_NAME}-md-0
  namespace: default
spec:
  template:
    spec:
      files:
      - contentFrom:
          secret:
            key: worker-node-azure.json
            name: ${CLUSTER_NAME}-md-0-azure-json
        owner: root:root
        path: /etc/kubernetes/azure.json
        permissions: "0644"
      joinConfiguration:
        nodeRegistration:
          kubeletExtraArgs:
            cloud-provider: external
          name: '{{ ds.meta_data["local_hostname"] }}'
---
apiVersion: addons.cluster.x-k8s.io/v1alpha1
kind: HelmChartProxy
metadata:
  name: calico
  namespace: default
spec:
  chartName: tigera-operator
  clusterSelector:
    matchLabels:
      cni: calico
  namespace: tigera-operator
  releaseName: projectcalico
  repoURL: https://docs.tigera.io/calico/charts
  valuesTemplate: |-
    installation:
      cni:
        type: Calico
      calicoNetwork:
        bgp: Disabled
        mtu: 1350
        ipPools:
        ipPools:{{range $i, $cidr := .Cluster.spec.clusterNetwork.pods.cidrBlocks }}
        - cidr: {{ $cidr }}
          encapsulation: VXLAN{{end}}
      registry: mcr.microsoft.com/oss
    # Image and registry configuration for the tigera/operator pod.
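    # mcr.microsoft.com/oss serves Microsoft-hosted mirrors of the upstream Calico images.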
    tigeraOperator:
      image: tigera/operator
      registry: mcr.microsoft.com/oss
    calicoctl:
      image: mcr.microsoft.com/oss/calico/ctl
  version: ${CALICO_VERSION}
---
apiVersion: addons.cluster.x-k8s.io/v1alpha1
kind: HelmChartProxy
metadata:
  name: azuredisk-csi-driver-chart
  namespace: default
spec:
  chartName: azuredisk-csi-driver
  clusterSelector:
    matchLabels:
      azuredisk-csi: "true"
  namespace: kube-system
  releaseName: azuredisk-csi-driver-oot
  repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts
  valuesTemplate: |-
    controller:
      replicas: 1
      runOnControlPlane: true
    windows:
      useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }}
---
apiVersion: addons.cluster.x-k8s.io/v1alpha1
kind: HelmChartProxy
metadata:
  name: cloud-provider-azure-chart
  namespace: default
spec:
  chartName: cloud-provider-azure
  clusterSelector:
    matchLabels:
      cloud-provider: azure
  releaseName: cloud-provider-azure-oot
  repoURL: https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo
  valuesTemplate: |
    infra:
      clusterName: {{ .Cluster.metadata.name }}
    cloudControllerManager:
      clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }}
      logVerbosity: 4
---
apiVersion: addons.cluster.x-k8s.io/v1alpha1
kind: HelmChartProxy
metadata:
  name: cloud-provider-azure-chart-ci
  namespace: default
spec:
  chartName: cloud-provider-azure
  clusterSelector:
    matchLabels:
      cloud-provider: azure-ci
  releaseName: cloud-provider-azure-oot
  repoURL: https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo
  valuesTemplate: |
    infra:
      clusterName: {{ .Cluster.metadata.name }}
    cloudControllerManager:
      cloudConfig: ${CLOUD_CONFIG:-"/etc/kubernetes/azure.json"}
      cloudConfigSecretName: ${CONFIG_SECRET_NAME:-""}
      clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }}
      imageName: "${CCM_IMAGE_NAME:-""}"
      imageRepository: "${IMAGE_REGISTRY:-""}"
      imageTag: "${IMAGE_TAG_CCM:-""}"
      logVerbosity: ${CCM_LOG_VERBOSITY:-4}
      replicas: ${CCM_COUNT:-1}
      enableDynamicReloading: ${ENABLE_DYNAMIC_RELOADING:-false}
    cloudNodeManager:
      imageName: "${CNM_IMAGE_NAME:-""}"
      imageRepository: "${IMAGE_REGISTRY:-""}"
      imageTag: "${IMAGE_TAG_CNM:-""}"
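# Usage sketch (illustrative, not part of the upstream template): the ${...} placeholders
# are substituted when the template is rendered, for example with clusterctl. Variables
# without defaults (e.g. AZURE_SUBSCRIPTION_ID, AZURE_LOCATION_GPU) must be set in the
# environment first; the cluster name and Kubernetes version below are example values.
#
#   clusterctl generate cluster my-gpu-cluster \
#     --from cluster-template-prow-nvidia-gpu.yaml \
#     --kubernetes-version v1.29.0 > my-gpu-cluster.yaml
#   kubectl apply -f my-gpu-cluster.yaml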