#!/usr/bin/env bash
#
# Provisions a Vagrant VM as a Kubernetes node for the Cilium test suite:
# installs helm and kubeadm/kubelet/kubectl for the requested K8S_VERSION,
# then (later in this script) either inits the control plane on k8s1 or
# joins the cluster as a worker.

set -e

# Log in to Docker Hub only when BOTH credentials are provided. With only
# one of them set, `docker login` would fail and abort the whole
# provisioning run because of `set -e`.
if [[ -n "${DOCKER_LOGIN}" && -n "${DOCKER_PASSWORD}" ]]; then
    echo "${DOCKER_PASSWORD}" | docker login -u "${DOCKER_LOGIN}" --password-stdin
fi

HOST=$(hostname)
export HELM_VERSION="3.7.0"
export TOKEN="258062.5d84c017c9b2796c"
export CILIUM_CONFIG_DIR="/opt/cilium"
export PROVISIONSRC="/tmp/provision/"
export SRC_FOLDER="/home/vagrant/go/src/github.com/cilium/cilium"
export SYSTEMD_SERVICES="$SRC_FOLDER/contrib/systemd"

# Positional arguments supplied by the Vagrant provisioner.
NODE=$1
IP=$2
K8S_VERSION=$3
IPv6=$4
CONTAINER_RUNTIME=$5

# Kubeadm default parameters
export KUBEADM_ADDR='192.168.56.11'
export KUBEADM_POD_CIDR='10.10.0.0/16'
export KUBEADM_V1BETA2_POD_CIDR='10.10.0.0/16,fd02::/112'
export KUBEADM_SVC_CIDR='10.96.0.0/12'
export KUBEADM_V1BETA2_SVC_CIDR='10.96.0.0/12,fd03::/112'
export IPV6_DUAL_STACK_FEATURE_GATE='true'
export KUBEADM_CRI_SOCKET="/var/run/dockershim.sock"
export KUBEADM_WORKER_OPTIONS=""
export KUBEADM_OPTIONS=""
export K8S_FULL_VERSION=""
export CONTROLLER_FEATURE_GATES=""
export API_SERVER_FEATURE_GATES=""
export DNS_DEPLOYMENT="${PROVISIONSRC}/manifest/dns_deployment.yaml"
export KUBEDNS_DEPLOYMENT="${PROVISIONSRC}/manifest/kubedns_deployment.yaml"
export COREDNS_DEPLOYMENT="${PROVISIONSRC}/manifest/${K8S_VERSION}/coredns_deployment.yaml"
# Fall back to the generic CoreDNS manifest when there is no
# version-specific one for this K8S_VERSION.
if [ ! -f "${COREDNS_DEPLOYMENT}" ]; then
    export COREDNS_DEPLOYMENT="${PROVISIONSRC}/manifest/coredns_deployment.yaml"
fi

source "${PROVISIONSRC}/helpers.bash"

# Raise the sshd session limit so parallel test SSH connections are not
# rejected.
sudo bash -c "echo MaxSessions 200 >> /etc/ssh/sshd_config"
sudo systemctl restart ssh

# Install the pinned helm release unless that exact version is already
# present on the PATH.
if ! helm version | grep -q "${HELM_VERSION}"; then
    HELM_TAR="helm-v${HELM_VERSION}-linux-amd64.tar.gz"
    retry_function "wget -nv https://get.helm.sh/${HELM_TAR}"
    tar xzvf "${HELM_TAR}"
    mv linux-amd64/helm /usr/local/bin/
    rm -rf linux-amd64 "${HELM_TAR}"
fi
helm version

# Install serial ttyS0 server (auto-login root console for debugging).
# \$TERM is escaped so it is written literally into the unit file.
cat <<EOF > /etc/systemd/system/serial-getty@ttyS0.service
[Service]
ExecStart=
ExecStart=/sbin/agetty --autologin root -8 --keep-baud 115200,38400,9600 ttyS0 \$TERM
EOF

systemctl daemon-reload
sudo service serial-getty@ttyS0 start

# TODO: Check if the k8s version is the same
if [[ -f "/etc/provision_finished" ]]; then
    echo "Checking that kubelet exists in path"
    which kubelet
    echo "provision is finished, recompiling"
    "${PROVISIONSRC}/compile.sh"
    exit 0
fi

# Default DNS deployment is kube-dns; version-specific cases below may
# re-point the symlink at CoreDNS.
sudo ln -sf "${KUBEDNS_DEPLOYMENT}" "${DNS_DEPLOYMENT}"
"${PROVISIONSRC}/dns.sh"

cat <<EOF >> /etc/hosts
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
192.168.56.11 k8s1
192.168.56.12 k8s2
192.168.56.13 k8s3
192.168.56.14 k8s4
192.168.56.15 k8s5
192.168.56.16 k8s6
EOF

cat <<EOF > /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF

sudo rm /var/lib/apt/lists/lock || true
retry_function "wget https://packages.cloud.google.com/apt/doc/apt-key.gpg"
apt-key add apt-key.gpg

# From 1.24 on dockershim is gone, so kubeadm must talk to containerd
# directly.
case $K8S_VERSION in
    "1.24" | "1.25" | "1.26" | "1.27")
        KUBEADM_CRI_SOCKET="unix:///run/containerd/containerd.sock"
        ;;
esac

# kubeadm v1alpha1 single-document config; `{{ .VAR }}` placeholders are
# rendered with envtpl just before `kubeadm init`.
KUBEADM_CONFIG_ALPHA1=$(cat <<-EOF
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
api:
  advertiseAddress: "{{ .KUBEADM_ADDR }}"
criSocket: "{{ .KUBEADM_CRI_SOCKET }}"
kubernetesVersion: "v{{ .K8S_FULL_VERSION }}"
token: "{{ .TOKEN }}"
networking:
  podSubnet: "{{ .KUBEADM_POD_CIDR }}"
controlPlaneEndpoint: "k8s1:6443"
EOF
)
# Default to the oldest layout; the per-version case below overrides this.
KUBEADM_CONFIG="${KUBEADM_CONFIG_ALPHA1}"

# kubeadm v1alpha2 single-document MasterConfiguration.
KUBEADM_CONFIG_ALPHA2=$(cat <<-EOF
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
api:
  advertiseAddress: {{ .KUBEADM_ADDR }}
  bindPort: 6443
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: "{{ .TOKEN }}"
kubernetesVersion: "v{{ .K8S_FULL_VERSION }}"
networking:
  dnsDomain: cluster.local
  podSubnet: "{{ .KUBEADM_POD_CIDR }}"
  serviceSubnet: "{{ .KUBEADM_SVC_CIDR }}"
nodeRegistration:
  criSocket: "{{ .KUBEADM_CRI_SOCKET }}"
controlPlaneEndpoint: "k8s1:6443"
EOF
)

# kubeadm v1beta1: config is split into InitConfiguration and
# ClusterConfiguration documents.
KUBEADM_CONFIG_ALPHA3=$(cat <<-EOF
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: "{{ .KUBEADM_ADDR }}"
  bindPort: 6443
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: {{ .TOKEN }}
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
nodeRegistration:
  criSocket: "{{ .KUBEADM_CRI_SOCKET }}"
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: "v{{ .K8S_FULL_VERSION }}"
networking:
  dnsDomain: cluster.local
  podSubnet: "{{ .KUBEADM_POD_CIDR }}"
  serviceSubnet: "{{ .KUBEADM_SVC_CIDR }}"
controlPlaneEndpoint: "k8s1:6443"
controllerManager:
  extraArgs:
    "feature-gates": "{{ .CONTROLLER_FEATURE_GATES }}"
apiServer:
  extraArgs:
    "feature-gates": "{{ .API_SERVER_FEATURE_GATES }}"
EOF
)

# V1BETA2 configuration is enabled with DualStack feature gate by default.
# IPv6 only clusters can still be opted by setting IPv6 variable to 1.
# kubeadm v1beta2 template: dual-stack pod/service CIDRs plus the
# IPv6DualStack feature gate (flipped to false for IPv6-only clusters
# later in this script).
KUBEADM_CONFIG_V1BETA2=$(cat <<-EOF
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: "{{ .KUBEADM_ADDR }}"
  bindPort: 6443
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: {{ .TOKEN }}
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
nodeRegistration:
  criSocket: "{{ .KUBEADM_CRI_SOCKET }}"
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: "v{{ .K8S_FULL_VERSION }}"
featureGates:
  IPv6DualStack: {{ .IPV6_DUAL_STACK_FEATURE_GATE }}
networking:
  dnsDomain: cluster.local
  podSubnet: "{{ .KUBEADM_V1BETA2_POD_CIDR }}"
  serviceSubnet: "{{ .KUBEADM_V1BETA2_SVC_CIDR }}"
controlPlaneEndpoint: "k8s1:6443"
controllerManager:
  extraArgs:
    "node-cidr-mask-size-ipv6": "120"
    "feature-gates": "{{ .CONTROLLER_FEATURE_GATES }},IPv6DualStack={{ .IPV6_DUAL_STACK_FEATURE_GATE }}"
apiServer:
  extraArgs:
    "feature-gates": "{{ .API_SERVER_FEATURE_GATES }},IPv6DualStack={{ .IPV6_DUAL_STACK_FEATURE_GATE }}"
EOF
)

# V1BETA3 configuration is enabled with DualStack feature gate by default.
# IPv6 only clusters can still be opted by setting IPv6 variable to 1.
# It also sets the cgroup-driver to "cgroupfs", away from "systemd",
# so that docker does not have to be reconfigured and restarted.
# kubeadm "v1beta3" template: Init/JoinConfiguration use v1beta3, while
# the ClusterConfiguration deliberately stays on v1beta2 (it still carries
# the IPv6DualStack feature gate). A KubeletConfiguration pins the cgroup
# driver to "cgroupfs" so docker does not need to be reconfigured, and the
# JoinConfiguration embeds the worker preflight/discovery settings.
KUBEADM_CONFIG_V1BETA3=$(cat <<-EOF
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: "{{ .KUBEADM_ADDR }}"
  bindPort: 6443
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: {{ .TOKEN }}
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
nodeRegistration:
  criSocket: "{{ .KUBEADM_CRI_SOCKET }}"
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: "v{{ .K8S_FULL_VERSION }}"
featureGates:
  IPv6DualStack: {{ .IPV6_DUAL_STACK_FEATURE_GATE }}
networking:
  dnsDomain: cluster.local
  podSubnet: "{{ .KUBEADM_V1BETA2_POD_CIDR }}"
  serviceSubnet: "{{ .KUBEADM_V1BETA2_SVC_CIDR }}"
controlPlaneEndpoint: "k8s1:6443"
controllerManager:
  extraArgs:
    "node-cidr-mask-size-ipv6": "120"
    "feature-gates": "{{ .CONTROLLER_FEATURE_GATES }},IPv6DualStack={{ .IPV6_DUAL_STACK_FEATURE_GATE }}"
apiServer:
  extraArgs:
    "feature-gates": "{{ .API_SERVER_FEATURE_GATES }},IPv6DualStack={{ .IPV6_DUAL_STACK_FEATURE_GATE }}"
---
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupDriver: cgroupfs
---
kind: JoinConfiguration
apiVersion: kubeadm.k8s.io/v1beta3
nodeRegistration:
  criSocket: "{{ .KUBEADM_CRI_SOCKET }}"
  ignorePreflightErrors:
  - "cri"
  - "SystemVerification"
  - "swap"
discovery:
  bootstrapToken:
    token: {{ .TOKEN }}
    apiServerEndpoint: "k8s1:6443"
    unsafeSkipCAVerification: true
EOF
)

# V1BETA4 configuration is enabled with DualStack feature gate by default.
# IPv6 only clusters can still be opted by setting IPv6 variable to 1.
# It also sets the cgroup-driver to "cgroupfs", away from "systemd",
# so that docker does not have to be reconfigured and restarted.
# This differs from V1BETA3 in that it does not contain the featureGates
# field, which newer kubeadm rejects:
# - featureGates: Invalid value: map[string]bool{"IPv6DualStack":true}: IPv6DualStack is not a valid feature name.
KUBEADM_CONFIG_V1BETA4=$(cat <<-EOF
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: "{{ .KUBEADM_ADDR }}"
  bindPort: 6443
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: {{ .TOKEN }}
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
nodeRegistration:
  criSocket: "{{ .KUBEADM_CRI_SOCKET }}"
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: "v{{ .K8S_FULL_VERSION }}"
networking:
  dnsDomain: cluster.local
  podSubnet: "{{ .KUBEADM_V1BETA2_POD_CIDR }}"
  serviceSubnet: "{{ .KUBEADM_V1BETA2_SVC_CIDR }}"
controlPlaneEndpoint: "k8s1:6443"
controllerManager:
  extraArgs:
    "node-cidr-mask-size-ipv6": "120"
    "feature-gates": "{{ .CONTROLLER_FEATURE_GATES }},IPv6DualStack={{ .IPV6_DUAL_STACK_FEATURE_GATE }}"
apiServer:
  extraArgs:
    "feature-gates": "{{ .API_SERVER_FEATURE_GATES }},IPv6DualStack={{ .IPV6_DUAL_STACK_FEATURE_GATE }}"
---
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupDriver: cgroupfs
---
kind: JoinConfiguration
apiVersion: kubeadm.k8s.io/v1beta3
nodeRegistration:
  criSocket: "{{ .KUBEADM_CRI_SOCKET }}"
  ignorePreflightErrors:
  - "cri"
  - "SystemVerification"
  - "swap"
discovery:
  bootstrapToken:
    token: {{ .TOKEN }}
    apiServerEndpoint: "k8s1:6443"
    unsafeSkipCAVerification: true
EOF
)

# V1BETA5 configuration is enabled with DualStack feature gate by default.
# IPv6 only clusters can still be opted by setting IPv6 variable to 1.
# It also sets the cgroup-driver to "cgroupfs", away from "systemd",
# so that docker does not have to be reconfigured and restarted.
# This differs from V1BETA4 in that every document uses v1beta3 and no
# IPv6DualStack feature gate is referenced anywhere (it is no longer a
# valid feature name):
# - featureGates: Invalid value: map[string]bool{"IPv6DualStack":true}: IPv6DualStack is not a valid feature name.
KUBEADM_CONFIG_V1BETA5=$(cat <<-EOF
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: {{ .TOKEN }}
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
localAPIEndpoint:
  advertiseAddress: "{{ .KUBEADM_ADDR }}"
  bindPort: 6443
nodeRegistration:
  criSocket: "{{ .KUBEADM_CRI_SOCKET }}"
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: "v{{ .K8S_FULL_VERSION }}"
apiServer:
  extraArgs:
    "feature-gates": "{{ .API_SERVER_FEATURE_GATES }}"
  timeoutForControlPlane: 4m0s
controlPlaneEndpoint: k8s1:6443
controllerManager:
  extraArgs:
    "node-cidr-mask-size-ipv6": "120"
    "feature-gates": "{{ .CONTROLLER_FEATURE_GATES }}"
networking:
  dnsDomain: cluster.local
  podSubnet: "{{ .KUBEADM_V1BETA2_POD_CIDR }}"
  serviceSubnet: "{{ .KUBEADM_V1BETA2_SVC_CIDR }}"
---
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupDriver: cgroupfs
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
discovery:
  bootstrapToken:
    token: {{ .TOKEN }}
    apiServerEndpoint: "k8s1:6443"
    unsafeSkipCAVerification: true
  tlsBootstrapToken: {{ .TOKEN }}
nodeRegistration:
  criSocket: "{{ .KUBEADM_CRI_SOCKET }}"
  ignorePreflightErrors:
  - cri
  - SystemVerification
  - swap
EOF
)

# CRIO bridge disabled.
if [[ -f "/etc/cni/net.d/100-crio-bridge.conf" ]]; then
    echo "Disabling crio CNI bridge"
    sudo rm -rfv /etc/cni/net.d/100-crio-bridge.conf
    # The loopback conf may already be absent; never fail here.
    sudo rm -rfv /etc/cni/net.d/200-loopback.conf || true
fi

# `--ignore-preflight-errors=cri` is used because /var/run/dockershim.sock
# is not present (the base image ships containerd), and with that option
# kubeadm falls back to /var/run/docker.sock.
#
# SystemVerification errors are ignored as the net-next VM often triggers
# them, e.g.:
#   [ERROR SystemVerification]: unsupported kernel release: 5.0.0-rc6+
case $K8S_VERSION in
    "1.16")
        KUBERNETES_CNI_VERSION="0.7.5"
        K8S_FULL_VERSION="1.16.15"
        KUBEADM_OPTIONS="--ignore-preflight-errors=cri,swap"
        KUBEADM_WORKER_OPTIONS="--token=$TOKEN --discovery-token-unsafe-skip-ca-verification --ignore-preflight-errors=cri,SystemVerification,swap"
        sudo ln -sf "$COREDNS_DEPLOYMENT" "$DNS_DEPLOYMENT"
        KUBEADM_CONFIG="${KUBEADM_CONFIG_ALPHA3}"
        ;;
    "1.17")
        KUBERNETES_CNI_VERSION="0.8.7"
        K8S_FULL_VERSION="1.17.17"
        KUBEADM_OPTIONS="--ignore-preflight-errors=cri,swap"
        KUBEADM_WORKER_OPTIONS="--token=$TOKEN --discovery-token-unsafe-skip-ca-verification --ignore-preflight-errors=cri,SystemVerification,swap"
        sudo ln -sf "$COREDNS_DEPLOYMENT" "$DNS_DEPLOYMENT"
        KUBEADM_CONFIG="${KUBEADM_CONFIG_ALPHA3}"
        ;;
    "1.18")
        # kubeadm 1.18 requires conntrack to be installed; remove this once
        # the VM image has been upgraded.
        sudo apt-get install -y conntrack
        KUBERNETES_CNI_VERSION="0.8.7"
        KUBERNETES_CNI_OS="-linux"
        K8S_FULL_VERSION="1.18.20"
        KUBEADM_OPTIONS="--ignore-preflight-errors=cri,swap"
        KUBEADM_WORKER_OPTIONS="--token=$TOKEN --discovery-token-unsafe-skip-ca-verification --ignore-preflight-errors=cri,SystemVerification,swap"
        sudo ln -sf "$COREDNS_DEPLOYMENT" "$DNS_DEPLOYMENT"
        KUBEADM_CONFIG="${KUBEADM_CONFIG_V1BETA2}"
        CONTROLLER_FEATURE_GATES="EndpointSlice=true"
        API_SERVER_FEATURE_GATES="EndpointSlice=true"
        ;;
    "1.19")
        # kubeadm 1.19 requires conntrack to be installed; remove this once
        # the VM image has been upgraded.
        sudo apt-get install -y conntrack
        KUBERNETES_CNI_VERSION="0.8.7"
        KUBERNETES_CNI_OS="-linux"
        K8S_FULL_VERSION="1.19.16"
        KUBEADM_OPTIONS="--ignore-preflight-errors=cri,swap"
        KUBEADM_WORKER_OPTIONS="--token=$TOKEN --discovery-token-unsafe-skip-ca-verification --ignore-preflight-errors=cri,SystemVerification,swap"
        sudo ln -sf "$COREDNS_DEPLOYMENT" "$DNS_DEPLOYMENT"
        KUBEADM_CONFIG="${KUBEADM_CONFIG_V1BETA2}"
        CONTROLLER_FEATURE_GATES="EndpointSlice=true"
        API_SERVER_FEATURE_GATES="EndpointSlice=true"
        ;;
    "1.20")
        sudo apt-get install -y conntrack
        KUBERNETES_CNI_VERSION="0.8.7"
        KUBERNETES_CNI_OS="-linux"
        K8S_FULL_VERSION="1.20.15"
        KUBEADM_OPTIONS="--ignore-preflight-errors=cri,swap"
        KUBEADM_WORKER_OPTIONS="--token=$TOKEN --discovery-token-unsafe-skip-ca-verification --ignore-preflight-errors=cri,SystemVerification,swap"
        sudo ln -sf "$COREDNS_DEPLOYMENT" "$DNS_DEPLOYMENT"
        KUBEADM_CONFIG="${KUBEADM_CONFIG_V1BETA2}"
        CONTROLLER_FEATURE_GATES="EndpointSlice=true,EndpointSliceTerminatingCondition=true"
        API_SERVER_FEATURE_GATES="EndpointSlice=true,EndpointSliceTerminatingCondition=true"
        ;;
    "1.21")
        sudo apt-get install -y conntrack
        KUBERNETES_CNI_VERSION="0.8.7"
        KUBERNETES_CNI_OS="-linux"
        K8S_FULL_VERSION="1.21.14"
        KUBEADM_OPTIONS="--ignore-preflight-errors=cri,swap"
        KUBEADM_WORKER_OPTIONS="--token=$TOKEN --discovery-token-unsafe-skip-ca-verification --ignore-preflight-errors=cri,SystemVerification,swap"
        sudo ln -sf "$COREDNS_DEPLOYMENT" "$DNS_DEPLOYMENT"
        KUBEADM_CONFIG="${KUBEADM_CONFIG_V1BETA2}"
        CONTROLLER_FEATURE_GATES="EndpointSlice=true,EndpointSliceTerminatingCondition=true"
        API_SERVER_FEATURE_GATES="EndpointSlice=true,EndpointSliceTerminatingCondition=true"
        ;;
    "1.22")
        sudo apt-get install -y conntrack
        KUBERNETES_CNI_VERSION="0.8.7"
        KUBERNETES_CNI_OS="-linux"
        K8S_FULL_VERSION="1.22.13"
        KUBEADM_OPTIONS="--ignore-preflight-errors=cri,swap"
        KUBEADM_WORKER_OPTIONS="--token=$TOKEN --discovery-token-unsafe-skip-ca-verification --ignore-preflight-errors=cri,SystemVerification,swap"
        sudo ln -sf "$COREDNS_DEPLOYMENT" "$DNS_DEPLOYMENT"
        KUBEADM_CONFIG="${KUBEADM_CONFIG_V1BETA3}"
        CONTROLLER_FEATURE_GATES="EndpointSlice=true,EndpointSliceTerminatingCondition=true"
        API_SERVER_FEATURE_GATES="EndpointSlice=true,EndpointSliceTerminatingCondition=true"
        ;;
    "1.23")
        sudo apt-get install -y conntrack
        KUBERNETES_CNI_VERSION="0.8.7"
        KUBERNETES_CNI_OS="-linux"
        K8S_FULL_VERSION="1.23.10"
        KUBEADM_OPTIONS="--ignore-preflight-errors=cri,swap"
        KUBEADM_WORKER_OPTIONS="--token=$TOKEN --discovery-token-unsafe-skip-ca-verification --ignore-preflight-errors=cri,SystemVerification,swap"
        sudo ln -sf "$COREDNS_DEPLOYMENT" "$DNS_DEPLOYMENT"
        KUBEADM_CONFIG="${KUBEADM_CONFIG_V1BETA3}"
        CONTROLLER_FEATURE_GATES="EndpointSlice=true,EndpointSliceTerminatingCondition=true"
        API_SERVER_FEATURE_GATES="EndpointSlice=true,EndpointSliceTerminatingCondition=true"
        ;;
    "1.24")
        # From 1.24 on, worker join options come from the rendered config
        # file instead of command-line flags.
        sudo apt-get install -y conntrack
        KUBERNETES_CNI_VERSION="1.1.1"
        KUBERNETES_CNI_OS="-linux"
        K8S_FULL_VERSION="1.24.4"
        KUBEADM_OPTIONS="--ignore-preflight-errors=cri,swap"
        KUBEADM_WORKER_OPTIONS="--config=/tmp/config.yaml"
        sudo ln -sf "$COREDNS_DEPLOYMENT" "$DNS_DEPLOYMENT"
        KUBEADM_CONFIG="${KUBEADM_CONFIG_V1BETA4}"
        CONTROLLER_FEATURE_GATES="EndpointSlice=true,EndpointSliceTerminatingCondition=true"
        API_SERVER_FEATURE_GATES="EndpointSlice=true,EndpointSliceTerminatingCondition=true"
        ;;
    "1.25")
        sudo apt-get install -y conntrack
        KUBERNETES_CNI_VERSION="1.1.1"
        KUBERNETES_CNI_OS="-linux"
        K8S_FULL_VERSION="1.25.0"
        KUBEADM_OPTIONS="--ignore-preflight-errors=cri,swap"
        KUBEADM_WORKER_OPTIONS="--config=/tmp/config.yaml"
        sudo ln -sf "$COREDNS_DEPLOYMENT" "$DNS_DEPLOYMENT"
        KUBEADM_CONFIG="${KUBEADM_CONFIG_V1BETA5}"
        CONTROLLER_FEATURE_GATES="EndpointSliceTerminatingCondition=true"
        API_SERVER_FEATURE_GATES="EndpointSliceTerminatingCondition=true"
        ;;
    "1.26")
        sudo apt-get install -y conntrack
        KUBERNETES_CNI_VERSION="1.1.1"
        KUBERNETES_CNI_OS="-linux"
        K8S_FULL_VERSION="1.26.3"
        KUBEADM_OPTIONS="--ignore-preflight-errors=cri,swap"
        KUBEADM_WORKER_OPTIONS="--config=/tmp/config.yaml"
        sudo ln -sf "$COREDNS_DEPLOYMENT" "$DNS_DEPLOYMENT"
        KUBEADM_CONFIG="${KUBEADM_CONFIG_V1BETA5}"
        CONTROLLER_FEATURE_GATES="EndpointSliceTerminatingCondition=true"
        API_SERVER_FEATURE_GATES="EndpointSliceTerminatingCondition=true"
        ;;
    "1.27")
        sudo apt-get install -y conntrack
        KUBERNETES_CNI_VERSION="1.1.1"
        KUBERNETES_CNI_OS="-linux"
        K8S_FULL_VERSION="1.27.1"
        KUBEADM_OPTIONS="--ignore-preflight-errors=cri,swap"
        KUBEADM_WORKER_OPTIONS="--config=/tmp/config.yaml"
        sudo ln -sf "$COREDNS_DEPLOYMENT" "$DNS_DEPLOYMENT"
        KUBEADM_CONFIG="${KUBEADM_CONFIG_V1BETA5}"
        CONTROLLER_FEATURE_GATES="EndpointSliceTerminatingCondition=true"
        API_SERVER_FEATURE_GATES="EndpointSliceTerminatingCondition=true"
        ;;
    "1.28")
        sudo apt-get install -y conntrack
        KUBERNETES_CNI_VERSION="1.1.1"
        KUBERNETES_CNI_OS="-linux"
        K8S_FULL_VERSION="1.28.0-rc.0"
        KUBEADM_OPTIONS="--ignore-preflight-errors=cri,swap"
        KUBEADM_WORKER_OPTIONS="--config=/tmp/config.yaml"
        sudo ln -sf "$COREDNS_DEPLOYMENT" "$DNS_DEPLOYMENT"
        KUBEADM_CONFIG="${KUBEADM_CONFIG_V1BETA5}"
        CONTROLLER_FEATURE_GATES="EndpointSliceTerminatingCondition=true"
        API_SERVER_FEATURE_GATES="EndpointSliceTerminatingCondition=true"
        ;;
    "1.29")
        sudo apt-get install -y conntrack
        KUBERNETES_CNI_VERSION="1.1.1"
        KUBERNETES_CNI_OS="-linux"
        K8S_FULL_VERSION="1.29.0"
        KUBEADM_OPTIONS="--ignore-preflight-errors=cri,swap"
        KUBEADM_WORKER_OPTIONS="--config=/tmp/config.yaml"
        sudo ln -sf "$COREDNS_DEPLOYMENT" "$DNS_DEPLOYMENT"
        KUBEADM_CONFIG="${KUBEADM_CONFIG_V1BETA5}"
        CONTROLLER_FEATURE_GATES="EndpointSliceTerminatingCondition=true"
        API_SERVER_FEATURE_GATES="EndpointSliceTerminatingCondition=true"
        ;;
    "1.30")
        sudo apt-get install -y conntrack
        KUBERNETES_CNI_VERSION="1.1.1"
        KUBERNETES_CNI_OS="-linux"
        K8S_FULL_VERSION="1.30.0"
        KUBEADM_OPTIONS="--ignore-preflight-errors=cri,swap"
        KUBEADM_WORKER_OPTIONS="--config=/tmp/config.yaml"
        sudo ln -sf "$COREDNS_DEPLOYMENT" "$DNS_DEPLOYMENT"
        KUBEADM_CONFIG="${KUBEADM_CONFIG_V1BETA5}"
        CONTROLLER_FEATURE_GATES="EndpointSliceTerminatingCondition=true"
        API_SERVER_FEATURE_GATES="EndpointSliceTerminatingCondition=true"
        ;;
esac

if [ "$KUBEPROXY" == "0" ]; then
    KUBEADM_OPTIONS="$KUBEADM_OPTIONS --skip-phases=addon/kube-proxy"
fi

# Install kubernetes: try distro packages first, fall back to the binary
# install helper on failure. The trailing `*` globs are apt version
# wildcards and must stay unquoted.
set +e
install_k8s_using_packages \
    kubernetes-cni=${KUBERNETES_CNI_VERSION}* \
    kubelet=${K8S_FULL_VERSION}* \
    kubeadm=${K8S_FULL_VERSION}* \
    kubectl=${K8S_FULL_VERSION}*
if [ $? -ne 0 ]; then
    echo "falling back on binary k8s install"
    set -e
    install_k8s_using_binary "v${K8S_FULL_VERSION}" "v${KUBERNETES_CNI_VERSION}" "${KUBERNETES_CNI_OS}"
fi
set -e

case $CONTAINER_RUNTIME in
    "docker")
        ;;
    "containerd")
        KUBEADM_CRI_SOCKET="unix:///run/containerd/containerd.sock"
        ;;
    *)
        # Deliberately non-fatal: an empty/unknown runtime keeps the
        # dockershim default configured above.
        echo "Invalid container runtime '${CONTAINER_RUNTIME}'"
esac

# Default IPv6 to 0 so a missing 4th argument does not make `[ -eq ]`
# fail (and abort the script via `set -e`) on an empty operand.
if [ "${IPv6:-0}" -eq "1" ]; then
    KUBEADM_ADDR='[fd04::11]'
    KUBEADM_POD_CIDR="fd02::/112"
    KUBEADM_SVC_CIDR="fd03::/112"
    KUBEADM_V1BETA2_POD_CIDR="fd02::/112"
    KUBEADM_V1BETA2_SVC_CIDR="fd03::/112"
    IPV6_DUAL_STACK_FEATURE_GATE='false'
fi

sudo mkdir -p "${CILIUM_CONFIG_DIR}"

sudo mount bpffs /sys/fs/bpf -t bpf
sudo rm -rfv /var/lib/kubelet || true

if [[ "${PRELOAD_VM}" == "true" ]]; then
    cd "${SRC_FOLDER}"
    ./test/provision/container-images.sh test_images test/k8s
    ./test/provision/container-images.sh cilium_images .
    echo "VM preloading is finished, skipping the rest"
    exit 0
fi

echo KUBELET_EXTRA_ARGS=\"--fail-swap-on=false\" | tee -a /etc/default/kubelet

# Check hostname to know whether this node is the control plane (k8s1) or
# a worker joining the cluster.
if [[ "${HOST}" == "k8s1" ]]; then
    if [[ "${SKIP_K8S_PROVISION}" == "false" ]]; then
        # Render the {{ .VAR }} placeholders from the environment.
        echo "${KUBEADM_CONFIG}" | envtpl > /tmp/config.yaml

        # In case of failure, print the logs of all k8s containers.
        # KUBEADM_OPTIONS stays unquoted on purpose: it holds multiple
        # flags that must word-split.
        sudo kubeadm init --config /tmp/config.yaml $KUBEADM_OPTIONS || \
            (for containerID in $(sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock ps -a | grep kube | grep -Ev '(CONTAINER)|(pause)' | awk '{ print $1 }'); do
                echo "${containerID}"
                sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock logs "${containerID}"
            done && exit 1)

        mkdir -p /root/.kube
        # Rewrite the API server address to the stable k8s1 hostname.
        sudo sed -i "s/${KUBEADM_ADDR}/k8s1/" /etc/kubernetes/admin.conf
        sudo cp -i /etc/kubernetes/admin.conf /root/.kube/config
        sudo chown root:root /root/.kube/config

        sudo -u vagrant mkdir -p /home/vagrant/.kube
        sudo cp -fi /etc/kubernetes/admin.conf /home/vagrant/.kube/config
        sudo chown vagrant:vagrant /home/vagrant/.kube/config

        sudo cp -f /etc/kubernetes/admin.conf "${CILIUM_CONFIG_DIR}/kubeconfig"
        # Allow scheduling workloads on the control plane; the taint name
        # changed across k8s versions, so try both and tolerate failure.
        kubectl taint nodes --all node-role.kubernetes.io/master- || true
        kubectl taint nodes --all node-role.kubernetes.io/control-plane- || true
    else
        echo "SKIPPING K8S INSTALLATION"
    fi
    sudo systemctl start etcd

    # Install custom DNS deployment
    kubectl -n kube-system delete -f "${PROVISIONSRC}/manifest/dns_deployment.yaml" || true
    kubectl -n kube-system apply -f "${PROVISIONSRC}/manifest/dns_deployment.yaml"

    "${PROVISIONSRC}/compile.sh"
else
    if [[ "${SKIP_K8S_PROVISION}" == "false" ]]; then
        echo "${KUBEADM_CONFIG}" | envtpl > /tmp/config.yaml
        # KUBEADM_ADDR is exported above; -E keeps it visible to the
        # root shell that appends to /etc/hosts.
        sudo -E bash -c 'echo "${KUBEADM_ADDR} k8s1" >> /etc/hosts'
        # KUBEADM_WORKER_OPTIONS stays unquoted on purpose: it may hold
        # multiple flags that must word-split.
        kubeadm join "${KUBEADM_ADDR}:6443" \
            ${KUBEADM_WORKER_OPTIONS}
    else
        echo "SKIPPING K8S INSTALLATION"
    fi
    sudo systemctl stop etcd
fi

# Add aliases and bash completion for kubectl
cat <<EOF >> /home/vagrant/.bashrc

# kubectl
source <(kubectl completion bash)
alias k='kubectl'
complete -F __start_kubectl k
alias ks='kubectl -n kube-system'
complete -F __start_kubectl ks
alias kslogs='kubectl -n kube-system logs -l k8s-app=cilium --tail=-1'
alias wk='watch -n2 kubectl get pods -o wide'
alias wks='watch -n2 kubectl -n kube-system get pods -o wide'
alias wka='watch -n2 kubectl get all --all-namespaces -o wide'
cilium_pod() {
    kubectl -n kube-system get pods -l k8s-app=cilium \
        -o jsonpath="{.items[?(@.spec.nodeName == \"\$1\")].metadata.name}"
}
EOF

sudo touch /etc/provision_finished