github.com/dmaizel/tests@v0.0.0-20210728163746-cae6a2d9cee8/integration/kubernetes/init.sh (about)

     1  #!/bin/bash
     2  #
     3  # Copyright (c) 2018 Intel Corporation
     4  #
     5  # SPDX-License-Identifier: Apache-2.0
     6  #
     7  
     8  set -o errexit
     9  set -o nounset
    10  set -o pipefail
    11  
    12  
    13  SCRIPT_PATH=$(dirname "$(readlink -f "$0")")
    14  source "${SCRIPT_PATH}/../../.ci/lib.sh"
    15  source "${SCRIPT_PATH}/../../lib/common.bash"
    16  source "/etc/os-release" || source "/usr/lib/os-release"
    17  
    18  RUNTIME=${RUNTIME:-containerd-shim-kata-v2}
    19  RUNTIME_PATH=${RUNTIME_PATH:-$(command -v $RUNTIME)}
    20  
    21  system_pod_wait_time=120
    22  sleep_time=5
    23  wait_pods_ready()
    24  {
    25  	# Master components provide the cluster’s control plane, including kube-apisever,
    26  	# etcd, kube-scheduler, kube-controller-manager, etc.
    27  	# We need to ensure their readiness before we run any container tests.
    28  	local pods_status="kubectl get pods --all-namespaces"
    29  	local apiserver_pod="kube-apiserver.*1/1.*Running"
    30  	local controller_pod="kube-controller-manager.*1/1.*Running"
    31  	local etcd_pod="etcd.*1/1.*Running"
    32  	local scheduler_pod="kube-scheduler.*1/1.*Running"
    33  	local dns_pod="coredns.*1/1.*Running"
    34  
    35  	local system_pod=($apiserver_pod $controller_pod $etcd_pod $scheduler_pod $dns_pod)
    36  	for pod_entry in "${system_pod[@]}"
    37  	do
    38  		waitForProcess "$system_pod_wait_time" "$sleep_time" "$pods_status | grep $pod_entry"
    39  	done
    40  }
    41  
    42  cri_runtime="${CRI_RUNTIME:-crio}"
    43  kubernetes_version=$(get_version "externals.kubernetes.version")
    44  
    45  # store iptables if CI running on bare-metal
    46  BAREMETAL="${BAREMETAL:-false}"
    47  iptables_cache="${KATA_TESTS_DATADIR}/iptables_cache"
    48  if [ "${BAREMETAL}" == true ]; then
    49  	[ -d "${KATA_TESTS_DATADIR}" ] || sudo mkdir -p "${KATA_TESTS_DATADIR}"
    50  	# cleanup iptables before save
    51  	iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
    52  	iptables-save > "$iptables_cache"
    53  fi
    54  
    55  case "${cri_runtime}" in
    56  containerd)
    57  	cri_runtime_socket="/run/containerd/containerd.sock"
    58  	cgroup_driver="cgroupfs"
    59  	;;
    60  crio)
    61  	cri_runtime_socket="/var/run/crio/crio.sock"
    62  	cgroup_driver="systemd"
    63  	;;
    64  *)
    65  	echo "Runtime ${cri_runtime} not supported"
    66  	;;
    67  esac
    68  
# Check that there are no kata processes left over from previous tests
# (check_processes is provided by the sourced lib/common.bash).
check_processes

# Remove existing CNI configurations: stale IPAM leases and config files
# from a previous cluster would conflict with the fresh network deployment.
cni_config_dir="/etc/cni"
cni_interface="cni0"
sudo rm -rf /var/lib/cni/networks/*
sudo rm -rf "${cni_config_dir}"/*
# The unsilenced 'ip a show' doubles as the existence probe: tear the
# leftover bridge down and delete it only if it is present.
if ip a show "$cni_interface"; then
	sudo ip link set dev "$cni_interface" down
	sudo ip link del "$cni_interface"
fi
    81  
    82  echo "Start ${cri_runtime} service"
    83  # stop containerd first and then restart it
    84  info "Stop containerd service"
    85  systemctl is-active --quiet containerd && sudo systemctl stop containerd
    86  sudo systemctl enable --now ${cri_runtime}
    87  max_cri_socket_check=5
    88  wait_time_cri_socket_check=5
    89  
    90  for i in $(seq ${max_cri_socket_check}); do
    91  	#when the test runs two times in the CI, the second time crio takes some time to be ready
    92  	sleep "${wait_time_cri_socket_check}"
    93  	if [ -e "${cri_runtime_socket}" ]; then
    94  		break
    95  	fi
    96  
    97  	echo "Waiting for cri socket ${cri_runtime_socket} (try ${i})"
    98  done
    99  
   100  sudo systemctl status "${cri_runtime}" --no-pager
   101  
   102  echo "Init cluster using ${cri_runtime_socket}"
   103  kubeadm_config_template="${SCRIPT_PATH}/kubeadm/config.yaml"
   104  kubeadm_config_file="$(mktemp --tmpdir kubeadm_config.XXXXXX.yaml)"
   105  
   106  sed -e "s|CRI_RUNTIME_SOCKET|${cri_runtime_socket}|" "${kubeadm_config_template}" > "${kubeadm_config_file}"
   107  sed -i "s|KUBERNETES_VERSION|v${kubernetes_version/-*}|" "${kubeadm_config_file}"
   108  sed -i "s|CGROUP_DRIVER|${cgroup_driver}|" "${kubeadm_config_file}"
   109  
   110  trap 'sudo -E sh -c "rm -r "${kubeadm_config_file}""' EXIT
   111  
   112  if [ "${CI}" == true ] && [[ $(wc -l /proc/swaps | awk '{print $1}') -gt 1 ]]; then
   113  	sudo swapoff -a || true
   114  fi
   115  
   116  #reinstall kubelet to do deep cleanup
   117  if [ "${BAREMETAL}" == true -a "$(command -v kubelet)" != "" ]; then
   118  	info "reinstall kubeadm, kubelet before initialize k8s"
   119  	bash -f "${SCRIPT_PATH}/../../.ci/install_kubernetes.sh"
   120  fi
   121  
   122  sudo -E kubeadm init --config "${kubeadm_config_file}"
   123  
   124  mkdir -p "$HOME/.kube"
   125  sudo cp "/etc/kubernetes/admin.conf" "$HOME/.kube/config"
   126  sudo chown $(id -u):$(id -g) "$HOME/.kube/config"
   127  export KUBECONFIG="$HOME/.kube/config"
   128  
   129  # enable debug log for kubelet
   130  sudo sed -i 's/.$/ --v=4"/' /var/lib/kubelet/kubeadm-flags.env
   131  echo "Kubelet options:"
   132  sudo cat /var/lib/kubelet/kubeadm-flags.env
   133  sudo systemctl daemon-reload && sudo systemctl restart kubelet
   134  
# Sanity-check that the API server answers before deploying networking.
kubectl get nodes
kubectl get pods

# default network plugin should be flannel, and its config file is taken
# from k8s 1.12 documentation (manifest layout of the coreos/flannel repo).
flannel_version="$(get_test_version "externals.flannel.version")"
flannel_url="https://raw.githubusercontent.com/coreos/flannel/${flannel_version}/Documentation/kube-flannel.yml"

arch=$("${SCRIPT_PATH}/../../.ci/kata-arch.sh")
# Load arch-specific configure file, if present; presumably it can
# override network_plugin_config for non-x86 architectures — see the
# fallback default below.
if [ -f "${SCRIPT_PATH}/../../.ci/${arch}/kubernetes/init.sh" ]; then
        source "${SCRIPT_PATH}/../../.ci/${arch}/kubernetes/init.sh"
fi

# Fall back to flannel unless the arch-specific init set its own manifest.
network_plugin_config=${network_plugin_config:-$flannel_url}

# Deploy the pod network.
kubectl apply -f "$network_plugin_config"
   151  
   152  # we need to ensure a few specific pods ready and running
   153  wait_pods_ready
   154  
   155  runtimeclass_files_path="${SCRIPT_PATH}/runtimeclass_workloads"
   156  echo "Create kata RuntimeClass resource"
   157  kubectl create -f "${runtimeclass_files_path}/kata-runtimeclass.yaml"
   158  
   159  # Enable the master node to be able to schedule pods.
   160  kubectl taint nodes "$(hostname)" node-role.kubernetes.io/master:NoSchedule-