github.phpd.cn/cilium/cilium@v1.6.12/tests/k8s/cluster/cluster-manager.bash

#!/usr/bin/env bash

dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source "${dir}/../helpers.bash"
# dir might have been overwritten by helpers.bash
dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )

etcd_version="v3.1.0"
k8s_version=${k8s_version:-"1.7.4-00"}
docker_image_tag=${DOCKER_IMAGE_TAG:-"local_build"}

certs_dir="${dir}/certs"
k8s_dir="${dir}/k8s"
cilium_dir="${dir}/cilium"
cilium_original="${dir}/../../../examples/kubernetes/cilium.yaml"
function get_options(){
    if [[ "${1}" == "ipv6" ]]; then
        cat <<'EOF' > "${dir}/env.bash"
# IPv6
controller_ip="fd01::b"
controller_ip_brackets="[${controller_ip}]"
local="::1"
local_with_brackets="[${local}]"
cluster_cidr="F00D::C0A8:0000:0:0/96"
cluster_dns_ip="FD03::A"
cluster_name="cilium-k8s-tests"
node_cidr_mask_size="112"
service_cluster_ip_range="FD03::/112"
disable_ipv4=true
EOF
    else
        cat <<'EOF' > "${dir}/env.bash"
# IPv4
controller_ip="192.168.36.11"
controller_ip_brackets="${controller_ip}"
local="127.0.0.1"
local_with_brackets="${local}"
cluster_cidr="10.20.0.0/10"
cluster_dns_ip="172.20.0.10"
cluster_name="cilium-k8s-tests"
node_cidr_mask_size="16"
service_cluster_ip_range="172.20.0.0/16"
disable_ipv4=false
EOF
    fi

    echo "k8s_version=${k8s_version}" >> "${dir}/env.bash"

    source "${dir}/env.bash"

    cat <<EOF > "${dir}/kubeadm-master.conf"
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
api:
  advertiseAddress: ${controller_ip_brackets}
kubernetesVersion: "v${k8s_version::-3}"
etcd:
  endpoints:
  - https://${controller_ip_brackets}:2379
  caFile: /etc/kubernetes/ca.pem
  certFile: /etc/kubernetes/kubernetes.pem
  keyFile: /etc/kubernetes/kubernetes-key.pem
networking:
  dnsDomain: ${cluster_name}.local
  serviceSubnet: "${service_cluster_ip_range}"
token: "123456.abcdefghijklmnop"
controllerManagerExtraArgs:
  allocate-node-cidrs: "true"
  cluster-cidr: "${cluster_cidr}"
  node-cidr-mask-size: "${node_cidr_mask_size}"
EOF
}
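
# Illustrative example of what get_options produces: after "get_options ipv6",
# sourcing "${dir}/env.bash" yields controller_ip="fd01::b" and
# controller_ip_brackets="[fd01::b]"; with any other argument the IPv4
# defaults apply (controller_ip="192.168.36.11", no brackets). In both cases
# a matching kubeadm-master.conf is rendered next to it. Note that the
# env.bash heredocs are quoted ('EOF'), so the ${controller_ip} references
# are written literally and only expanded when env.bash is sourced.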

function generate_certs(){
    bash "${certs_dir}/generate-certs.sh"
}

function install_etcd(){
    wget -nv https://github.com/coreos/etcd/releases/download/${etcd_version}/etcd-${etcd_version}-linux-amd64.tar.gz
    tar -xf etcd-${etcd_version}-linux-amd64.tar.gz
    sudo mv etcd-${etcd_version}-linux-amd64/etcd* /usr/bin/
}
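
# Optional sanity check after install_etcd (assumes /usr/bin is on PATH):
#   etcd --version    # should report the pinned version, 3.1.0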

function copy_etcd_certs(){
    sudo mkdir -p /etc/etcd/
    sudo mkdir -p /etc/kubernetes/

    sudo cp "${certs_dir}/ca.pem" \
            "${certs_dir}/kubernetes-key.pem" \
            "${certs_dir}/kubernetes.pem" \
            /etc/etcd/

    # kubeadm doesn't automatically mount the files into the containers
    # yet, so we need to copy them to the directory that we specify in
    # the kubeadm configuration file
    sudo cp "${certs_dir}/ca.pem" \
            "${certs_dir}/kubernetes-key.pem" \
            "${certs_dir}/kubernetes.pem" \
            /etc/kubernetes
}

function generate_etcd_config(){
    sudo mkdir -p /var/lib/etcd

    sudo tee /etc/systemd/system/etcd.service <<EOF
[Unit]
Description=etcd
Documentation=https://github.com/coreos

[Service]
ExecStart=/usr/bin/etcd --name master \\
  --cert-file=/etc/etcd/kubernetes.pem \\
  --key-file=/etc/etcd/kubernetes-key.pem \\
  --peer-cert-file=/etc/etcd/kubernetes.pem \\
  --peer-key-file=/etc/etcd/kubernetes-key.pem \\
  --trusted-ca-file=/etc/etcd/ca.pem \\
  --peer-trusted-ca-file=/etc/etcd/ca.pem \\
  --peer-client-cert-auth \\
  --initial-advertise-peer-urls https://${controller_ip_brackets}:2380 \\
  --listen-peer-urls https://${controller_ip_brackets}:2380 \\
  --listen-client-urls https://${controller_ip_brackets}:2379,http://127.0.0.1:2379 \\
  --advertise-client-urls https://${controller_ip_brackets}:2379 \\
  --initial-cluster-token etcd-cluster-0 \\
  --initial-cluster master=https://${controller_ip_brackets}:2380 \\
  --initial-cluster-state new \\
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
}
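
# The rendered unit can be sanity-checked before it is started, for example:
#   sudo systemd-analyze verify /etc/systemd/system/etcd.service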

function start_kubeadm() {
    cd /home/vagrant/go/src/github.com/cilium/cilium/tests/k8s/cluster

    sudo bash -c "cat <<EOF > /etc/systemd/system/kubelet.service.d/15-kubelet-dns-args.conf
[Service]
Environment='KUBELET_DNS_ARGS=--cluster-dns=${cluster_dns_ip} --cluster-domain=${cluster_name}.local'
EOF
"
    sudo systemctl daemon-reload

    sudo mkdir -p /home/vagrant/.kube
    sudo mkdir -p /root/.kube
    sudo mkdir -p /var/lib/cilium/

    if [[ "$(hostname)" == "k8s-1" ]]; then
        sudo kubeadm init --config ./kubeadm-master.conf

        # copy kubeconfig for cilium and the vagrant user
        sudo cp /etc/kubernetes/admin.conf /home/vagrant/.kube/config
        sudo cp /etc/kubernetes/admin.conf /var/lib/cilium/kubeconfig
        sudo chown 1000:1000 /home/vagrant/.kube/config

        # copy kubeconfig for root
        sudo cp /etc/kubernetes/admin.conf /root/.kube/config
        sudo chown vagrant:vagrant -R /home/vagrant/.kube

        # copy kubeconfig so we can share it with node-2
        sudo cp /etc/kubernetes/admin.conf ./kubelet.conf
    else
        sudo kubeadm join --token 123456.abcdefghijklmnop ${controller_ip_brackets}:6443

        # copy the kubeconfig file previously shared by the master
        sudo cp ./kubelet.conf /home/vagrant/.kube/config
        sudo cp ./kubelet.conf /var/lib/cilium/kubeconfig
        sudo chown 1000:1000 /home/vagrant/.kube/config

        # copy kubeconfig for root
        sudo cp ./kubelet.conf /root/.kube/config
        sudo chown vagrant:vagrant -R /home/vagrant/.kube

        # remove the master taint from all nodes so that pods can be
        # scheduled on every node
        kubectl taint nodes --all node-role.kubernetes.io/master-
    fi
}
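
# Once start_kubeadm has run on both VMs, cluster membership can be verified
# with e.g. "kubectl get nodes"; k8s-1 and k8s-2 should eventually report
# Ready (only after a CNI plugin such as cilium has been deployed).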

function install_kubeadm_dependencies(){
    # This hack may be removed when the box images are based on Ubuntu 17.10+.
    curl -O -s http://old-releases.ubuntu.com/ubuntu/pool/universe/s/socat/socat_1.7.3.1-2_amd64.deb
    sudo dpkg -i ./socat_1.7.3.1-2_amd64.deb
    sudo bash -c "cat <<EOF > /etc/apt/sources.list
deb http://old-releases.ubuntu.com/ubuntu/ yakkety main restricted
deb http://old-releases.ubuntu.com/ubuntu/ yakkety-updates main restricted
deb http://old-releases.ubuntu.com/ubuntu/ yakkety universe
deb http://old-releases.ubuntu.com/ubuntu/ yakkety-updates universe
deb http://old-releases.ubuntu.com/ubuntu/ yakkety multiverse
deb http://old-releases.ubuntu.com/ubuntu/ yakkety-updates multiverse
deb http://old-releases.ubuntu.com/ubuntu/ yakkety-backports main restricted universe multiverse
EOF
"
    sudo touch /etc/apt/sources.list.d/kubernetes.list
    curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo bash -c "cat <<EOF > /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
"
    sudo apt-get -qq update && sudo apt-get -qq install -y apt-transport-https docker-engine
    sudo usermod -aG docker vagrant
}

function install_kubeadm() {
    sudo apt-get -qq install --allow-downgrades -y kubelet=${k8s_version} kubeadm=${k8s_version} kubectl=${k8s_version} kubernetes-cni
}
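
# If the VMs receive unattended upgrades, the pinned versions can additionally
# be frozen via the standard apt mechanism (not done by the test suite itself):
#   sudo apt-mark hold kubelet kubeadm kubectl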

function start_etcd(){
    sudo systemctl daemon-reload
    sudo systemctl enable etcd
    sudo systemctl start etcd
    sudo systemctl status etcd --no-pager
}

function clean_etcd(){
    sudo service etcd stop
    sudo rm -fr /var/lib/etcd
}

function clean_kubeadm(){
    sudo kubeadm reset
    sudo docker rm -f $(sudo docker ps -aq) 2>/dev/null
}

function fresh_install(){
    while getopts ":-:" opt; do
      case $opt in
        "-")
          case "${OPTARG}" in
            "ipv6")
              ipv6="ipv6"
            ;;
          esac
        ;;
      esac
    done
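    # Note: getopts has no native long-option support; the ":-:" optstring
    # above makes "--ipv6" arrive as option "-" with OPTARG="ipv6", which
    # the nested case statement then matches. The same pattern is used in
    # reinstall and deploy_cilium below.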

    get_options "${ipv6}"

    if [[ "$(hostname)" == "k8s-1" ]]; then
        install_etcd
        copy_etcd_certs
        generate_etcd_config
        start_etcd
    fi
    install_kubeadm_dependencies
    install_kubeadm

    clean_kubeadm
    start_kubeadm
}

function reinstall(){
    while getopts ":-:" opt; do
      case $opt in
        "-")
          case "${OPTARG}" in
            "yes-delete-all-data")
              clean_etcd_opt=1
              clean_kubeadm_opt=1
            ;;
            "yes-delete-etcd-data")
              clean_etcd_opt=1
            ;;
            "yes-delete-kubeadm-data")
              clean_kubeadm_opt=1
            ;;
            "reinstall-kubeadm")
              clean_kubeadm_opt=1
              reinstall_kubeadm_opt=1
            ;;
            "ipv6")
              ipv6="ipv6"
            ;;
          esac
        ;;
      esac
    done

    get_options "${ipv6}"

    if [[ -n "${clean_etcd_opt}" ]]; then
        clean_etcd
    fi
    if [[ -n "${clean_kubeadm_opt}" ]]; then
        clean_kubeadm
    fi
    if [[ -n "${reinstall_kubeadm_opt}" ]]; then
        install_kubeadm
    fi

    if [[ "$(hostname)" == "k8s-1" ]]; then
        copy_etcd_certs
        generate_etcd_config
        start_etcd
    fi
    start_kubeadm
}

function deploy_cilium(){
    while getopts ":-:" opt; do
      case $opt in
        "-")
          case "${OPTARG}" in
            "lb-mode")
              lb=1
            ;;
          esac
        ;;
      esac
    done

    source "${dir}/env.bash"

    rm -f "${cilium_dir}/cilium-lb-ds.yaml" \
          "${cilium_dir}/cilium.yaml"

    if [[ -n "${lb}" ]]; then
        # In loadbalancer mode we set the snoop and LB interface to
        # enp0s8, the interface with IP 192.168.36.11.
        iface='enp0s8'

        # FIXME: do we still need LB tests?

        sed -e "s+\$disable_ipv4+${disable_ipv4}+g;\
                s+\$iface+${iface}+g" \
            "${cilium_dir}/cilium-lb-ds.yaml.sed" > "${cilium_dir}/cilium-lb-ds.yaml"

        kubectl create -f "${cilium_dir}/cilium-lb-ds.yaml"

        wait_for_daemon_set_ready kube-system cilium 1
    else
        # We still need to make some small modifications to the original
        # cilium manifest
        sed -e "s+- http://127.0.0.1:2379+- https://${controller_ip_brackets}:2379+g;\
                s+image: cilium/cilium:stable+image: cilium/cilium:${docker_image_tag}+g;\
                s+imagePullPolicy: Always+imagePullPolicy: Never+g;\
                s+debug: \"false\"+debug: \"true\"+g;\
                s+#trusted-ca-file: '+trusted-ca-file: '+g;\
                s+etcd-ca: \"\"+etcd-ca: \""$(base64 -w 0 "${certs_dir}/ca.pem")"\"+g" \
            "${cilium_original}" > "${cilium_dir}/cilium.yaml"

        kubectl create -f "${cilium_dir}/cilium.yaml"

        wait_for_daemon_set_ready kube-system cilium 2
    fi

    echo "lb='${lb}'" >> "${dir}/env.bash"
}
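
# A quick manual check after deploy_cilium (the pod label is assumed from the
# upstream cilium.yaml manifest):
#   kubectl -n kube-system get ds
#   kubectl -n kube-system get pods -l k8s-app=cilium -o wide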

function remove_cilium_ds(){
    kubectl delete -f "${cilium_dir}" || true
}

case "$1" in
        generate_certs)
            generate_certs
            ;;
        fresh_install)
            shift
            fresh_install "$@"
            ;;
        reinstall)
            shift
            reinstall "$@"
            ;;
        deploy_cilium)
            shift
            deploy_cilium "$@"
            ;;
        remove_cilium_ds)
            shift
            remove_cilium_ds "$@"
            ;;
        *)
            echo $"Usage: $0 {generate_certs | fresh_install [--ipv6] | \
reinstall [--yes-delete-all-data] [--yes-delete-etcd-data] [--yes-delete-kubeadm-data] \
[--ipv6] [--reinstall-kubeadm] | \
deploy_cilium [--lb-mode] | \
remove_cilium_ds}"
            exit 1
esac
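
# Example invocations (illustrative), matching the dispatch above:
#   ./cluster-manager.bash generate_certs
#   ./cluster-manager.bash fresh_install --ipv6
#   ./cluster-manager.bash reinstall --yes-delete-all-data --reinstall-kubeadm
#   ./cluster-manager.bash deploy_cilium --lb-mode
#   ./cluster-manager.bash remove_cilium_ds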