github.com/datadog/cilium@v1.6.12/contrib/vagrant/start.sh

#!/usr/bin/env bash

dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )

# Master's IPv4 address. Each worker's IPv4 address is the previous node's
# incremented by 1. The netmask used will be /24.
export 'MASTER_IPV4'=${MASTER_IPV4:-"192.168.33.11"}
# NFS address is only set if the NFS option is active. This will create a new
# network interface for each VM, starting on this IP. This IP will be
# reachable from the host.
export 'MASTER_IPV4_NFS'=${MASTER_IPV4_NFS:-"192.168.34.11"}
# Enable IPv4 mode. It's enabled by default since it's required for several
# runtime tests.
export 'IPV4'=${IPV4:-1}
# Exposed IPv6 node CIDR, only set if IPV4 is disabled. Each node will be set
# up with an IPv6 network reachable from the host at $IPV6_PUBLIC_CIDR +
# 6to4($MASTER_IPV4). For IPv4 "192.168.33.11" we will have for example:
#   master  : FD00::B/16
#   worker 1: FD00::C/16
# The netmask used will be /16.
export 'IPV6_PUBLIC_CIDR'=${IPV4+"FD00::"}
# Internal IPv6 node CIDR, always set up by default. Each node will be set up
# with an IPv6 network reachable from the host at $IPV6_INTERNAL_CIDR +
# 6to4($MASTER_IPV4). For IPv4 "192.168.33.11" we will have for example:
#   master  : FD01::B/16
#   worker 1: FD01::C/16
# The netmask used will be /16.
export 'IPV6_INTERNAL_CIDR'=${IPV4+"FD01::"}
# Cilium IPv6 node CIDR. Each node will be set up with an IPv6 network of
# $CILIUM_IPV6_NODE_CIDR + 6to4($MASTER_IPV4). For IPv4 "192.168.33.11" we will
# have for example:
#   master  : FD02::0:0:0/96
#   worker 1: FD02::1:0:0/96
export 'CILIUM_IPV6_NODE_CIDR'=${CILIUM_IPV6_NODE_CIDR:-"FD02::"}
# VM memory
export 'VM_MEMORY'=${MEMORY:-4096}
# Number of CPUs
export 'VM_CPUS'=${CPUS:-2}
# VM_BASENAME is "k8s" when the K8S option is active, "runtime" otherwise.
# ${K8S+"k8s"} expands to the empty string when K8S is unset, so the
# "runtime" default is applied afterwards with :-.
export 'VM_BASENAME'=${K8S+"k8s"}
export 'VM_BASENAME'=${VM_BASENAME:-"runtime"}
# Set VAGRANT_DEFAULT_PROVIDER to virtualbox
export 'VAGRANT_DEFAULT_PROVIDER'=${VAGRANT_DEFAULT_PROVIDER:-"virtualbox"}
# Sets the default cilium TUNNEL_MODE to "vxlan"
export 'TUNNEL_MODE_STRING'=${TUNNEL_MODE_STRING:-"-t vxlan"}
# Replies yes to all prompts asked by this script
export 'YES_TO_ALL'=${YES_TO_ALL:-"0"}
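
# Example invocation (the exact flags are illustrative; NWORKERS and K8S are
# read from the environment by this script and the Vagrantfile):
#   NWORKERS=1 K8S=1 ./contrib/vagrant/start.sh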

# Internal variables used in the Vagrantfile
export 'CILIUM_SCRIPT'=true
# Sets the directory where the temporary setup scripts are created
export 'CILIUM_TEMP'="${dir}"

# Sets the VM's wget command to use HTTPS_PROXY
export 'VM_PROXY'="${VM_SET_PROXY}"

# Sets the RELOAD env variable to 1 if vagrant status reports VMs and none
# of them is listed as "not created".
function set_reload_if_vm_exists(){
    if [ -z "${RELOAD}" ]; then
        if [[ $(vagrant status 2>/dev/null | wc -l) -gt 1 ]] && \
           ! vagrant status 2>/dev/null | grep -q "not created"; then
            RELOAD=1
        fi
    fi
}
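
# Example: on a fresh checkout `vagrant status` lists every VM as
# "not created", so RELOAD stays unset and a plain `vagrant up` runs at the
# bottom of this script; once the VMs exist, later runs take `vagrant reload`.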

# split_ipv4 splits an IPv4 address into a bash array and assigns it to ${1}.
# Exits if ${2} is an invalid IPv4 address.
function split_ipv4(){
    IFS='.' read -r -a ipv4_array <<< "${2}"
    if [[ "${#ipv4_array[@]}" -ne 4 ]]; then
        echo "Invalid IPv4 address: ${2}"
        exit 1
    fi
    eval "${1}=( ${ipv4_array[@]} )"
}
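
# Example: `split_ipv4 parts "192.168.33.11"` leaves parts=(192 168 33 11).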

# get_cilium_node_addr sets the cilium node address in ${1} for the IPv4
# address in ${2}.
function get_cilium_node_addr(){
    split_ipv4 ipv4_array "${2}"
    hexIPv4=$(printf "%02X%02X:%02X%02X" "${ipv4_array[0]}" "${ipv4_array[1]}" "${ipv4_array[2]}" "${ipv4_array[3]}")
    eval "${1}=${CILIUM_IPV6_NODE_CIDR}${hexIPv4}:0:0"
}
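
# Example: with the default CILIUM_IPV6_NODE_CIDR of FD02::,
# `get_cilium_node_addr addr "192.168.33.11"` sets addr to
# FD02::C0A8:210B:0:0 (C0A8:210B is 192.168.33.11 in hex).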

# write_netcfg_header creates the file in ${3} and writes the internal network
# configuration for the vm IP ${1}. Sets the master's hostname with the IPv6
# address in ${2}.
function write_netcfg_header(){
    vm_ipv6="${1}"
    master_ipv6="${2}"
    filename="${3}"
    cat <<EOF > "${filename}"
#!/usr/bin/env bash

if [ -n "${K8S}" ]; then
    export K8S="1"
fi

# Use of the IPv6 'documentation block' to provide an example
ip -6 a a ${vm_ipv6}/16 dev enp0s8

echo '${master_ipv6} ${VM_BASENAME}1' >> /etc/hosts
sysctl -w net.ipv6.conf.all.forwarding=1
EOF
}
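
# Example: for the master node with the defaults above, the generated script
# contains:
#   ip -6 a a FD01::0B/16 dev enp0s8
#   echo 'FD01::0B runtime1' >> /etc/hosts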

# write_master_route writes the cilium IPv4 and IPv6 routes for the master in
# ${6}. Uses the IPv4 suffix in ${1} for the IPv4 route and the cilium IPv6 in
# ${2} via ${3} for the IPv6 route. Sets the worker's hostname based on the
# index defined in ${4} with the IPv6 defined in ${5}.
function write_master_route(){
    master_ipv4_suffix="${1}"
    master_cilium_ipv6="${2}"
    master_ipv6="${3}"
    node_index="${4}"
    worker_ipv6="${5}"
    filename="${6}"
    if [ -z "${K8S}" ]; then
        cat <<EOF >> "${filename}"
# Master route
ip r a 10.${master_ipv4_suffix}.0.1/32 dev enp0s8
ip r a 10.${master_ipv4_suffix}.0.0/16 via 10.${master_ipv4_suffix}.0.1
EOF
    fi

    cat <<EOF >> "${filename}"
echo "${worker_ipv6} ${VM_BASENAME}${node_index}" >> /etc/hosts

EOF
}
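
# Example: in runtime (non-K8S) mode with the default master suffix of 11,
# worker 2's generated script gains:
#   ip r a 10.11.0.1/32 dev enp0s8
#   ip r a 10.11.0.0/16 via 10.11.0.1
#   echo "FD01::0C runtime2" >> /etc/hosts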

# write_nodes_routes writes to the file in ${3} the routes for all nodes in
# the cluster except the node with index ${1}. All routes are based on the
# IPv4 address defined in ${2}.
function write_nodes_routes(){
    node_index="${1}"
    base_ipv4_addr="${2}"
    filename="${3}"
    cat <<EOF >> "${filename}"
# Node's routes
EOF
    split_ipv4 ipv4_array "${base_ipv4_addr}"
    local i
    local index=1
    for i in $(seq $(( ipv4_array[3] + 1 )) $(( ipv4_array[3] + NWORKERS ))); do
        index=$(( index + 1 ))
        hexIPv4=$(printf "%02X%02X:%02X%02X" "${ipv4_array[0]}" "${ipv4_array[1]}" "${ipv4_array[2]}" "${i}")
        if [ "${node_index}" -eq "${index}" ]; then
            continue
        fi
        worker_internal_ipv6=${IPV6_INTERNAL_CIDR}$(printf "%02X" "${i}")
        if [ -z "${K8S}" ]; then
            cat <<EOF >> "${filename}"
ip r a 10.${i}.0.0/16 via 10.${i}.0.1
ip r a 10.${i}.0.1/32 dev enp0s8
EOF
        fi

        cat <<EOF >> "${filename}"
echo "${worker_internal_ipv6} ${VM_BASENAME}${index}" >> /etc/hosts
EOF
    done

    cat <<EOF >> "${filename}"

EOF
}
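
# Example: with NWORKERS=2 and the default MASTER_IPV4, node 1 receives
# routes for 10.12.0.0/16 and 10.13.0.0/16, while each worker skips the
# entry for its own index.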

# write_k8s_header creates the file in ${2} and writes the k8s configuration.
# Sets up the k8s temporary directory inside the VM with ${1}.
function write_k8s_header(){
    k8s_dir="${1}"
    filename="${2}"
    cat <<EOF > "${filename}"
#!/usr/bin/env bash

set -e

# K8s installation
sudo apt-get -y install curl
mkdir -p "${k8s_dir}"
cd "${k8s_dir}"

EOF
}

# write_k8s_install writes the first half of the k8s installation in ${2} and
# the second half in ${3}. Changes the owner and group of the k8s temporary
# directory inside the VM, defined in ${1}, to vagrant.
function write_k8s_install() {
    k8s_dir="${1}"
    filename="${2}"
    filename_2nd_half="${3}"
    if [[ -n "${IPV6_EXT}" ]]; then
        # The k8s cluster cidr will be /80.
        # It can be any value as long as it's lower than /96.
        # k8s will assign each node a cidr, for example:
        #   master  : FD02::0:0:0/96
        #   worker 1: FD02::1:0:0/96
        #   worker 2: FD02::2:0:0/96
        k8s_cluster_cidr="FD02::/80"
        k8s_node_cidr_mask_size="96"
        k8s_service_cluster_ip_range="FD03::/112"
        k8s_cluster_api_server_ip="FD03::1"
        k8s_cluster_dns_ip="FD03::A"
    fi
    k8s_cluster_cidr=${k8s_cluster_cidr:-"10.16.0.0/12"}
    k8s_node_cidr_mask_size=${k8s_node_cidr_mask_size:-"16"}
    k8s_service_cluster_ip_range=${k8s_service_cluster_ip_range:-"172.20.0.0/24"}
    k8s_cluster_api_server_ip=${k8s_cluster_api_server_ip:-"172.20.0.1"}
    k8s_cluster_dns_ip=${k8s_cluster_dns_ip:-"172.20.0.10"}

    cat <<EOF >> "${filename}"
# K8s
k8s_path="/home/vagrant/go/src/github.com/cilium/cilium/examples/kubernetes-ingress/scripts"
export IPV6_EXT="${IPV6_EXT}"
export K8S_CLUSTER_CIDR="${k8s_cluster_cidr}"
export K8S_NODE_CIDR_MASK_SIZE="${k8s_node_cidr_mask_size}"
export K8S_SERVICE_CLUSTER_IP_RANGE="${k8s_service_cluster_ip_range}"
export K8S_CLUSTER_API_SERVER_IP="${k8s_cluster_api_server_ip}"
export K8S_CLUSTER_DNS_IP="${k8s_cluster_dns_ip}"
export RUNTIME="${RUNTIME}"
# Only do the installation if RELOAD is not set
if [ -z "${RELOAD}" ]; then
    export INSTALL="1"
fi

if [ -n "${VM_PROXY}" ]; then
    export WGET="HTTPS_PROXY=${VM_PROXY} wget"
else
    export WGET="wget"
fi
export ETCD_CLEAN="${ETCD_CLEAN}"

# Stop cilium until kubelet is installed. This prevents cilium from
# allocating its own podCIDR instead of using the kubernetes-allocated
# podCIDR.
sudo service cilium stop
EOF
    cat <<EOF >> "${filename}"
if [[ "\$(hostname)" == "${VM_BASENAME}1" ]]; then
    echo "\$(hostname)"
    "\${k8s_path}/00-create-certs.sh"
    "\${k8s_path}/01-install-etcd.sh"
    "\${k8s_path}/02-install-kubernetes-master.sh"
fi
# All nodes are kubernetes workers
"\${k8s_path}/03-install-kubernetes-worker.sh"
"\${k8s_path}/04-install-kubectl.sh"
chown vagrant:vagrant -R "${k8s_dir}"

EOF

    cat <<EOF > "${filename_2nd_half}"
#!/usr/bin/env bash
# K8s installation 2nd half
k8s_path="/home/vagrant/go/src/github.com/cilium/cilium/examples/kubernetes-ingress/scripts"
export IPV6_EXT="${IPV6_EXT}"
export K8S_CLUSTER_CIDR="${k8s_cluster_cidr}"
export K8S_NODE_CIDR_MASK_SIZE="${k8s_node_cidr_mask_size}"
export K8S_SERVICE_CLUSTER_IP_RANGE="${k8s_service_cluster_ip_range}"
export K8S_CLUSTER_API_SERVER_IP="${k8s_cluster_api_server_ip}"
export K8S_CLUSTER_DNS_IP="${k8s_cluster_dns_ip}"
export RUNTIME="${RUNTIME}"
export K8STAG="${VM_BASENAME}"
export NWORKERS="${NWORKERS}"
# Only do the installation if RELOAD is not set
if [ -z "${RELOAD}" ]; then
    export INSTALL="1"
fi

if [ -n "${VM_PROXY}" ]; then
    export WGET="HTTPS_PROXY=${VM_PROXY} wget"
else
    export WGET="wget"
fi
export ETCD_CLEAN="${ETCD_CLEAN}"

cd "${k8s_dir}"
"\${k8s_path}/05-install-cilium.sh"
if [[ "\$(hostname)" == "${VM_BASENAME}1" ]]; then
    "\${k8s_path}/06-install-coredns.sh"
else
    "\${k8s_path}/04-install-kubectl.sh"
fi
EOF
}

function write_cilium_cfg() {
    node_index="${1}"
    master_ipv4_suffix="${2}"
    ipv6_addr="${3}"
    filename="${4}"

    cilium_options=" --debug --pprof --enable-k8s-event-handover --k8s-require-ipv4-pod-cidr --auto-direct-node-routes"
    cilium_operator_options=" --debug"

    if [[ "${IPV4}" -eq "1" ]]; then
        if [[ -z "${K8S}" ]]; then
            cilium_options+=" --ipv4-range 10.${master_ipv4_suffix}.0.0/16"
        fi
    else
        cilium_options+=" --enable-ipv4=false"
    fi

    if [ -n "${K8S}" ]; then
        cilium_options+=" --k8s-kubeconfig-path /var/lib/cilium/cilium.kubeconfig"
        cilium_options+=" --kvstore etcd"
        cilium_options+=" --kvstore-opt etcd.config=/var/lib/cilium/etcd-config.yml"
        cilium_operator_options+=" --k8s-kubeconfig-path /var/lib/cilium/cilium.kubeconfig"
        cilium_operator_options+=" --kvstore etcd"
        cilium_operator_options+=" --kvstore-opt etcd.config=/var/lib/cilium/etcd-config.yml"
    else
        if [[ "${IPV4}" -eq "1" ]]; then
            cilium_options+=" --kvstore-opt consul.address=${MASTER_IPV4}:8500"
            cilium_operator_options+=" --kvstore-opt consul.address=${MASTER_IPV4}:8500"
        else
            cilium_options+=" --kvstore-opt consul.address=[${ipv6_addr}]:8500"
            cilium_operator_options+=" --kvstore-opt consul.address=[${ipv6_addr}]:8500"
        fi
        cilium_options+=" --kvstore consul"
        cilium_operator_options+=" --kvstore consul"
    fi
    # container runtime options
    case "${RUNTIME}" in
        "containerd" | "containerD")
            cilium_options+=" --container-runtime=containerd --container-runtime-endpoint=containerd=/var/run/containerd/containerd.sock"
            cat <<EOF >> "$filename"
sed -i '4s+.*++' /lib/systemd/system/cilium.service
EOF
            ;;
        "crio" | "cri-o")
            cilium_options+=" --container-runtime=crio --container-runtime-endpoint=crio=/var/run/crio/crio.sock"
            ;;
        *)
            cilium_options+=" --container-runtime=docker --container-runtime-endpoint=docker=unix:///var/run/docker.sock"
            ;;
    esac

    if [ "$LB" = 1 ]; then
        # The LB interface needs to be "exposed" to the host
        # only for the master node.
        # FIXME GH-1054
#        if [ $((node_index)) -eq 1 ]; then
#            ubuntu_1604_interface="-d enp0s9"
#            ubuntu_1604_cilium_lb="--lb enp0s9"
#        else
            ubuntu_1604_interface="-d enp0s9"
            ubuntu_1604_cilium_lb=""
#        fi
    else
        cilium_options+=" ${TUNNEL_MODE_STRING}"
    fi

    cilium_options+=" --access-log=/var/log/cilium-access.log"

    cat <<EOF >> "$filename"
sleep 2s
echo "K8S_NODE_NAME=\$(hostname)" >> /etc/sysconfig/cilium
echo 'CILIUM_OPTS="${ubuntu_1604_cilium_lb} ${ubuntu_1604_interface} ${cilium_options}"' >> /etc/sysconfig/cilium
echo 'CILIUM_OPERATOR_OPTS="${cilium_operator_options}"' >> /etc/sysconfig/cilium
echo 'PATH=/usr/local/sbin:/usr/local/bin:/usr/bin:/usr/sbin:/sbin:/bin' >> /etc/sysconfig/cilium
chmod 644 /etc/sysconfig/cilium

# Wait for the node to have a podCIDR so that cilium can use the podCIDR
# allocated by k8s
if [ -n "\${K8S}" ]; then
    for ((i = 0 ; i < 24; i++)); do
        if kubectl get nodes -o json | grep -i podCIDR > /dev/null 2>&1; then
            podCIDR=true
            break
        fi
        sleep 5s
        echo "Waiting for kubernetes node \$(hostname) to have a podCIDR"
    done
fi

systemctl daemon-reload
service cilium restart
/home/vagrant/go/src/github.com/cilium/cilium/test/provision/wait-cilium.sh
EOF
}
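
# Example: in the default runtime mode (no K8S, IPV4=1, docker runtime), the
# CILIUM_OPTS written to /etc/sysconfig/cilium for the master include, in
# addition to the common flags above:
#   --ipv4-range 10.11.0.0/16
#   --kvstore-opt consul.address=192.168.33.11:8500 --kvstore consul
#   -t vxlan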

function create_master(){
    split_ipv4 ipv4_array "${MASTER_IPV4}"
    get_cilium_node_addr master_cilium_ipv6 "${MASTER_IPV4}"
    output_file="${dir}/node-1.sh"
    write_netcfg_header "${MASTER_IPV6}" "${MASTER_IPV6}" "${output_file}"

    if [ -n "${NWORKERS}" ]; then
        write_nodes_routes 1 "${MASTER_IPV4}" "${output_file}"
    fi

    write_cilium_cfg 1 "${ipv4_array[3]}" "${master_cilium_ipv6}" "${output_file}"
    echo "service cilium-operator restart" >> "${output_file}"
}

function create_workers(){
    split_ipv4 ipv4_array "${MASTER_IPV4}"
    master_prefix_ip="${ipv4_array[3]}"
    get_cilium_node_addr master_cilium_ipv6 "${MASTER_IPV4}"
    base_workers_ip=$(printf "%d.%d.%d." "${ipv4_array[0]}" "${ipv4_array[1]}" "${ipv4_array[2]}")
    if [ -n "${NWORKERS}" ]; then
        for i in $(seq 2 $(( NWORKERS + 1 ))); do
            output_file="${dir}/node-${i}.sh"
            worker_ip_suffix=$(( ipv4_array[3] + i - 1 ))
            worker_ipv6=${IPV6_INTERNAL_CIDR}$(printf '%02X' "${worker_ip_suffix}")
            worker_host_ipv6=${IPV6_PUBLIC_CIDR}$(printf '%02X' "${worker_ip_suffix}")
            ipv6_public_workers_addrs+=("${worker_host_ipv6}")

            write_netcfg_header "${worker_ipv6}" "${MASTER_IPV6}" "${output_file}"

            write_master_route "${master_prefix_ip}" "${master_cilium_ipv6}" \
                "${MASTER_IPV6}" "${i}" "${worker_ipv6}" "${output_file}"
            write_nodes_routes "${i}" "${MASTER_IPV4}" "${output_file}"

            worker_cilium_ipv4="${base_workers_ip}${worker_ip_suffix}"
            get_cilium_node_addr worker_cilium_ipv6 "${worker_cilium_ipv4}"
            write_cilium_cfg "${i}" "${worker_ip_suffix}" "${worker_cilium_ipv6}" "${output_file}"
        done
    fi
}

# create_k8s_config creates k8s config
function create_k8s_config(){
    if [ -n "${K8S}" ]; then
        k8s_temp_dir="/home/vagrant/k8s"
        output_file="${dir}/cilium-k8s-install-1st-part.sh"
        output_2nd_file="${dir}/cilium-k8s-install-2nd-part.sh"
        write_k8s_header "${k8s_temp_dir}" "${output_file}"
        write_k8s_install "${k8s_temp_dir}" "${output_file}" "${output_2nd_file}"
    fi
}

# set_vagrant_env sets up the Vagrantfile environment variables
function set_vagrant_env(){
    split_ipv4 ipv4_array "${MASTER_IPV4}"
    export 'IPV4_BASE_ADDR'="$(printf "%d.%d.%d." "${ipv4_array[0]}" "${ipv4_array[1]}" "${ipv4_array[2]}")"
    export 'FIRST_IP_SUFFIX'="${ipv4_array[3]}"
    export 'MASTER_IPV6_PUBLIC'="${IPV6_PUBLIC_CIDR}$(printf '%02X' "${ipv4_array[3]}")"

    split_ipv4 ipv4_array_nfs "${MASTER_IPV4_NFS}"
    export 'IPV4_BASE_ADDR_NFS'="$(printf "%d.%d.%d." "${ipv4_array_nfs[0]}" "${ipv4_array_nfs[1]}" "${ipv4_array_nfs[2]}")"
    export 'FIRST_IP_SUFFIX_NFS'="${ipv4_array[3]}"
    if [[ -n "${NFS}" ]]; then
        echo "# NFS enabled. Don't forget to open these ports on your host"
        echo "# before starting the VMs in order to have NFS working:"
        echo "# iptables -I INPUT -p udp -s ${IPV4_BASE_ADDR_NFS}0/24 --dport 111 -j ACCEPT"
        echo "# iptables -I INPUT -p udp -s ${IPV4_BASE_ADDR_NFS}0/24 --dport 2049 -j ACCEPT"
        echo "# iptables -I INPUT -p udp -s ${IPV4_BASE_ADDR_NFS}0/24 --dport 20048 -j ACCEPT"
    fi

    temp=$(printf " %s" "${ipv6_public_workers_addrs[@]}")
    export 'IPV6_PUBLIC_WORKERS_ADDRS'="${temp:1}"
    if [[ "${IPV4}" -ne "1" ]]; then
        export 'IPV6_EXT'=1
    fi
}

# vboxnet_create_new_interface creates a new host-only network interface with
# the VBoxManage utility. Returns the created interface name in ${1}.
function vboxnet_create_new_interface(){
    output=$(VBoxManage hostonlyif create)
    vboxnet_interface=$(echo "${output}" | grep -oE "'[a-zA-Z0-9]+'" | sed "s/'//g")
    if [ -z "${vboxnet_interface}" ]; then
        echo "Unable to create VBox hostonly interface:"
        echo "${output}"
        return
    fi
    eval "${1}=${vboxnet_interface}"
}

# vboxnet_add_ipv6 adds the IPv6 in ${2} with the netmask length in ${3} to
# the hostonly network interface set in ${1}.
function vboxnet_add_ipv6(){
    vboxnetif="${1}"
    ipv6="${2}"
    ipv6_mask="${3}"
    VBoxManage hostonlyif ipconfig "${vboxnetif}" \
        --ipv6 "${ipv6}" --netmasklengthv6 "${ipv6_mask}"
}

# vboxnet_add_ipv4 adds the IPv4 in ${2} with the netmask in ${3} to the
# hostonly network interface set in ${1}.
function vboxnet_add_ipv4(){
    vboxnetif="${1}"
    ipv4="${2}"
    ipv4_mask="${3}"
    VBoxManage hostonlyif ipconfig "${vboxnetif}" \
        --ip "${ipv4}" --netmask "${ipv4_mask}"
}
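
# Example (the interface name is illustrative):
#   vboxnet_add_ipv6 "vboxnet0" "FD00::1" 64
#   vboxnet_add_ipv4 "vboxnet0" "192.168.34.1" "255.255.255.0"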

# vboxnet_addr_finder checks if any vboxnet interface has the IPv6 public CIDR
function vboxnet_addr_finder(){
    if [ -z "${IPV6_EXT}" ] && [ -z "${NFS}" ]; then
        return
    fi

    all_vbox_interfaces=$(VBoxManage list hostonlyifs | grep -E "^Name|IPV6Address|IPV6NetworkMaskPrefixLength" | awk -F" " '{print $2}')
    # all_vbox_interfaces format example:
    # vboxnet0
    # fd00:0000:0000:0000:0000:0000:0000:0001
    # 64
    # vboxnet1
    # fd05:0000:0000:0000:0000:0000:0000:0001
    # 16
    if [[ -n "${RELOAD}" ]]; then
        all_ifaces=$(echo "${all_vbox_interfaces}" | awk 'NR % 3 == 1')
        if [[ -n "${all_ifaces}" ]]; then
            while read -r iface; do
                iface_addresses=$(ip addr show "$iface" | grep inet6 | sed 's/.*inet6 \([a-fA-F0-9:/]\+\).*/\1/g')
                # iface_addresses format example:
                # fd00::1/64
                # fe80::800:27ff:fe00:2/64
                if [[ -z "${iface_addresses}" ]]; then
                    # No inet6 addresses
                    continue
                fi
                while read -r ip; do
                    if echo "${ip}" | grep -qi "${IPV6_PUBLIC_CIDR/::/:}"; then
                        found="1"
                        net_mask=$(echo "${ip}" | sed 's/.*\///')
                        vboxnetname="${iface}"
                        break
                    fi
                done <<< "${iface_addresses}"
                if [[ -n "${found}" ]]; then
                    break
                fi
            done <<< "${all_ifaces}"
        fi
    fi
    if [[ -z "${found}" ]]; then
        all_ipv6=$(echo "${all_vbox_interfaces}" | awk 'NR % 3 == 2')
        line_ip=0
        if [[ -n "${all_vbox_interfaces}" ]]; then
            while read -r ip; do
                line_ip=$(( line_ip + 1 ))
                if echo "${ip}" | grep -qi "${IPV6_PUBLIC_CIDR/::/:}"; then
                    found=${line_ip}
                    net_mask=$(echo "${all_vbox_interfaces}" | awk "NR == 3 * ${line_ip}")
                    vboxnetname=$(echo "${all_vbox_interfaces}" | awk "NR == 3 * ${line_ip} - 2")
                    break
                fi
            done <<< "${all_ipv6}"
        fi
    fi

    if [[ -z "${found}" ]]; then
        echo "WARN: VirtualBox interface with \"${IPV6_PUBLIC_CIDR}\" not found"
        if [ "${YES_TO_ALL}" -eq 0 ]; then
            read -r -p "Create a new VBox hostonly network interface? [y/N] " response
        else
            response="Y"
        fi
        case "${response}" in
            [yY])
                echo "Creating VBox hostonly network..."
            ;;
            *)
                exit
            ;;
        esac
        vboxnet_create_new_interface vboxnetname
        if [ -z "${vboxnet_interface}" ]; then
            exit 1
        fi
    elif [[ "${net_mask}" -ne 64 ]]; then
        echo "WARN: VirtualBox interface with \"${IPV6_PUBLIC_CIDR}\" found in ${vboxnetname}"
        echo "but with the wrong network mask (${net_mask} instead of 64)"
        if [ "${YES_TO_ALL}" -eq 0 ]; then
            read -r -p "Change network mask of '${vboxnetname}' to 64? [y/N] " response
        else
            response="Y"
        fi
        case "${response}" in
            [yY])
                echo "Changing network mask to 64..."
            ;;
            *)
                exit
            ;;
        esac
    fi
    split_ipv4 ipv4_array_nfs "${MASTER_IPV4_NFS}"
    IPV4_BASE_ADDR_NFS="$(printf "%d.%d.%d.1" "${ipv4_array_nfs[0]}" "${ipv4_array_nfs[1]}" "${ipv4_array_nfs[2]}")"
    vboxnet_add_ipv6 "${vboxnetname}" "${IPV6_PUBLIC_CIDR}1" 64
    vboxnet_add_ipv4 "${vboxnetname}" "${IPV4_BASE_ADDR_NFS}" "255.255.255.0"
}

if [[ "${VAGRANT_DEFAULT_PROVIDER}" == "virtualbox" ]]; then
    vboxnet_addr_finder
fi

ipv6_public_workers_addrs=()

split_ipv4 ipv4_array "${MASTER_IPV4}"
MASTER_IPV6="${IPV6_INTERNAL_CIDR}$(printf '%02X' "${ipv4_array[3]}")"
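# e.g. MASTER_IPV6=FD01::0B for the default MASTER_IPV4 of 192.168.33.11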

set_reload_if_vm_exists

create_master
create_workers
set_vagrant_env
create_k8s_config

cd "${dir}/../.."

if [ -n "${RELOAD}" ]; then
    vagrant reload
elif [ -n "${NO_PROVISION}" ]; then
    vagrant up --no-provision
elif [ -n "${PROVISION}" ]; then
    vagrant provision
else
    vagrant up
    if [ -n "${K8S}" ]; then
        vagrant ssh k8s1 -- cat /home/vagrant/.kube/config | sed 's;server:.*:6443;server: https://k8s1:7443;g' > vagrant.kubeconfig
        echo "Add '127.0.0.1 k8s1' to your /etc/hosts to use the vagrant.kubeconfig file with kubectl"
    fi
fi