k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/cluster/gce/gci/configure-helper.sh

#!/usr/bin/env bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script configures Kubernetes master and node instances. It is
# uploaded in the manifests tarball.

# TODO: this script duplicates templating logic from cluster/saltbase/salt
# using sed. It should use an actual template parser on the manifest
# files.

set -o errexit
set -o nounset
set -o pipefail

### Hardcoded constants
METADATA_SERVER_IP="${METADATA_SERVER_IP:-169.254.169.254}"

# Standard curl flags.
CURL_FLAGS='--fail --silent --show-error --retry 5 --retry-delay 3 --connect-timeout 10 --retry-connrefused'

function convert-manifest-params {
  # A helper function to convert the manifest args from a string to a list of
  # flag arguments.
  # Old format:
  #   command=["/bin/sh", "-c", "exec KUBE_EXEC_BINARY --param1=val1 --param2=val2"].
  # New format:
  #   command=["KUBE_EXEC_BINARY"]  # No shell dependencies.
  #   args=["--param1=val1", "--param2=val2"]
  IFS=' ' read -ra FLAGS <<< "$1"
  params=""
  for flag in "${FLAGS[@]}"; do
    params+="\n\"$flag\","
  done
  if [ -n "$params" ]; then
    echo "${params::-1}"  # drop trailing comma
  fi
}
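
# Example (illustrative flags, not from the original manifests):
#   convert-manifest-params "--v=2 --max-pods=110"
# prints the literal string
#   \n"--v=2",\n"--max-pods=110"
# with the trailing comma dropped; the sed templating that consumes this
# output expands the \n escapes into one quoted flag per manifest line.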

function append-param-if-not-present {
  # A helper function to add a flag to an arguments string
  # if no such flag is already present
  local params="$1"
  local -r flag="$2"
  local -r value="$3"
  if [[ ! "${params}" =~ "--${flag}"[=\ ] ]]; then
    params+=" --${flag}=${value}"
  fi
  echo "${params}"
}
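
# Example (illustrative values): append-param-if-not-present "--v=2" "max-pods" "110"
# prints "--v=2 --max-pods=110", while append-param-if-not-present "--v=2" "v" "4"
# prints "--v=2" unchanged because the flag is already present.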

function setup-os-params {
  # Reset core_pattern. On GCI, the default core_pattern pipes the core dumps to
  # /sbin/crash_reporter which is more restrictive in saving crash dumps. So for
  # now, set a generic core_pattern that users can work with.
  echo "/core.%e.%p.%t" > /proc/sys/kernel/core_pattern
}

# secure_random generates a secure random string of bytes. This function accepts
# the number of secure bytes desired and returns a base64 encoded string with at
# least the requested entropy. Rather than directly reading from /dev/urandom,
# we use uuidgen which calls getrandom(2). getrandom(2) verifies that the
# entropy pool has been initialized sufficiently for the desired operation
# before reading from /dev/urandom.
#
# ARGS:
#   #1: number of secure bytes to generate. We round up to the nearest multiple of 32.
function secure_random {
  local infobytes="${1}"
  if ((infobytes <= 0)); then
    echo "Invalid argument to secure_random: infobytes='${infobytes}'" 1>&2
    return 1
  fi

  local out=""
  for (( i = 0; i < "${infobytes}"; i += 32 )); do
    # uuids have 122 random bits, sha256 sums have 256 bits, so concatenate
    # three uuids and take their sha256 sum. The sum is encoded in ASCII hex,
    # hence the 64 character cut.
    out+="$(
     (
       uuidgen --random;
       uuidgen --random;
       uuidgen --random;
     ) | sha256sum \
       | head -c 64
    )";
  done
  # Finally, convert the ASCII hex to base64 to increase the density.
  echo -n "${out}" | xxd -r -p | base64 -w 0
}
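
# For example, secure_random 32 runs one loop iteration (64 hex chars = 32
# bytes of entropy) and emits a 44-character base64 string; secure_random 64
# loops twice and emits 88 characters.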

# Helper for configuring iptables rules for metadata server.
#
# $1 is the command flag (-I or -D).
# $2 is the firewall action (LOG or REJECT).
# $3 is the prefix for log output.
# $4 is "!" to optionally invert the uid range.
function gce-metadata-fw-helper {
  local -r command="$1"
  local action="$2"
  local -r prefix="$3"
  local -r invert="${4:-}"

  # Expand rule action to include relevant option flags.
  case "${action}" in
    LOG)
      action="LOG --log-prefix "${prefix}:" --log-uid --log-tcp-options --log-ip-option"
      ;;
  esac

  # Deliberately allow word split here
  # shellcheck disable=SC2086
  iptables -w ${command} OUTPUT -p tcp --dport 80 -d ${METADATA_SERVER_IP} -m owner ${invert:-} --uid-owner=${METADATA_SERVER_ALLOWED_UID_RANGE:-0-2999} -j ${action}
}
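
# Example: gce-metadata-fw-helper -I LOG "MetadataServerFirewallAccept"
# expands (with the default uid range) to roughly:
#   iptables -w -I OUTPUT -p tcp --dport 80 -d 169.254.169.254 -m owner \
#     --uid-owner=0-2999 -j LOG --log-prefix MetadataServerFirewallAccept: \
#     --log-uid --log-tcp-options --log-ip-option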

# WARNING: DO NOT USE THE FILTER TABLE! Some implementations of network policy
# think they own it and will stomp all over your changes. At this time, the
# mangle table is less contentious so use that if possible.
function config-ip-firewall {
  echo "Configuring IP firewall rules"

  # Do not consider loopback addresses as martian source or destination while
  # routing. This enables the use of 127/8 for local routing purposes.
  sysctl -w net.ipv4.conf.all.route_localnet=1

  # The GCI image has a host firewall which drops most inbound/forwarded
  # packets. We need to add rules to accept all TCP/UDP/ICMP/SCTP packets.
  if iptables -w -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then
    echo "Add rules to accept all inbound TCP/UDP/ICMP/SCTP packets"
    iptables -w -A INPUT -w -p TCP -j ACCEPT
    iptables -w -A INPUT -w -p UDP -j ACCEPT
    iptables -w -A INPUT -w -p ICMP -j ACCEPT
    iptables -w -A INPUT -w -p SCTP -j ACCEPT
  fi
  if iptables -w -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then
    echo "Add rules to accept all forwarded TCP/UDP/ICMP/SCTP packets"
    iptables -w -A FORWARD -w -p TCP -j ACCEPT
    iptables -w -A FORWARD -w -p UDP -j ACCEPT
    iptables -w -A FORWARD -w -p ICMP -j ACCEPT
    iptables -w -A FORWARD -w -p SCTP -j ACCEPT
  fi

  # Flush iptables nat table
  iptables -w -t nat -F || true

  if [[ "${NON_MASQUERADE_CIDR:-}" == "0.0.0.0/0" ]]; then
    echo "Add rules for ip masquerade"
    iptables -w -t nat -N IP-MASQ
    iptables -w -t nat -A POSTROUTING -m comment --comment "ip-masq: ensure nat POSTROUTING directs all non-LOCAL destination traffic to our custom IP-MASQ chain" -m addrtype ! --dst-type LOCAL -j IP-MASQ
    iptables -w -t nat -A IP-MASQ -d 169.254.0.0/16 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 10.0.0.0/8 -m comment --comment "ip-masq: RFC 1918 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 172.16.0.0/12 -m comment --comment "ip-masq: RFC 1918 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 192.168.0.0/16 -m comment --comment "ip-masq: RFC 1918 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 240.0.0.0/4 -m comment --comment "ip-masq: RFC 5735 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 192.0.2.0/24 -m comment --comment "ip-masq: RFC 5737 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 198.51.100.0/24 -m comment --comment "ip-masq: RFC 5737 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 203.0.113.0/24 -m comment --comment "ip-masq: RFC 5737 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 100.64.0.0/10 -m comment --comment "ip-masq: RFC 6598 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 198.18.0.0/15 -m comment --comment "ip-masq: RFC 6815 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 192.0.0.0/24 -m comment --comment "ip-masq: RFC 6890 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 192.88.99.0/24 -m comment --comment "ip-masq: RFC 7526 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -m comment --comment "ip-masq: outbound traffic is subject to MASQUERADE (must be last in chain)" -j MASQUERADE
  fi

  # If METADATA_CONCEALMENT_NO_FIREWALL is set, don't create a firewall on this
  # node because we don't expect the daemonset to run on this node.
  if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]] && [[ ! "${METADATA_CONCEALMENT_NO_FIREWALL:-}" == "true" ]]; then
    echo "Add rule for metadata concealment"
    ip addr add dev lo 169.254.169.252/32 scope host
    iptables -w -t nat -I PREROUTING -p tcp ! -i eth0 -d "${METADATA_SERVER_IP}" --dport 80 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 169.254.169.252:988
    iptables -w -t nat -I PREROUTING -p tcp ! -i eth0 -d "${METADATA_SERVER_IP}" --dport 8080 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 169.254.169.252:987
  fi
  iptables -w -t mangle -I OUTPUT -s 169.254.169.254 -j DROP

  # Log all metadata access not from approved processes.
  case "${METADATA_SERVER_FIREWALL_MODE:-off}" in
    log)
      echo "Installing metadata firewall logging rules"
      gce-metadata-fw-helper -I LOG "MetadataServerFirewallReject" !
      gce-metadata-fw-helper -I LOG "MetadataServerFirewallAccept"
      ;;
  esac
}

function create-dirs {
  echo "Creating required directories"
  mkdir -p /var/lib/kubelet
  mkdir -p /etc/kubernetes/manifests
  if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
    mkdir -p /var/lib/kube-proxy
  fi
}

# Gets the total number of ${1} (interface) and ${2} (format) type disks
# specified by the user in ${NODE_LOCAL_SSDS_EXT}. The result is returned in
# the global variable "localdisknum".
function get-local-disk-num() {
  local interface="${1}"
  local format="${2}"

  localdisknum=0
  if [[ -n "${NODE_LOCAL_SSDS_EXT:-}" ]]; then
    IFS=";" read -r -a ssdgroups <<< "${NODE_LOCAL_SSDS_EXT:-}"
    for ssdgroup in "${ssdgroups[@]}"; do
      IFS="," read -r -a ssdopts <<< "${ssdgroup}"
      local opnum="${ssdopts[0]}"
      local opinterface="${ssdopts[1]}"
      local opformat="${ssdopts[2]}"

      if [[ "${opformat,,}" == "${format,,}" && "${opinterface,,}" == "${interface,,}" ]]; then
        localdisknum=$((localdisknum+opnum))
      fi
    done
  fi
}
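
# Example: with NODE_LOCAL_SSDS_EXT="2,scsi,fs;1,nvme,block",
# get-local-disk-num "scsi" "fs" sets localdisknum=2 and
# get-local-disk-num "nvme" "block" sets localdisknum=1.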

# Creates a symlink for the given device ($1) under ${2} so that it may be
# used as block storage
function safe-block-symlink(){
  local device="${1}"
  local symdir="${2}"

  mkdir -p "${symdir}"

  get-or-generate-uuid "${device}"
  local myuuid="${retuuid}"

  local sym="${symdir}/local-ssd-${myuuid}"
  # Do not "mkdir -p ${sym}" as that will cause unintended symlink behavior
  ln -s "${device}" "${sym}"
  echo "Created a symlink for SSD ${device} at ${sym}"
  chmod a+w "${sym}"
}

# Gets a pregenerated UUID from ${ssdmap} if it exists, otherwise generates a
# new UUID and places it inside ${ssdmap}. The UUID is returned in the global
# variable "retuuid".
function get-or-generate-uuid(){
  local device="${1}"

  local ssdmap="/home/kubernetes/localssdmap.txt"
  echo "Generating or getting UUID from ${ssdmap}"

  if [[ ! -e "${ssdmap}" ]]; then
    touch "${ssdmap}"
    chmod +w "${ssdmap}"
  fi

  # each line of the ssdmap looks like "${device} persistent-uuid"
  local myuuid
  if grep -q "${device}" "${ssdmap}"; then
    # create symlink based on saved uuid
    myuuid=$(grep "${device}" "${ssdmap}" | cut -d ' ' -f 2)
  else
    # generate new uuid and add it to the map
    if ! myuuid=$(uuidgen); then
      echo "Failed to generate valid UUID with uuidgen" >&2
      exit 2
    fi
    echo "${device} ${myuuid}" >> "${ssdmap}"
  fi

  if [[ -z "${myuuid}" ]]; then
    echo "Failed to get a uuid for device ${device} when symlinking." >&2
    exit 2
  fi

  retuuid="${myuuid}"
}
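
# Example localssdmap.txt entry (hypothetical UUID):
#   /dev/disk/by-id/google-local-ssd-0 73841af8-ae62-4f52-9c26-36a135a136e8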

# Formats the given device ($1) if needed and mounts it at the given mount
# point ($2).
function safe-format-and-mount() {
  local device
  local mountpoint
  device="$1"
  mountpoint="$2"

  # Format only if the disk is not already formatted.
  if ! tune2fs -l "${device}" ; then
    echo "Formatting '${device}'"
    mkfs.ext4 -F "${device}"
  fi

  mkdir -p "${mountpoint}"
  echo "Mounting '${device}' at '${mountpoint}'"
  mount -o discard,defaults "${device}" "${mountpoint}"
  chmod a+w "${mountpoint}"
}

# Gets a device's UUID and bind mounts the device to a mount location in
# /mnt/disks/by-id/
function unique-uuid-bind-mount(){
  local mountpoint
  local actual_device
  mountpoint="$1"
  actual_device="$2"

  # Trigger udev refresh so that newly formatted devices are propagated in by-uuid
  udevadm control --reload-rules
  udevadm trigger
  udevadm settle

  # find uuid for actual_device
  local myuuid
  myuuid=$(find -L /dev/disk/by-uuid -maxdepth 1 -samefile /dev/"${actual_device}" -printf '%P')
  # myuuid should be the uuid of the device as found in /dev/disk/by-uuid/
  if [[ -z "${myuuid}" ]]; then
    echo "Failed to get a uuid for device ${actual_device} when mounting." >&2
    exit 2
  fi

  # bindpoint should be the full path of the to-be-bound device.
  # Note: ${interface} is inherited from the caller (mount-ext).
  local bindpoint="${UUID_MNT_PREFIX}-${interface}-fs/local-ssd-${myuuid}"

  safe-bind-mount "${mountpoint}" "${bindpoint}"
}

# Bind mounts device at mountpoint to bindpoint
function safe-bind-mount(){
  local mountpoint="${1}"
  local bindpoint="${2}"

  # Mount device to the mountpoint
  mkdir -p "${bindpoint}"
  echo "Binding '${mountpoint}' at '${bindpoint}'"
  mount --bind "${mountpoint}" "${bindpoint}"
  chmod a+w "${bindpoint}"
}

# Mounts, bind mounts, or symlinks depending on the interface and format
# of the incoming device
function mount-ext(){
  local ssd="${1}"
  local devicenum="${2}"
  local interface="${3}"
  local format="${4}"

  if [[ -z "${devicenum}" ]]; then
    echo "Failed to get the local disk number for device ${ssd}" >&2
    exit 2
  fi

  # TODO: Handle partitioned disks. Right now this code just ignores partitions
  if [[ "${format}" == "fs" ]]; then
    if [[ "${interface}" == "scsi" ]]; then
      local actual_device
      actual_device=$(readlink -f "${ssd}" | cut -d '/' -f 3)
      # Error checking
      if [[ "${actual_device}" != sd* ]]; then
        echo "'actual_device' is not of the correct format. It must be the kernel name of the device, got ${actual_device} instead" >&2
        exit 1
      fi
      local mountpoint="/mnt/disks/ssd${devicenum}"
    else
      # This path is required because the existing Google images do not
      # expose NVMe devices in /dev/disk/by-id, so we use /dev/nvme instead
      local actual_device
      actual_device=$(echo "${ssd}" | cut -d '/' -f 3)
      # Error checking
      if [[ "${actual_device}" != nvme* ]]; then
        echo "'actual_device' is not of the correct format. It must be the kernel name of the device, got ${actual_device} instead" >&2
        exit 1
      fi
      local mountpoint="/mnt/disks/ssd-nvme${devicenum}"
    fi

    safe-format-and-mount "${ssd}" "${mountpoint}"
    # We only do the bindmount if users are using the new local ssd request method
    # see https://github.com/kubernetes/kubernetes/pull/53466#discussion_r146431894
    if [[ -n "${NODE_LOCAL_SSDS_EXT:-}" ]]; then
      unique-uuid-bind-mount "${mountpoint}" "${actual_device}"
    fi
  elif [[ "${format}" == "block" ]]; then
    local symdir="${UUID_BLOCK_PREFIX}-${interface}-block"
    safe-block-symlink "${ssd}" "${symdir}"
  else
    echo "Disk format must be either fs or block, got ${format}" >&2
  fi
}
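
# Example: mount-ext "/dev/disk/by-id/google-local-ssd-0" "0" "scsi" "fs"
# formats (if needed) and mounts the disk at /mnt/disks/ssd0, while
# mount-ext "/dev/nvme0n1" "1" "nvme" "block" symlinks it under
# ${UUID_BLOCK_PREFIX}-nvme-block instead of mounting it.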

# Local ssds, if present, are mounted or symlinked to their appropriate
# locations
function ensure-local-ssds() {
  if [ "${NODE_LOCAL_SSDS_EPHEMERAL:-false}" == "true" ]; then
    ensure-local-ssds-ephemeral-storage
    return
  fi
  get-local-disk-num "scsi" "block"
  local scsiblocknum="${localdisknum}"
  local i=0
  for ssd in /dev/disk/by-id/google-local-ssd-*; do
    if [ -e "${ssd}" ]; then
      local devicenum
      devicenum=$(echo "${ssd}" | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/')
      if [[ "${i}" -lt "${scsiblocknum}" ]]; then
        mount-ext "${ssd}" "${devicenum}" "scsi" "block"
      else
        # GKE does not set NODE_LOCAL_SSDS so all non-block devices
        # are assumed to be filesystem devices
        mount-ext "${ssd}" "${devicenum}" "scsi" "fs"
      fi
      i=$((i+1))
    else
      echo "No local SCSI SSD disks found."
    fi
  done

  # The following mounts or symlinks NVMe devices
  get-local-disk-num "nvme" "block"
  local nvmeblocknum="${localdisknum}"
  get-local-disk-num "nvme" "fs"
  local nvmefsnum="${localdisknum}"
  # Check if NVMe SSD specified.
  if [ "${nvmeblocknum}" -eq "0" ] && [ "${nvmefsnum}" -eq "0" ]; then
    echo "No local NVMe SSD specified."
    return
  fi
  local i=0
  for ssd in /dev/nvme*; do
    if [ -e "${ssd}" ]; then
      # This workaround to find if the NVMe device is a disk is required because
      # the existing Google images do not expose NVMe devices in /dev/disk/by-id
      if [[ $(udevadm info --query=property --name="${ssd}" | grep DEVTYPE | sed "s/DEVTYPE=//") == "disk" ]]; then
        # shellcheck disable=SC2155
        local devicenum=$(echo "${ssd}" | sed -e 's/\/dev\/nvme0n\([0-9]*\)/\1/')
        if [[ "${i}" -lt "${nvmeblocknum}" ]]; then
          mount-ext "${ssd}" "${devicenum}" "nvme" "block"
        else
          mount-ext "${ssd}" "${devicenum}" "nvme" "fs"
        fi
        i=$((i+1))
      fi
    else
      echo "No local NVMe SSD disks found."
    fi
  done
}

# Local SSDs, if present, are used in a single RAID 0 array and directories that
# back ephemeral storage are mounted on them (kubelet root, container runtime
# root and pod logs).
function ensure-local-ssds-ephemeral-storage() {
  local devices=()
  # Get nvme devices
  for ssd in /dev/nvme*n*; do
    if [ -e "${ssd}" ]; then
      # This workaround to find if the NVMe device is a local SSD is required
      # because the existing Google images do not expose them in /dev/disk/by-id
      if [[ "$(lsblk -o MODEL -dn "${ssd}")" == "nvme_card" ]]; then
        devices+=("${ssd}")
      fi
    fi
  done
  if [ "${#devices[@]}" -eq 0 ]; then
    echo "No local NVMe SSD disks found."
    return
  fi

  local device="${devices[0]}"
  if [ "${#devices[@]}" -ne 1 ]; then
    seen_arrays=(/dev/md/*)
    device=${seen_arrays[0]}
    echo "Setting up RAID array with local SSDs on device ${device}"
    if [ ! -e "$device" ]; then
      device="/dev/md/0"
      echo "y" | mdadm --create "${device}" --level=0 --raid-devices=${#devices[@]} "${devices[@]}"
    fi
  fi

  local ephemeral_mountpoint="/mnt/stateful_partition/kube-ephemeral-ssd"
  safe-format-and-mount "${device}" "${ephemeral_mountpoint}"

  # mount container runtime root dir on SSD
  local container_runtime_name="${CONTAINER_RUNTIME_NAME:-containerd}"
  systemctl stop "$container_runtime_name"
  # Some images remount the container runtime root dir.
  umount "/var/lib/${container_runtime_name}" || true
  # Move the container runtime's directory to the new location to preserve
  # preloaded images.
  if [ ! -d "${ephemeral_mountpoint}/${container_runtime_name}" ]; then
    mv "/var/lib/${container_runtime_name}" "${ephemeral_mountpoint}/${container_runtime_name}"
  fi
  safe-bind-mount "${ephemeral_mountpoint}/${container_runtime_name}" "/var/lib/${container_runtime_name}"
  systemctl start "$container_runtime_name"

  # mount kubelet root dir on SSD
  mkdir -p "${ephemeral_mountpoint}/kubelet"
  safe-bind-mount "${ephemeral_mountpoint}/kubelet" "/var/lib/kubelet"

  # mount pod logs root dir on SSD
  mkdir -p "${ephemeral_mountpoint}/log_pods"
  safe-bind-mount "${ephemeral_mountpoint}/log_pods" "/var/log/pods"
}
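
# Example: with two local NVMe SSDs (/dev/nvme0n1 and /dev/nvme1n1) and no
# pre-existing md array, the function assembles them into /dev/md/0 as RAID 0,
# mounts the array at /mnt/stateful_partition/kube-ephemeral-ssd, and
# bind-mounts the container runtime root, /var/lib/kubelet and /var/log/pods
# onto it.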

# set journald configuration
function setup-journald() {
  if [[ "${SET_JOURNALD_CONFIGURATION:-true}" = "true" ]]; then
    cat <<EOF > /etc/systemd/journald.conf
[Journal]
Storage=persistent
SystemMaxUse=1G
SystemMaxFileSize=100M
RuntimeMaxUse=100M
EOF
    systemctl restart systemd-journald.service
  fi
}

# Installs logrotate configuration files
function setup-logrotate() {
  mkdir -p /etc/logrotate.d/

  if [[ "${ENABLE_LOGROTATE_FILES:-true}" = "true" ]]; then
    # Configure log rotation for all logs in /var/log, which is where k8s services
    # are configured to write their log files. Whenever logrotate is run, this
    # config will:
    # * rotate the log file if its size is > 100MB OR if one day has elapsed
    # * save rotated logs into a gzipped timestamped backup
    # * log file timestamp (controlled by 'dateformat') includes seconds too. This
    #   ensures that logrotate can generate unique logfiles during each rotation
    #   (otherwise it skips rotation if 'maxsize' is reached multiple times in a
    #   day).
    # * keep only 5 old (rotated) logs by default, and will discard older logs.
    cat > /etc/logrotate.d/allvarlogs <<EOF
/var/log/*.log {
    rotate ${LOGROTATE_FILES_MAX_COUNT:-5}
    copytruncate
    missingok
    notifempty
    compress
    maxsize ${LOGROTATE_MAX_SIZE:-100M}
    daily
    dateext
    dateformat -%Y%m%d-%s
    create 0644 root root
}
EOF
  fi

  if [[ "${ENABLE_POD_LOG:-false}" = "true" ]]; then
    # Configure log rotation for pod logs in /var/log/pods/NAMESPACE_NAME_UID.
    cat > /etc/logrotate.d/allpodlogs <<EOF
/var/log/pods/*/*.log {
    rotate ${POD_LOG_MAX_FILE:-5}
    copytruncate
    missingok
    notifempty
    compress
    maxsize ${POD_LOG_MAX_SIZE:-5M}
    daily
    dateext
    dateformat -%Y%m%d-%s
    create 0644 root root
}
EOF
  fi
}
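
# With dateext, dateformat -%Y%m%d-%s and compress, a rotated log ends up as,
# e.g., /var/log/kube-apiserver.log-20240520-1716220800.gz; the epoch-seconds
# suffix keeps multiple same-day rotations unique.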

# Finds the master PD device; returns it in MASTER_PD_DEVICE
function find-master-pd {
  MASTER_PD_DEVICE=""
  if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
    return
  fi
  device_info=$(ls -l /dev/disk/by-id/google-master-pd)
  relative_path=${device_info##* }
  MASTER_PD_DEVICE="/dev/disk/by-id/${relative_path}"
}

# Mounts a persistent disk (formatting if needed) to store the persistent data
# on the master -- etcd's data, a few settings, and security certs/keys/tokens.
# safe-format-and-mount only formats an unformatted disk, and mkdir -p will
# leave a directory be if it already exists.
function mount-master-pd {
  find-master-pd
  if [[ -z "${MASTER_PD_DEVICE:-}" ]]; then
    return
  fi

  echo "Mounting master-pd"
  local -r pd_path="/dev/disk/by-id/google-master-pd"
  local -r mount_point="/mnt/disks/master-pd"
  # Format and mount the disk, create directories on it for all of the master's
  # persistent data, and link them to where they're used.
  mkdir -p "${mount_point}"
  safe-format-and-mount "${pd_path}" "${mount_point}"
  echo "Mounted master-pd '${pd_path}' at '${mount_point}'"

  # NOTE: These locations on the PD store persistent data, so to maintain
  # upgradeability, these locations should not change.  If they do, take care
  # to maintain a migration path from these locations to whatever new
  # locations are chosen.

  # Contains all the data stored in etcd.
  mkdir -p "${mount_point}/var/etcd"
  chmod 700 "${mount_point}/var/etcd"
  ln -s -f "${mount_point}/var/etcd" /var/etcd
  mkdir -p /etc/srv
  # Contains the dynamically generated apiserver auth certs and keys.
  mkdir -p "${mount_point}/srv/kubernetes"
  ln -s -f "${mount_point}/srv/kubernetes" /etc/srv/kubernetes
  # Directory for kube-apiserver to store SSH key (if necessary).
  mkdir -p "${mount_point}/srv/sshproxy"
  ln -s -f "${mount_point}/srv/sshproxy" /etc/srv/sshproxy

  chown -R etcd "${mount_point}/var/etcd"
  chgrp -R etcd "${mount_point}/var/etcd"
}

# append_or_replace_prefixed_line ensures:
# 1. the specified file exists
# 2. existing lines with the specified ${prefix} are removed
# 3. a new line with the specified ${prefix}${suffix} is appended
function append_or_replace_prefixed_line {
  local -r file="${1:-}"
  local -r prefix="${2:-}"
  local -r suffix="${3:-}"
  local -r dirname=$(dirname "${file}")
  local -r tmpfile=$(mktemp "${dirname}/filtered.XXXX")

  touch "${file}"
  awk -v pfx="${prefix}" 'substr($0,1,length(pfx)) != pfx { print }' "${file}" > "${tmpfile}"
  echo "${prefix}${suffix}" >> "${tmpfile}"
  mv "${tmpfile}" "${file}"
}
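
# Example (hypothetical token value): append_or_replace_prefixed_line \
#   "/etc/srv/kubernetes/known_tokens.csv" "token1," "admin,admin,system:masters"
# drops any existing line starting with "token1," and appends
# "token1,admin,admin,system:masters" exactly once.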

function write-pki-data {
  local data="${1}"
  local path="${2}"
  # remove the path if it exists
  rm -f "${path}"
  if [[ -n "${KUBE_PKI_READERS_GROUP:-}" ]]; then
    (umask 027; echo "${data}" | base64 --decode > "${path}")
    chgrp "${KUBE_PKI_READERS_GROUP:-}" "${path}"
    chmod g+r "${path}"
  else
    (umask 077; echo "${data}" | base64 --decode > "${path}")
  fi
}
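
# For example, write-pki-data "${CA_CERT}" "/etc/srv/kubernetes/pki/ca.crt"
# (as create-master-pki does below) yields a 0600 root-only file, or a 0640
# group-readable one when KUBE_PKI_READERS_GROUP is set.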

function create-node-pki {
  echo "Creating node pki files"

  local -r pki_dir="/etc/srv/kubernetes/pki"
  mkdir -p "${pki_dir}"

  if [[ -z "${CA_CERT_BUNDLE:-}" ]]; then
    CA_CERT_BUNDLE="${CA_CERT}"
  fi

  CA_CERT_BUNDLE_PATH="${pki_dir}/ca-certificates.crt"
  write-pki-data "${CA_CERT_BUNDLE}" "${CA_CERT_BUNDLE_PATH}"

  if [[ -n "${KUBELET_CERT:-}" && -n "${KUBELET_KEY:-}" ]]; then
    KUBELET_CERT_PATH="${pki_dir}/kubelet.crt"
    write-pki-data "${KUBELET_CERT}" "${KUBELET_CERT_PATH}"

    KUBELET_KEY_PATH="${pki_dir}/kubelet.key"
    write-pki-data "${KUBELET_KEY}" "${KUBELET_KEY_PATH}"
  fi

  if [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'http-connect' ]]; then
    mkdir -p "${pki_dir}/konnectivity-agent"
    KONNECTIVITY_AGENT_CA_CERT_PATH="${pki_dir}/konnectivity-agent/ca.crt"
    KONNECTIVITY_AGENT_CLIENT_KEY_PATH="${pki_dir}/konnectivity-agent/client.key"
    KONNECTIVITY_AGENT_CLIENT_CERT_PATH="${pki_dir}/konnectivity-agent/client.crt"
    write-pki-data "${KONNECTIVITY_AGENT_CA_CERT}" "${KONNECTIVITY_AGENT_CA_CERT_PATH}"
    write-pki-data "${KONNECTIVITY_AGENT_CLIENT_KEY}" "${KONNECTIVITY_AGENT_CLIENT_KEY_PATH}"
    write-pki-data "${KONNECTIVITY_AGENT_CLIENT_CERT}" "${KONNECTIVITY_AGENT_CLIENT_CERT_PATH}"
  fi
}

function create-master-pki {
  echo "Creating master pki files"

  local -r pki_dir="/etc/srv/kubernetes/pki"
  mkdir -p "${pki_dir}"

  CA_CERT_PATH="${pki_dir}/ca.crt"
  write-pki-data "${CA_CERT}" "${CA_CERT_PATH}"

  # this is not true on GKE
  if [[ -n "${CA_KEY:-}" ]]; then
    CA_KEY_PATH="${pki_dir}/ca.key"
    write-pki-data "${CA_KEY}" "${CA_KEY_PATH}"
  fi

  if [[ -z "${APISERVER_SERVER_CERT:-}" || -z "${APISERVER_SERVER_KEY:-}" ]]; then
    APISERVER_SERVER_CERT="${MASTER_CERT}"
    APISERVER_SERVER_KEY="${MASTER_KEY}"
  fi

  APISERVER_SERVER_CERT_PATH="${pki_dir}/apiserver.crt"
  write-pki-data "${APISERVER_SERVER_CERT}" "${APISERVER_SERVER_CERT_PATH}"

  APISERVER_SERVER_KEY_PATH="${pki_dir}/apiserver.key"
  write-pki-data "${APISERVER_SERVER_KEY}" "${APISERVER_SERVER_KEY_PATH}"

  if [[ -z "${APISERVER_CLIENT_CERT:-}" || -z "${APISERVER_CLIENT_KEY:-}" ]]; then
    APISERVER_CLIENT_CERT="${KUBEAPISERVER_CERT}"
    APISERVER_CLIENT_KEY="${KUBEAPISERVER_KEY}"
  fi

  APISERVER_CLIENT_CERT_PATH="${pki_dir}/apiserver-client.crt"
  write-pki-data "${APISERVER_CLIENT_CERT}" "${APISERVER_CLIENT_CERT_PATH}"

  APISERVER_CLIENT_KEY_PATH="${pki_dir}/apiserver-client.key"
  write-pki-data "${APISERVER_CLIENT_KEY}" "${APISERVER_CLIENT_KEY_PATH}"

  if [[ -z "${SERVICEACCOUNT_CERT:-}" || -z "${SERVICEACCOUNT_KEY:-}" ]]; then
    SERVICEACCOUNT_CERT="${MASTER_CERT}"
    SERVICEACCOUNT_KEY="${MASTER_KEY}"
  fi

  if [[ -n "${OLD_MASTER_CERT:-}" && -n "${OLD_MASTER_KEY:-}" ]]; then
    OLD_MASTER_CERT_PATH="${pki_dir}/oldapiserver.crt"
    echo "${OLD_MASTER_CERT}" | base64 --decode > "${OLD_MASTER_CERT_PATH}"
    OLD_MASTER_KEY_PATH="${pki_dir}/oldapiserver.key"
    echo "${OLD_MASTER_KEY}" | base64 --decode > "${OLD_MASTER_KEY_PATH}"
  fi

  SERVICEACCOUNT_CERT_PATH="${pki_dir}/serviceaccount.crt"
  write-pki-data "${SERVICEACCOUNT_CERT}" "${SERVICEACCOUNT_CERT_PATH}"

  SERVICEACCOUNT_KEY_PATH="${pki_dir}/serviceaccount.key"
  write-pki-data "${SERVICEACCOUNT_KEY}" "${SERVICEACCOUNT_KEY_PATH}"

  if [[ -n "${REQUESTHEADER_CA_CERT:-}" ]]; then
    REQUESTHEADER_CA_CERT_PATH="${pki_dir}/aggr_ca.crt"
    write-pki-data "${REQUESTHEADER_CA_CERT}" "${REQUESTHEADER_CA_CERT_PATH}"

    PROXY_CLIENT_KEY_PATH="${pki_dir}/proxy_client.key"
    write-pki-data "${PROXY_CLIENT_KEY}" "${PROXY_CLIENT_KEY_PATH}"

    PROXY_CLIENT_CERT_PATH="${pki_dir}/proxy_client.crt"
    write-pki-data "${PROXY_CLIENT_CERT}" "${PROXY_CLIENT_CERT_PATH}"
  fi

  if [[ -n "${KONNECTIVITY_SERVER_CA_CERT:-}" ]]; then
    mkdir -p "${pki_dir}"/konnectivity-server
    KONNECTIVITY_SERVER_CA_CERT_PATH="${pki_dir}/konnectivity-server/ca.crt"
    write-pki-data "${KONNECTIVITY_SERVER_CA_CERT}" "${KONNECTIVITY_SERVER_CA_CERT_PATH}"

    KONNECTIVITY_SERVER_KEY_PATH="${pki_dir}/konnectivity-server/server.key"
    write-pki-data "${KONNECTIVITY_SERVER_KEY}" "${KONNECTIVITY_SERVER_KEY_PATH}"

    KONNECTIVITY_SERVER_CERT_PATH="${pki_dir}/konnectivity-server/server.crt"
    write-pki-data "${KONNECTIVITY_SERVER_CERT}" "${KONNECTIVITY_SERVER_CERT_PATH}"

    KONNECTIVITY_SERVER_CLIENT_KEY_PATH="${pki_dir}/konnectivity-server/client.key"
    write-pki-data "${KONNECTIVITY_SERVER_CLIENT_KEY}" "${KONNECTIVITY_SERVER_CLIENT_KEY_PATH}"

    KONNECTIVITY_SERVER_CLIENT_CERT_PATH="${pki_dir}/konnectivity-server/client.crt"
    write-pki-data "${KONNECTIVITY_SERVER_CLIENT_CERT}" "${KONNECTIVITY_SERVER_CLIENT_CERT_PATH}"
  fi

  if [[ -n "${KONNECTIVITY_AGENT_CA_CERT:-}" ]]; then
    mkdir -p "${pki_dir}"/konnectivity-agent
    KONNECTIVITY_AGENT_CA_KEY_PATH="${pki_dir}/konnectivity-agent/ca.key"
    write-pki-data "${KONNECTIVITY_AGENT_CA_KEY}" "${KONNECTIVITY_AGENT_CA_KEY_PATH}"

    KONNECTIVITY_AGENT_CA_CERT_PATH="${pki_dir}/konnectivity-agent/ca.crt"
    write-pki-data "${KONNECTIVITY_AGENT_CA_CERT}" "${KONNECTIVITY_AGENT_CA_CERT_PATH}"

    KONNECTIVITY_AGENT_KEY_PATH="${pki_dir}/konnectivity-agent/server.key"
    write-pki-data "${KONNECTIVITY_AGENT_KEY}" "${KONNECTIVITY_AGENT_KEY_PATH}"

    KONNECTIVITY_AGENT_CERT_PATH="${pki_dir}/konnectivity-agent/server.crt"
    write-pki-data "${KONNECTIVITY_AGENT_CERT}" "${KONNECTIVITY_AGENT_CERT_PATH}"
  fi

  if [[ -n "${CLOUD_PVL_ADMISSION_CA_CERT:-}" ]]; then
    mkdir -p "${pki_dir}"/cloud-pvl-admission
    CLOUD_PVL_ADMISSION_CA_CERT_PATH="${pki_dir}/cloud-pvl-admission/ca.crt"
    write-pki-data "${CLOUD_PVL_ADMISSION_CA_CERT}" "${CLOUD_PVL_ADMISSION_CA_CERT_PATH}"

    CLOUD_PVL_ADMISSION_KEY_PATH="${pki_dir}/cloud-pvl-admission/server.key"
    write-pki-data "${CLOUD_PVL_ADMISSION_KEY}" "${CLOUD_PVL_ADMISSION_KEY_PATH}"

    CLOUD_PVL_ADMISSION_CERT_PATH="${pki_dir}/cloud-pvl-admission/server.crt"
    write-pki-data "${CLOUD_PVL_ADMISSION_CERT}" "${CLOUD_PVL_ADMISSION_CERT_PATH}"
  fi
}

# After the first boot and on upgrade, these files exist on the master-pd
# and should never be touched again (except perhaps an additional service
# account, see NB below). One exception is if METADATA_CLOBBERS_CONFIG is
# enabled.
function create-master-auth {
  echo "Creating master auth files"
  local -r auth_dir="/etc/srv/kubernetes"
  local -r known_tokens_csv="${auth_dir}/known_tokens.csv"
  if [[ -e "${known_tokens_csv}" && "${METADATA_CLOBBERS_CONFIG:-false}" == "true" ]]; then
    rm "${known_tokens_csv}"
  fi
  if [[ -n "${KUBE_BEARER_TOKEN:-}" ]]; then
    append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_BEARER_TOKEN},"             "admin,admin,system:masters"
  fi
  if [[ -n "${KUBE_BOOTSTRAP_TOKEN:-}" ]]; then
    append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_BOOTSTRAP_TOKEN},"          "gcp:kube-bootstrap,uid:gcp:kube-bootstrap,system:masters"
  fi
  if [[ -n "${CLOUD_CONTROLLER_MANAGER_TOKEN:-}" ]]; then
    append_or_replace_prefixed_line "${known_tokens_csv}" "${CLOUD_CONTROLLER_MANAGER_TOKEN}," "system:cloud-controller-manager,uid:system:cloud-controller-manager"
  fi
  if [[ -n "${KUBE_CONTROLLER_MANAGER_TOKEN:-}" ]]; then
    append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_CONTROLLER_MANAGER_TOKEN}," "system:kube-controller-manager,uid:system:kube-controller-manager"
  fi
  if [[ -n "${KUBE_SCHEDULER_TOKEN:-}" ]]; then
    append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_SCHEDULER_TOKEN},"          "system:kube-scheduler,uid:system:kube-scheduler"
  fi
  if [[ -n "${KUBE_CLUSTER_AUTOSCALER_TOKEN:-}" ]]; then
    append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_CLUSTER_AUTOSCALER_TOKEN}," "cluster-autoscaler,uid:cluster-autoscaler"
  fi
  if [[ -n "${KUBE_PROXY_TOKEN:-}" ]]; then
    append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_PROXY_TOKEN},"              "system:kube-proxy,uid:kube_proxy"
  fi
  if [[ -n "${NODE_PROBLEM_DETECTOR_TOKEN:-}" ]]; then
    append_or_replace_prefixed_line "${known_tokens_csv}" "${NODE_PROBLEM_DETECTOR_TOKEN},"   "system:node-problem-detector,uid:node-problem-detector"
  fi
  if [[ -n "${GCE_GLBC_TOKEN:-}" ]]; then
    append_or_replace_prefixed_line "${known_tokens_csv}" "${GCE_GLBC_TOKEN},"                "system:controller:glbc,uid:system:controller:glbc"
  fi
  if [[ -n "${ADDON_MANAGER_TOKEN:-}" ]]; then
    append_or_replace_prefixed_line "${known_tokens_csv}" "${ADDON_MANAGER_TOKEN},"           "system:addon-manager,uid:system:addon-manager,system:masters"
  fi
  if [[ -n "${KONNECTIVITY_SERVER_TOKEN:-}" ]]; then
    append_or_replace_prefixed_line "${known_tokens_csv}" "${KONNECTIVITY_SERVER_TOKEN},"     "system:konnectivity-server,uid:system:konnectivity-server"
    create-kubeconfig "konnectivity-server" "${KONNECTIVITY_SERVER_TOKEN}"
  fi
  if [[ -n "${MONITORING_TOKEN:-}" ]]; then
    append_or_replace_prefixed_line "${known_tokens_csv}" "${MONITORING_TOKEN},"     "system:monitoring,uid:system:monitoring,system:monitoring"
  fi

  if [[ -n "${EXTRA_STATIC_AUTH_COMPONENTS:-}" ]]; then
    # Create a static bearer token and kubeconfig for extra, comma-separated components.
    IFS="," read -r -a extra_components <<< "${EXTRA_STATIC_AUTH_COMPONENTS:-}"
    for extra_component in "${extra_components[@]}"; do
      local token
      token="$(secure_random 32)"
      append_or_replace_prefixed_line "${known_tokens_csv}" "${token}," "system:${extra_component},uid:system:${extra_component}"
      create-kubeconfig "${extra_component}" "${token}"
    done
  fi
  local use_cloud_config="false"
  cat <<EOF >/etc/gce.conf
[global]
EOF
  if [[ -n "${GCE_API_ENDPOINT:-}" ]]; then
    cat <<EOF >>/etc/gce.conf
api-endpoint = ${GCE_API_ENDPOINT}
EOF
  fi
  if [[ -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
token-url = ${TOKEN_URL}
token-body = ${TOKEN_BODY}
EOF
  fi
  if [[ -n "${CONTAINER_API_ENDPOINT:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
container-api-endpoint = ${CONTAINER_API_ENDPOINT}
EOF
  fi
  if [[ -n "${PROJECT_ID:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
project-id = ${PROJECT_ID}
EOF
  fi
  if [[ -n "${NETWORK_PROJECT_ID:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
network-project-id = ${NETWORK_PROJECT_ID}
EOF
  fi
  if [[ -n "${STACK_TYPE:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
stack-type = ${STACK_TYPE}
EOF
  fi
  if [[ -n "${NODE_NETWORK:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
network-name = ${NODE_NETWORK}
EOF
  fi
  if [[ -n "${NODE_SUBNETWORK:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
subnetwork-name = ${NODE_SUBNETWORK}
EOF
  fi
  if [[ -n "${NODE_INSTANCE_PREFIX:-}" ]]; then
    use_cloud_config="true"
    if [[ -n "${NODE_TAGS:-}" ]]; then
      # split NODE_TAGS into an array by comma.
      IFS=',' read -r -a node_tags <<< "${NODE_TAGS}"
    else
      local -r node_tags=("${NODE_INSTANCE_PREFIX}")
    fi
    cat <<EOF >>/etc/gce.conf
node-instance-prefix = ${NODE_INSTANCE_PREFIX}
EOF
    for tag in "${node_tags[@]}"; do
      cat <<EOF >>/etc/gce.conf
node-tags = ${tag}
EOF
    done
  fi
  if [[ -n "${MULTIZONE:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
multizone = ${MULTIZONE}
EOF
  fi
  # Multimaster indicates that the cluster is HA.
  # Currently the only HA clusters are regional.
  # If we introduce zonal multimaster this will need to be revisited.
  if [[ -n "${MULTIMASTER:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
regional = ${MULTIMASTER}
EOF
  fi
  if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then
    use_cloud_config="true"
    # split GCE_ALPHA_FEATURES into an array by comma.
    IFS=',' read -r -a alpha_features <<< "${GCE_ALPHA_FEATURES}"
    for feature in "${alpha_features[@]}"; do
      cat <<EOF >>/etc/gce.conf
alpha-features = ${feature}
EOF
    done
  fi
  if [[ -n "${SECONDARY_RANGE_NAME:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >> /etc/gce.conf
secondary-range-name = ${SECONDARY_RANGE_NAME}
EOF
  fi
  if [[ "${use_cloud_config}" != "true" ]]; then
    rm -f /etc/gce.conf
  fi

  if [[ -n "${GCP_AUTHN_URL:-}" ]]; then
    cat <<EOF >/etc/gcp_authn.config
clusters:
  - name: gcp-authentication-server
    cluster:
      server: ${GCP_AUTHN_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-authentication-server
    user: kube-apiserver
  name: webhook
EOF
  fi

  if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then
    cat <<EOF >/etc/gcp_authz.config
clusters:
  - name: gcp-authorization-server
    cluster:
      server: ${GCP_AUTHZ_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-authorization-server
    user: kube-apiserver
  name: webhook
EOF
  fi
  if [[ "${PREPARE_KONNECTIVITY_SERVICE:-false}" == "true" ]]; then
    if [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'grpc' ]]; then
      cat <<EOF >/etc/srv/kubernetes/egress_selector_configuration.yaml
apiVersion: apiserver.k8s.io/v1beta1
kind: EgressSelectorConfiguration
egressSelections:
- name: cluster
  connection:
    proxyProtocol: GRPC
    transport:
      uds:
        udsName: /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket
- name: controlplane
  connection:
    proxyProtocol: Direct
- name: etcd
  connection:
    proxyProtocol: Direct
EOF
    elif [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'http-connect' ]]; then
      cat <<EOF >/etc/srv/kubernetes/egress_selector_configuration.yaml
apiVersion: apiserver.k8s.io/v1beta1
kind: EgressSelectorConfiguration
egressSelections:
- name: cluster
  connection:
    proxyProtocol: HTTPConnect
    transport:
      tcp:
        url: https://127.0.0.1:8131
        tlsConfig:
          caBundle: /etc/srv/kubernetes/pki/konnectivity-server/ca.crt
          clientKey: /etc/srv/kubernetes/pki/konnectivity-server/client.key
          clientCert: /etc/srv/kubernetes/pki/konnectivity-server/client.crt
- name: controlplane
  connection:
    proxyProtocol: Direct
- name: etcd
  connection:
    proxyProtocol: Direct
EOF
    else
      echo "KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE must be set to either grpc or http-connect"
      exit 1
    fi
  fi

  if [[ -n "${ADMISSION_CONTROL:-}" ]]; then
    # Emit a basic admission control configuration file, with no plugins specified.
    cat <<EOF >/etc/srv/kubernetes/admission_controller_config.yaml
apiVersion: apiserver.k8s.io/v1alpha1
kind: AdmissionConfiguration
plugins:
EOF

    # Add resourcequota config to limit critical pods to kube-system by default
    cat <<EOF >>/etc/srv/kubernetes/admission_controller_config.yaml
- name: "ResourceQuota"
  configuration:
    apiVersion: apiserver.config.k8s.io/v1
    kind: ResourceQuotaConfiguration
    limitedResources:
    - resource: pods
      matchScopes:
      - scopeName: PriorityClass
        operator: In
        values: ["system-node-critical", "system-cluster-critical"]
EOF

    if [[ "${ADMISSION_CONTROL:-}" == *"ImagePolicyWebhook"* ]]; then
      if [[ -z "${GCP_IMAGE_VERIFICATION_URL:-}" ]]; then
        1>&2 echo "The ImagePolicyWebhook admission control plugin was requested, but GCP_IMAGE_VERIFICATION_URL was not provided."
        exit 1
      fi

      1>&2 echo "ImagePolicyWebhook admission control plugin requested.  Configuring it to point at ${GCP_IMAGE_VERIFICATION_URL}"

      # ImagePolicyWebhook needs special kubeconfig for authenticating to the webhook endpoint.
      cat <<EOF >/etc/srv/kubernetes/gcp_image_review.kubeconfig
clusters:
  - name: gcp-image-review-server
    cluster:
      server: ${GCP_IMAGE_VERIFICATION_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-image-review-server
    user: kube-apiserver
  name: webhook
EOF

      # Append config for ImagePolicyWebhook to the shared admission controller
      # configuration file.
      cat <<EOF >>/etc/srv/kubernetes/admission_controller_config.yaml
- name: ImagePolicyWebhook
  configuration:
    imagePolicy:
      kubeConfigFile: /etc/srv/kubernetes/gcp_image_review.kubeconfig
      allowTTL: 30
      denyTTL: 30
      retryBackoff: 500
      defaultAllow: true
EOF
    fi
  fi
}

# Write the config for the audit policy.
function create-master-audit-policy {
  local -r path="${1}"
  local -r policy="${2:-}"

  if [[ -n "${policy}" ]]; then
    echo "${policy}" > "${path}"
    return
  fi

  # Known api groups
  local -r known_apis='
      - group: "" # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"
      - group: "apiregistration.k8s.io"
      - group: "apps"
      - group: "authentication.k8s.io"
      - group: "authorization.k8s.io"
      - group: "autoscaling"
      - group: "batch"
      - group: "certificates.k8s.io"
      - group: "extensions"
      - group: "metrics.k8s.io"
      - group: "networking.k8s.io"
      - group: "node.k8s.io"
      - group: "policy"
      - group: "rbac.authorization.k8s.io"
      - group: "scheduling.k8s.io"
      - group: "storage.k8s.io"'

  cat <<EOF >"${path}"
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk,
  # so drop them.
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
      - group: "" # core
        resources: ["endpoints", "services", "services/status"]
  - level: None
    # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
    # TODO(#46983): Change this to the ingress controller service account.
    users: ["system:unsecured"]
    namespaces: ["kube-system"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["configmaps"]
  - level: None
    users: ["kubelet"] # legacy kubelet identity
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    userGroups: ["system:nodes"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    users:
      - system:kube-controller-manager
      - system:cloud-controller-manager
      - system:kube-scheduler
      - system:serviceaccount:kube-system:endpoint-controller
    verbs: ["get", "update"]
    namespaces: ["kube-system"]
    resources:
      - group: "" # core
        resources: ["endpoints"]
  - level: None
    users: ["system:apiserver"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
  - level: None
    users: ["cluster-autoscaler"]
    verbs: ["get", "update"]
    namespaces: ["kube-system"]
    resources:
      - group: "" # core
        resources: ["configmaps", "endpoints"]
  # Don't log HPA fetching metrics.
  - level: None
    users:
      - system:kube-controller-manager
      - system:cloud-controller-manager
    verbs: ["get", "list"]
    resources:
      - group: "metrics.k8s.io"

  # Don't log these read-only URLs.
  - level: None
    nonResourceURLs:
      - /healthz*
      - /version
      - /swagger*

  # Don't log events requests because of performance impact.
  - level: None
    resources:
      - group: "" # core
        resources: ["events"]

  # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
  - level: Request
    users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
    verbs: ["update","patch"]
    resources:
      - group: "" # core
        resources: ["nodes/status", "pods/status"]
    omitStages:
      - "RequestReceived"
  - level: Request
    userGroups: ["system:nodes"]
    verbs: ["update","patch"]
    resources:
      - group: "" # core
        resources: ["nodes/status", "pods/status"]
    omitStages:
      - "RequestReceived"

  # deletecollection calls can be large, don't log responses for expected namespace deletions
  - level: Request
    users: ["system:serviceaccount:kube-system:namespace-controller"]
    verbs: ["deletecollection"]
    omitStages:
      - "RequestReceived"

  # Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,
  # so only log at the Metadata level.
  - level: Metadata
    resources:
      - group: "" # core
        resources: ["secrets", "configmaps", "serviceaccounts/token"]
      - group: authentication.k8s.io
        resources: ["tokenreviews"]
    omitStages:
      - "RequestReceived"
  # Get responses can be large; skip them.
  - level: Request
    verbs: ["get", "list", "watch"]
    resources: ${known_apis}
    omitStages:
      - "RequestReceived"
  # Default level for known APIs
  - level: RequestResponse
    resources: ${known_apis}
    omitStages:
      - "RequestReceived"
  # Default level for all other requests.
  - level: Metadata
    omitStages:
      - "RequestReceived"
EOF
}
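
# Example: under this policy, a kubelet PATCH to nodes/status is logged at the
# Request level (request body, no response body), while a GET on a secret
# matches the earlier Metadata rule and logs no request or response body.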

# Writes the configuration file used by the webhook advanced auditing backend.
function create-master-audit-webhook-config {
  local -r path="${1}"

  if [[ -n "${GCP_AUDIT_URL:-}" ]]; then
    # The webhook config file is a kubeconfig file describing the webhook endpoint.
    cat <<EOF >"${path}"
clusters:
  - name: gcp-audit-server
    cluster:
      server: ${GCP_AUDIT_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-audit-server
    user: kube-apiserver
  name: webhook
EOF
  fi
}
  1300  
  1301  function create-kubeconfig {
  1302    local component=$1
  1303    local token=$2
  1304    local path="/etc/srv/kubernetes/${component}/kubeconfig"
  1305    mkdir -p "/etc/srv/kubernetes/${component}"
  1306  
  1307    if [[ -e "${KUBE_HOME}/bin/gke-internal-configure-helper.sh" ]]; then
  1308      gke-internal-create-kubeconfig "${component}" "${token}" "${path}"
  1309    else
  1310      echo "Creating kubeconfig file for component ${component}"
  1311      cat <<EOF >"${path}"
  1312  apiVersion: v1
  1313  kind: Config
  1314  users:
  1315  - name: ${component}
  1316    user:
  1317      token: ${token}
  1318  clusters:
  1319  - name: local
  1320    cluster:
  1321      insecure-skip-tls-verify: true
  1322      server: https://localhost:443
  1323  contexts:
  1324  - context:
  1325      cluster: local
  1326      user: ${component}
  1327    name: ${component}
  1328  current-context: ${component}
  1329  EOF
  1330    fi
  1331  }
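
        # Example invocation (as used for master components further below):
        #   create-kubeconfig "kube-scheduler" "${KUBE_SCHEDULER_TOKEN}"
        # On non-GKE masters this writes /etc/srv/kubernetes/kube-scheduler/kubeconfig
        # with a token-authenticated context pointing at https://localhost:443.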
  1332  
  1333  # Arg 1: the IP address of the API server
  1334  function create-kubelet-kubeconfig() {
  1335    local apiserver_address="${1}"
  1336    if [[ -z "${apiserver_address}" ]]; then
  1337      echo "Must provide API server address to create Kubelet kubeconfig file!"
  1338      exit 1
  1339    fi
  1340    if [[ "${CREATE_BOOTSTRAP_KUBECONFIG:-true}" == "true" ]]; then
  1341      echo "Creating kubelet bootstrap-kubeconfig file"
  1342      cat <<EOF >/var/lib/kubelet/bootstrap-kubeconfig
  1343  apiVersion: v1
  1344  kind: Config
  1345  users:
  1346  - name: kubelet
  1347    user:
  1348      client-certificate: ${KUBELET_CERT_PATH}
  1349      client-key: ${KUBELET_KEY_PATH}
  1350  clusters:
  1351  - name: local
  1352    cluster:
  1353      server: https://${apiserver_address}
  1354      certificate-authority: ${CA_CERT_BUNDLE_PATH}
  1355  contexts:
  1356  - context:
  1357      cluster: local
  1358      user: kubelet
  1359    name: service-account-context
  1360  current-context: service-account-context
  1361  EOF
  1362    elif [[ "${FETCH_BOOTSTRAP_KUBECONFIG:-false}" == "true" ]]; then
  1363      echo "Fetching kubelet bootstrap-kubeconfig file from metadata"
  1364      get-metadata-value "instance/attributes/bootstrap-kubeconfig" >/var/lib/kubelet/bootstrap-kubeconfig
  1365    else
  1366      echo "Fetching kubelet kubeconfig file from metadata"
  1367      get-metadata-value "instance/attributes/kubeconfig" >/var/lib/kubelet/kubeconfig
  1368    fi
  1369  }
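
        # Example invocation (see create-master-kubelet-auth below):
        #   create-kubelet-kubeconfig "${KUBELET_APISERVER}"
        # Depending on CREATE_BOOTSTRAP_KUBECONFIG and FETCH_BOOTSTRAP_KUBECONFIG,
        # this writes /var/lib/kubelet/bootstrap-kubeconfig locally or fetches a
        # (bootstrap-)kubeconfig from the instance metadata.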
  1370  
  1371  # Uses KUBELET_APISERVER, KUBELET_CERT, and KUBELET_KEY to decide whether to
  1372  # generate a kubeconfig file for the kubelet to securely connect to the apiserver.
  1373  # Sets REGISTER_MASTER_KUBELET to true if the kubelet on the master node
  1374  # should register with the apiserver.
  1375  function create-master-kubelet-auth {
  1376    # Only configure the kubelet on the master if the required variables are
  1377    # set in the environment.
  1378    if [[ -n "${KUBELET_APISERVER:-}" && -n "${KUBELET_CERT:-}" && -n "${KUBELET_KEY:-}" ]]; then
  1379      REGISTER_MASTER_KUBELET="true"
  1380      create-kubelet-kubeconfig "${KUBELET_APISERVER}"
  1381    fi
  1382  }
  1383  
  1384  function create-kubeproxy-user-kubeconfig {
  1385    echo "Creating kube-proxy user kubeconfig file"
  1386    cat <<EOF >/var/lib/kube-proxy/kubeconfig
  1387  apiVersion: v1
  1388  kind: Config
  1389  users:
  1390  - name: kube-proxy
  1391    user:
  1392      token: ${KUBE_PROXY_TOKEN}
  1393  clusters:
  1394  - name: local
  1395    cluster:
  1396      certificate-authority-data: ${CA_CERT_BUNDLE}
  1397  contexts:
  1398  - context:
  1399      cluster: local
  1400      user: kube-proxy
  1401    name: service-account-context
  1402  current-context: service-account-context
  1403  EOF
  1404  }
  1405  
  1406  function create-kube-scheduler-config {
  1407    echo "Creating kube-scheduler config file"
  1408    mkdir -p /etc/srv/kubernetes/kube-scheduler
  1409    cat <<EOF >/etc/srv/kubernetes/kube-scheduler/config
  1410  ${KUBE_SCHEDULER_CONFIG}
  1411  EOF
  1412  }
  1413  
  1414  # TODO(#92143): Remove legacy policy config creation once kube-scheduler config is GA.
  1415  function create-kubescheduler-policy-config {
  1416    echo "Creating kube-scheduler policy config file"
  1417    mkdir -p /etc/srv/kubernetes/kube-scheduler
  1418    cat <<EOF >/etc/srv/kubernetes/kube-scheduler/policy-config
  1419  ${SCHEDULER_POLICY_CONFIG}
  1420  EOF
  1421  }
  1422  
  1423  function create-node-problem-detector-kubeconfig {
  1424    local apiserver_address="${1}"
  1425    if [[ -z "${apiserver_address}" ]]; then
  1426      echo "Must provide API server address to create node-problem-detector kubeconfig file!"
  1427      exit 1
  1428    fi
  1429    echo "Creating node-problem-detector kubeconfig file"
  1430    mkdir -p /var/lib/node-problem-detector
  1431    cat <<EOF >/var/lib/node-problem-detector/kubeconfig
  1432  apiVersion: v1
  1433  kind: Config
  1434  users:
  1435  - name: node-problem-detector
  1436    user:
  1437      token: ${NODE_PROBLEM_DETECTOR_TOKEN}
  1438  clusters:
  1439  - name: local
  1440    cluster:
  1441      server: https://${apiserver_address}
  1442      certificate-authority-data: ${CA_CERT}
  1443  contexts:
  1444  - context:
  1445      cluster: local
  1446      user: node-problem-detector
  1447    name: service-account-context
  1448  current-context: service-account-context
  1449  EOF
  1450  }
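
        # Usage sketch (the argument is illustrative; any reachable apiserver
        # address works):
        #   create-node-problem-detector-kubeconfig "${KUBERNETES_MASTER_NAME}"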
  1451  
  1452  function create-node-problem-detector-kubeconfig-from-kubelet {
  1453    echo "Creating node-problem-detector kubeconfig from /var/lib/kubelet/kubeconfig"
  1454    mkdir -p /var/lib/node-problem-detector
  1455    cp /var/lib/kubelet/kubeconfig /var/lib/node-problem-detector/kubeconfig
  1456  }
  1457  
  1458  function create-master-etcd-auth {
  1459    if [[ -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
  1460      local -r auth_dir="/etc/srv/kubernetes"
  1461      echo "${ETCD_CA_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-ca.crt"
  1462      echo "${ETCD_PEER_KEY}" | base64 --decode > "${auth_dir}/etcd-peer.key"
  1463      echo "${ETCD_PEER_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-peer.crt"
  1464    fi
  1465  }
  1466  
  1467  function create-master-etcd-apiserver-auth {
  1468     if [[ -n "${ETCD_APISERVER_CA_KEY:-}" && -n "${ETCD_APISERVER_CA_CERT:-}" && -n "${ETCD_APISERVER_SERVER_KEY:-}" && -n "${ETCD_APISERVER_SERVER_CERT:-}" && -n "${ETCD_APISERVER_CLIENT_KEY:-}" && -n "${ETCD_APISERVER_CLIENT_CERT:-}" ]]; then
  1469       local -r auth_dir="/etc/srv/kubernetes/pki"
  1470  
  1471       ETCD_APISERVER_CA_KEY_PATH="${auth_dir}/etcd-apiserver-ca.key"
  1472       echo "${ETCD_APISERVER_CA_KEY}" | base64 --decode > "${ETCD_APISERVER_CA_KEY_PATH}"
  1473  
  1474       # Keep in sync with add-replica-to-etcd/remove-replica-from-etcd in util.sh.
  1475       ETCD_APISERVER_CA_CERT_PATH="${auth_dir}/etcd-apiserver-ca.crt"
  1476       echo "${ETCD_APISERVER_CA_CERT}" | base64 --decode | gunzip > "${ETCD_APISERVER_CA_CERT_PATH}"
  1477  
  1478       ETCD_APISERVER_SERVER_KEY_PATH="${auth_dir}/etcd-apiserver-server.key"
  1479       echo "${ETCD_APISERVER_SERVER_KEY}" | base64 --decode > "${ETCD_APISERVER_SERVER_KEY_PATH}"
  1480  
  1481       ETCD_APISERVER_SERVER_CERT_PATH="${auth_dir}/etcd-apiserver-server.crt"
  1482       echo "${ETCD_APISERVER_SERVER_CERT}" | base64 --decode | gunzip > "${ETCD_APISERVER_SERVER_CERT_PATH}"
  1483  
  1484       # Keep in sync with add-replica-to-etcd/remove-replica-from-etcd in util.sh.
  1485       ETCD_APISERVER_CLIENT_KEY_PATH="${auth_dir}/etcd-apiserver-client.key"
  1486       echo "${ETCD_APISERVER_CLIENT_KEY}" | base64 --decode > "${ETCD_APISERVER_CLIENT_KEY_PATH}"
  1487  
  1488       # Keep in sync with add-replica-to-etcd/remove-replica-from-etcd in util.sh.
  1489       ETCD_APISERVER_CLIENT_CERT_PATH="${auth_dir}/etcd-apiserver-client.crt"
  1490       echo "${ETCD_APISERVER_CLIENT_CERT}" | base64 --decode | gunzip > "${ETCD_APISERVER_CLIENT_CERT_PATH}"
  1491     fi
  1492  }
  1493  
  1494  # Returns 0 if a docker.service systemd unit is installed, non-zero otherwise.
  1495  function docker-installed {
  1496      systemctl cat docker.service &> /dev/null
  1497  }
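
        # Minimal usage sketch:
        #   if docker-installed; then
        #     echo "docker.service unit found"
        #   fi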
  1501  
  1502  # Utility function to add a docker option to the daemon.json file, but only
  1503  # if the daemon.json file is present.
  1504  # Accepts exactly one argument (the docker option to append).
  1505  function addockeropt {
  1506    DOCKER_OPTS_FILE=/etc/docker/daemon.json
  1507    if [ "$#" -ne 1 ]; then
  1508      echo "addockeropt expects exactly one argument (a docker option); got $#"
  1509      exit 1
  1510    fi
  1511    # Append the given input to the docker opts file, i.e. /etc/docker/daemon.json.
  1512    if [ -f "$DOCKER_OPTS_FILE" ]; then
  1513      cat >> "${DOCKER_OPTS_FILE}" <<EOF
  1514    $1
  1515  EOF
  1516    fi
  1517  }
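
        # Example (taken from assemble-docker-flags below): append one JSON
        # key/value fragment, trailing comma included, to /etc/docker/daemon.json:
        #   addockeropt "\"log-level\": \"warn\","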
  1520  
  1521  function disable_aufs() {
  1522    # disable aufs module if aufs is loaded
  1523    if lsmod | grep "aufs" &> /dev/null ; then
  1524      sudo modprobe -r aufs
  1525    fi
  1526  }
  1527  
  1528  function set_docker_options_non_ubuntu() {
  1529    # Set the docker MTU and storage-driver options on non-Ubuntu distros;
  1530    # Ubuntu already uses these values by default.
  1531    if [[ -n "$(command -v lsb_release)" && $(lsb_release -si) == "Ubuntu" ]]; then
  1532      echo "Not adding docker options on Ubuntu, as these are the defaults there. Bailing out..."
  1533      return
  1534    fi
  1535  
  1536    addockeropt "\"mtu\": 1460,"
  1537    addockeropt "\"storage-driver\": \"overlay2\","
  1538    echo "setting live restore"
  1539    # Disable live-restore if the environment variable is set.
  1540    if [[ "${DISABLE_DOCKER_LIVE_RESTORE:-false}" == "true" ]]; then
  1541      addockeropt "\"live-restore\": false,"
  1542    else
  1543      addockeropt "\"live-restore\": true,"
  1544    fi
  1545  }
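
        # On a non-Ubuntu node with live-restore left enabled, the fragments
        # appended above amount to these daemon.json entries:
        #   "mtu": 1460,
        #   "storage-driver": "overlay2",
        #   "live-restore": true,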
  1546  
  1547  function assemble-docker-flags {
  1548    echo "Assemble docker options"
  1549  
  1550    # Log the contents of /etc/docker/daemon.json if it already exists.
  1551    if [ -f /etc/docker/daemon.json ]; then
  1552      echo "Contents of the old docker config"
  1553      cat /etc/docker/daemon.json
  1554    fi
  1555  
  1556    cat <<EOF >/etc/docker/daemon.json
  1557  {
  1558  EOF
  1559  
  1560    addockeropt "\"pidfile\": \"/var/run/docker.pid\",
  1561    \"iptables\": false,
  1562    \"ip-masq\": false,"
  1563  
  1564    echo "setting log-level"
  1565    if [[ "${TEST_CLUSTER:-}" == "true" ]]; then
  1566      addockeropt "\"log-level\": \"debug\","
  1567    else
  1568      addockeropt "\"log-level\": \"warn\","
  1569    fi
  1570  
  1571    echo "setting network bridge"
  1572    if [[ "${NETWORK_PROVIDER:-}" == "kubenet" || "${NETWORK_PROVIDER:-}" == "cni" ]]; then
  1573      # set docker0 cidr to private ip address range to avoid conflict with cbr0 cidr range
  1574      addockeropt "\"bip\": \"169.254.123.1/24\","
  1575    else
  1576      addockeropt "\"bridge\": \"cbr0\","
  1577    fi
  1578  
  1579    echo "setting registry mirror"
  1580    # TODO(vteratipally): move the registry-mirror completely to /etc/docker/daemon.json.
  1581    local docker_opts=""
  1582    # Decide whether to enable a docker registry mirror. This is taken from
  1583    # the "kube-env" metadata value.
  1584    if [[ -n "${DOCKER_REGISTRY_MIRROR_URL:-}" ]]; then
  1585        docker_opts+="--registry-mirror=${DOCKER_REGISTRY_MIRROR_URL} "
  1586    fi
  1587  
  1588    disable_aufs
  1589    set_docker_options_non_ubuntu
  1590
  1592    echo "setting docker logging options"
  1593    # Configure docker logging
  1594    addockeropt "\"log-driver\": \"${DOCKER_LOG_DRIVER:-json-file}\","
  1595    addockeropt "\"log-opts\": {
  1596        \"max-size\": \"${DOCKER_LOG_MAX_SIZE:-10m}\",
  1597        \"max-file\": \"${DOCKER_LOG_MAX_FILE:-5}\"
  1598      }"
  1599    cat <<EOF >>/etc/docker/daemon.json
  1600  }
  1601  EOF
  1602    echo "DOCKER_OPTS=\"${docker_opts}${EXTRA_DOCKER_OPTS:-}\"" > /etc/default/docker
  1603  
  1604    # Ensure TasksMax is sufficient for docker.
  1605    # (https://github.com/kubernetes/kubernetes/issues/51977)
  1606    echo "Extend the docker.service configuration to set a higher pids limit"
  1607    mkdir -p /etc/systemd/system/docker.service.d
  1608    cat <<EOF >/etc/systemd/system/docker.service.d/01tasksmax.conf
  1609  [Service]
  1610  TasksMax=infinity
  1611  EOF
  1612  
  1613    systemctl daemon-reload
  1614    echo "Docker configuration is updated. Restarting docker to pick it up"
  1615    systemctl restart docker
  1616  }
  1617  
  1618  # This function assembles the kubelet systemd service file and starts it
  1619  # using systemctl.
  1620  function start-kubelet {
  1621    echo "Start kubelet"
  1622  
  1623    local kubelet_bin="${KUBE_HOME}/bin/kubelet"
  1624    local -r version="$("${kubelet_bin}" --version=true | cut -f2 -d " ")"
  1625    local -r builtin_kubelet="/usr/bin/kubelet"
  1626    if [[ "${TEST_CLUSTER:-}" == "true" ]]; then
  1627      # Determine which binary to use on test clusters. We use the built-in
  1628      # version only if the downloaded version is the same as the built-in
  1629      # version. This allows GCI to run some of the e2e tests to qualify the
  1630      # built-in kubelet.
  1631      if [[ -x "${builtin_kubelet}" ]]; then
  1632        local -r builtin_version="$("${builtin_kubelet}" --version=true | cut -f2 -d " ")"
  1633        if [[ "${builtin_version}" == "${version}" ]]; then
  1634          kubelet_bin="${builtin_kubelet}"
  1635        fi
  1636      fi
  1637    fi
  1638    echo "Using kubelet binary at ${kubelet_bin}"
  1639  
  1640    local -r kubelet_env_file="/etc/default/kubelet"
  1641  
  1642    local kubelet_cgroup_driver=""
  1643    if [[ "${CGROUP_CONFIG-}" == "cgroup2fs" ]]; then
  1644      kubelet_cgroup_driver="--cgroup-driver=systemd"
  1645    fi
  1646  
  1647    local kubelet_opts="${KUBELET_ARGS} ${KUBELET_CONFIG_FILE_ARG:-} ${kubelet_cgroup_driver:-}"
  1648    echo "KUBELET_OPTS=\"${kubelet_opts}\"" > "${kubelet_env_file}"
  1649    echo "KUBE_COVERAGE_FILE=\"/var/log/kubelet.cov\"" >> "${kubelet_env_file}"
  1650  
  1651    # Write the systemd service file for kubelet.
  1652    cat <<EOF >/etc/systemd/system/kubelet.service
  1653  [Unit]
  1654  Description=Kubernetes kubelet
  1655  Requires=network-online.target
  1656  After=network-online.target
  1657  
  1658  [Service]
  1659  Restart=always
  1660  RestartSec=10
  1661  EnvironmentFile=${kubelet_env_file}
  1662  ExecStart=${kubelet_bin} \$KUBELET_OPTS
  1663  
  1664  [Install]
  1665  WantedBy=multi-user.target
  1666  EOF
  1667  
  1668    systemctl daemon-reload
  1669    systemctl start kubelet.service
  1670  }
  1671  
  1672  # This function assembles the node problem detector systemd service file and
  1673  # starts it using systemctl.
  1674  function start-node-problem-detector {
  1675    echo "Start node problem detector"
  1676    local -r npd_bin="${KUBE_HOME}/bin/node-problem-detector"
  1677    echo "Using node problem detector binary at ${npd_bin}"
  1678  
  1679    local flags="${NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS:-}"
  1680    if [[ -z "${flags}" ]]; then
  1681      local -r km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor.json"
  1682      # TODO(random-liu): Handle this for alternative container runtime.
  1683      local -r dm_config="${KUBE_HOME}/node-problem-detector/config/docker-monitor.json"
  1684      local -r sm_config="${KUBE_HOME}/node-problem-detector/config/systemd-monitor.json"
  1685      local -r ssm_config="${KUBE_HOME}/node-problem-detector/config/system-stats-monitor.json"
  1686  
  1687      local -r custom_km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor-counter.json"
  1688      local -r custom_sm_config="${KUBE_HOME}/node-problem-detector/config/systemd-monitor-counter.json"
  1689  
  1690      flags="${NPD_TEST_LOG_LEVEL:-"--v=2"} ${NPD_TEST_ARGS:-}"
  1691      flags+=" --logtostderr"
  1692      flags+=" --config.system-log-monitor=${km_config},${dm_config},${sm_config}"
  1693      flags+=" --config.system-stats-monitor=${ssm_config}"
  1694      flags+=" --config.custom-plugin-monitor=${custom_km_config},${custom_sm_config}"
  1695      local -r npd_port=${NODE_PROBLEM_DETECTOR_PORT:-20256}
  1696      flags+=" --port=${npd_port}"
  1697      if [[ -n "${EXTRA_NPD_ARGS:-}" ]]; then
  1698        flags+=" ${EXTRA_NPD_ARGS}"
  1699      fi
  1700    fi
  1701    flags+=" --apiserver-override=https://${KUBERNETES_MASTER_NAME}?inClusterConfig=false&auth=/var/lib/node-problem-detector/kubeconfig"
  1702  
  1703    # Write the systemd service file for node problem detector.
  1704    cat <<EOF >/etc/systemd/system/node-problem-detector.service
  1705  [Unit]
  1706  Description=Kubernetes node problem detector
  1707  Requires=network-online.target
  1708  After=network-online.target
  1709  
  1710  [Service]
  1711  Restart=always
  1712  RestartSec=10
  1713  ExecStart=${npd_bin} ${flags}
  1714  
  1715  [Install]
  1716  WantedBy=multi-user.target
  1717  EOF
  1718  
  1719    systemctl start node-problem-detector.service
  1720  }
  1721  
  1722  # Create the log file and set its properties.
  1723  #
  1724  # $1: the file to create.
  1725  # $2: the log owner uid to set for the log file.
  1726  # $3: the log owner gid to set for the log file. If $KUBE_POD_LOG_READERS_GROUP
  1727  # is set then this value will not be used.
  1728  function prepare-log-file {
  1729    touch "$1"
  1730    if [[ -n "${KUBE_POD_LOG_READERS_GROUP:-}" ]]; then
  1731      chmod 640 "$1"
  1732      chown "${2:-root}":"${KUBE_POD_LOG_READERS_GROUP}" "$1"
  1733    else
  1734      chmod 644 "$1"
  1735      chown "${2:-${LOG_OWNER_USER:-root}}":"${3:-${LOG_OWNER_GROUP:-root}}" "$1"
  1736    fi
  1737  }
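
        # Example invocation (as used by start-etcd-servers below):
        #   prepare-log-file /var/log/etcd.log "${ETCD_RUNASUSER:-0}"
        # With KUBE_POD_LOG_READERS_GROUP unset, this creates the file with mode
        # 644, owned by the given uid and by LOG_OWNER_GROUP (default root).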
  1738  
  1739  # Prepares parameters for kube-proxy manifest.
  1740  # $1 source path of kube-proxy manifest.
  1741  # Assumptions: HOST_PLATFORM and HOST_ARCH are specified by calling detect_host_info.
  1742  function prepare-kube-proxy-manifest-variables {
  1743    local -r src_file=$1
  1744  
  1745    local -r kubeconfig="--kubeconfig=/var/lib/kube-proxy/kubeconfig"
  1746    local kube_docker_registry="registry.k8s.io"
  1747    if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then
  1748      kube_docker_registry=${KUBE_DOCKER_REGISTRY}
  1749    fi
  1750    local -r kube_proxy_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-proxy.docker_tag)
  1751    local api_servers="--master=https://${KUBERNETES_MASTER_NAME}"
  1752    local params="${KUBEPROXY_TEST_LOG_LEVEL:-"--v=2"}"
  1753    if [[ -n "${FEATURE_GATES:-}" ]]; then
  1754      params+=" --feature-gates=${FEATURE_GATES}"
  1755    fi
  1756  
  1757    case "${KUBE_PROXY_MODE:-iptables}" in
  1758      iptables)
  1759        params+=" --proxy-mode=iptables --iptables-sync-period=1m --iptables-min-sync-period=10s"
  1760        ;;
  1761      ipvs)
  1762        # use 'nf_conntrack' instead of 'nf_conntrack_ipv4' for linux kernel >= 4.19
  1763        # https://github.com/kubernetes/kubernetes/pull/70398
  1764        local -r kernel_version=$(uname -r | cut -d\. -f1,2)
  1765        local conntrack_module="nf_conntrack"
  1766        if [[ $(printf '%s\n4.18\n' "${kernel_version}" | sort -V | tail -1) == "4.18" ]]; then
  1767          conntrack_module="nf_conntrack_ipv4"
  1768        fi
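              # Worked example of the check above: on kernel 5.10, printf emits
              # "5.10\n4.18\n", sort -V orders it as "4.18\n5.10\n", and tail -1
              # yields "5.10" != "4.18", so the newer nf_conntrack module is kept.
              # On kernel 4.14 the max is "4.18", so nf_conntrack_ipv4 is used.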
  1769  
  1770        if ! sudo modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh ${conntrack_module}; then
  1771          # If IPVS modules are not present, make sure the node does not come up as
  1772          # healthy.
  1773          exit 1
  1774        fi
  1775        params+=" --proxy-mode=ipvs --ipvs-sync-period=1m --ipvs-min-sync-period=10s"
  1776        ;;
  1777      nftables)
  1778        # Pass --conntrack-tcp-be-liberal so we can test that this makes the
  1779        # "proxy implementation should not be vulnerable to the invalid conntrack state bug"
  1780        # test pass. https://issues.k8s.io/122663#issuecomment-1885024015
  1781        params+=" --proxy-mode=nftables --conntrack-tcp-be-liberal"
  1782        ;;
  1783    esac
  1784  
  1785    if [[ -n "${KUBEPROXY_TEST_ARGS:-}" ]]; then
  1786      params+=" ${KUBEPROXY_TEST_ARGS}"
  1787    fi
  1788    if [[ -n "${DETECT_LOCAL_MODE:-}" ]]; then
  1789      params+=" --detect-local-mode=${DETECT_LOCAL_MODE}"
  1790    fi
  1791  
  1792    local container_env=""
  1793    local kube_cache_mutation_detector_env_name=""
  1794    local kube_cache_mutation_detector_env_value=""
  1795    if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
  1796      container_env="env:"
  1797      kube_cache_mutation_detector_env_name="- name: KUBE_CACHE_MUTATION_DETECTOR"
  1798      kube_cache_mutation_detector_env_value="value: \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
  1799    fi
  1800    local kube_watchlist_inconsistency_detector_env_name=""
  1801    local kube_watchlist_inconsistency_detector_env_value=""
  1802    if [[ -n "${ENABLE_KUBE_WATCHLIST_INCONSISTENCY_DETECTOR:-}" ]]; then
  1803      if [[ -z "${container_env}" ]]; then
  1804        container_env="env:"
  1805      fi
  1806      kube_watchlist_inconsistency_detector_env_name="- name: KUBE_WATCHLIST_INCONSISTENCY_DETECTOR"
  1807      kube_watchlist_inconsistency_detector_env_value="value: \"${ENABLE_KUBE_WATCHLIST_INCONSISTENCY_DETECTOR}\""
  1808    fi
  1809    sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" "${src_file}"
  1810    sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" "${src_file}"
  1811    sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" "${src_file}"
  1812    # TODO(#99245): Use multi-arch image and get rid of this.
  1813    sed -i -e "s@{{pillar\['host_arch'\]}}@${HOST_ARCH}@g" "${src_file}"
  1814    sed -i -e "s@{{params}}@${params}@g" "${src_file}"
  1815    sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}"
  1816    sed -i -e "s@{{kube_cache_mutation_detector_env_name}}@${kube_cache_mutation_detector_env_name}@g" "${src_file}"
  1817    sed -i -e "s@{{kube_cache_mutation_detector_env_value}}@${kube_cache_mutation_detector_env_value}@g" "${src_file}"
  1818    sed -i -e "s@{{kube_watchlist_inconsistency_detector_env_name}}@${kube_watchlist_inconsistency_detector_env_name}@g" "${src_file}"
  1819    sed -i -e "s@{{kube_watchlist_inconsistency_detector_env_value}}@${kube_watchlist_inconsistency_detector_env_value}@g" "${src_file}"
  1820    sed -i -e "s@{{ cpurequest }}@${KUBE_PROXY_CPU_REQUEST:-100m}@g" "${src_file}"
  1821    sed -i -e "s@{{ memoryrequest }}@${KUBE_PROXY_MEMORY_REQUEST:-50Mi}@g" "${src_file}"
  1822    sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" "${src_file}"
  1823    sed -i -e "s@{{kubernetes_service_host_env_value}}@${KUBERNETES_MASTER_NAME}@g" "${src_file}"
  1824    if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
  1825      sed -i -e "s@{{cluster_cidr}}@--cluster-cidr=${CLUSTER_IP_RANGE}@g" "${src_file}"
  1826    fi
  1827  }
  1828  
  1829  # Starts kube-proxy static pod.
  1830  function start-kube-proxy {
  1831    echo "Start kube-proxy static pod"
  1832    prepare-log-file /var/log/kube-proxy.log
  1833    local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-proxy.manifest"
  1834    prepare-kube-proxy-manifest-variables "${src_file}"
  1835  
  1836    cp "${src_file}" /etc/kubernetes/manifests
  1837  }
  1838  
  1839  # Replaces the variables in the etcd manifest file with the real values, and then
  1840  # copies the file to the manifest dir.
  1841  # $1: value for variable 'suffix'
  1842  # $2: value for variable 'port'
  1843  # $3: value for variable 'server_port'
  1844  # $4: value for variable 'cpulimit'
  1845  # $5: pod name, which should be either etcd or etcd-events
  1846  function prepare-etcd-manifest {
  1847    local host_name=${ETCD_HOSTNAME:-$(hostname -s)}
  1848  
  1849    local resolve_host_script_py='
  1850  import socket
  1851  import time
  1852  import sys
  1853  
  1854  timeout_sec=300
  1855  
  1856  def resolve(host):
  1857    for attempt in range(timeout_sec):
  1858      try:
  1859        print(socket.gethostbyname(host))
  1860        break
  1861      except Exception as e:
  1862        sys.stderr.write("error: resolving host %s to IP failed: %s\n" % (host, e))
  1863        time.sleep(1)
  1864        continue
  1865  
  1866  '
  1867  
  1868    local -r host_ip=$(python3 -c "${resolve_host_script_py}"$'\n'"resolve(\"${host_name}\")")
  1869    local etcd_cluster=""
  1870    local cluster_state="new"
  1871    local etcd_protocol="http"
  1872    local etcd_apiserver_protocol="http"
  1873    local etcd_creds=""
  1874    local etcd_apiserver_creds="${ETCD_APISERVER_CREDS:-}"
  1875    local etcd_extra_args="${ETCD_EXTRA_ARGS:-}"
  1876    local suffix="$1"
  1877    local etcd_listen_metrics_port="$2"
  1878    local etcdctl_certs=""
  1879  
  1880    if [[ -n "${INITIAL_ETCD_CLUSTER_STATE:-}" ]]; then
  1881      cluster_state="${INITIAL_ETCD_CLUSTER_STATE}"
  1882    fi
  1883    if [[ -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
  1884      etcd_creds=" --peer-trusted-ca-file /etc/srv/kubernetes/etcd-ca.crt --peer-cert-file /etc/srv/kubernetes/etcd-peer.crt --peer-key-file /etc/srv/kubernetes/etcd-peer.key -peer-client-cert-auth "
  1885      etcd_protocol="https"
  1886    fi
  1887  
  1888    # mTLS should only be enabled for the etcd server, not etcd-events. If the $1 suffix is empty, this is the etcd server.
  1889    if [[ -z "${suffix}" && -n "${ETCD_APISERVER_CA_KEY:-}" && -n "${ETCD_APISERVER_CA_CERT:-}" && -n "${ETCD_APISERVER_SERVER_KEY:-}" && -n "${ETCD_APISERVER_SERVER_CERT:-}" && -n "${ETCD_APISERVER_CLIENT_KEY:-}" && -n "${ETCD_APISERVER_CLIENT_CERT:-}" ]]; then
  1890      etcd_apiserver_creds=" --client-cert-auth --trusted-ca-file ${ETCD_APISERVER_CA_CERT_PATH} --cert-file ${ETCD_APISERVER_SERVER_CERT_PATH} --key-file ${ETCD_APISERVER_SERVER_KEY_PATH} "
  1891      etcdctl_certs="--cacert ${ETCD_APISERVER_CA_CERT_PATH} --cert ${ETCD_APISERVER_CLIENT_CERT_PATH} --key ${ETCD_APISERVER_CLIENT_KEY_PATH}"
  1892      etcd_apiserver_protocol="https"
  1893      etcd_listen_metrics_port="2382"
  1894      etcd_extra_args+=" --listen-metrics-urls=http://${ETCD_LISTEN_CLIENT_IP:-127.0.0.1}:${etcd_listen_metrics_port} "
  1895    fi
  1896  
  1897    if [[ -n "${ETCD_PROGRESS_NOTIFY_INTERVAL:-}" ]]; then
  1898      etcd_extra_args+=" --experimental-watch-progress-notify-interval=${ETCD_PROGRESS_NOTIFY_INTERVAL}"
  1899    fi
  1900  
  1901    for host in $(echo "${INITIAL_ETCD_CLUSTER:-${host_name}}" | tr "," "\n"); do
  1902      etcd_host="etcd-${host}=${etcd_protocol}://${host}:$3"
  1903      if [[ -n "${etcd_cluster}" ]]; then
  1904        etcd_cluster+=","
  1905      fi
  1906      etcd_cluster+="${etcd_host}"
  1907    done
  1908  
  1909    local -r temp_file="/tmp/$5"
  1910    cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd.manifest" "${temp_file}"
  1911    sed -i -e "s@{{ *suffix *}}@$1@g" "${temp_file}"
  1912    sed -i -e "s@{{ *port *}}@$2@g" "${temp_file}"
  1913    sed -i -e "s@{{ *server_port *}}@$3@g" "${temp_file}"
  1914    sed -i -e "s@{{ *cpulimit *}}@\"$4\"@g" "${temp_file}"
  1915    sed -i -e "s@{{ *hostname *}}@$host_name@g" "${temp_file}"
  1916    sed -i -e "s@{{ *host_ip *}}@$host_ip@g" "${temp_file}"
  1917    sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" "${temp_file}"
  1918    sed -i -e "s@{{ *liveness_probe_initial_delay *}}@${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${temp_file}"
  1919    sed -i -e "s@{{ *listen_client_ip *}}@${ETCD_LISTEN_CLIENT_IP:-127.0.0.1}@g" "${temp_file}"
  1920    # Get default storage backend from manifest file.
  1921    local -r default_storage_backend=$( \
  1922      grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" "${temp_file}" | \
  1923      sed -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g")
  1924    if [[ -n "${STORAGE_BACKEND:-}" ]]; then
  1925      sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@${STORAGE_BACKEND}@g" "${temp_file}"
  1926    else
  1927      sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g" "${temp_file}"
  1928    fi
  1929    if [[ "${STORAGE_BACKEND:-${default_storage_backend}}" == "etcd3" ]]; then
  1930      sed -i -e "s@{{ *quota_bytes *}}@--quota-backend-bytes=${ETCD_QUOTA_BACKEND_BYTES:-4294967296}@g" "${temp_file}"
  1931    else
  1932      sed -i -e "s@{{ *quota_bytes *}}@@g" "${temp_file}"
  1933    fi
  1934    sed -i -e "s@{{ *cluster_state *}}@$cluster_state@g" "${temp_file}"
  1935    if [[ -n "${ETCD_IMAGE:-}" ]]; then
  1936      sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@${ETCD_IMAGE}@g" "${temp_file}"
  1937    else
  1938      sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@\1@g" "${temp_file}"
  1939    fi
  1940    if [[ -n "${ETCD_DOCKER_REPOSITORY:-}" ]]; then
  1941      sed -i -e "s@{{ *pillar\.get('etcd_docker_repository', '\(.*\)') *}}@${ETCD_DOCKER_REPOSITORY}@g" "${temp_file}"
  1942    else
  1943      sed -i -e "s@{{ *pillar\.get('etcd_docker_repository', '\(.*\)') *}}@\1@g" "${temp_file}"
  1944    fi
  1945    sed -i -e "s@{{ *etcd_protocol *}}@$etcd_protocol@g" "${temp_file}"
  1946    sed -i -e "s@{{ *etcd_apiserver_protocol *}}@$etcd_apiserver_protocol@g" "${temp_file}"
  1947    sed -i -e "s@{{ *etcd_creds *}}@$etcd_creds@g" "${temp_file}"
  1948    sed -i -e "s@{{ *etcdctl_certs *}}@$etcdctl_certs@g" "${temp_file}"
  1949    sed -i -e "s@{{ *etcd_apiserver_creds *}}@$etcd_apiserver_creds@g" "${temp_file}"
  1950    sed -i -e "s@{{ *etcd_extra_args *}}@$etcd_extra_args@g" "${temp_file}"
  1951    if [[ -n "${ETCD_VERSION:-}" ]]; then
  1952      sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@${ETCD_VERSION}@g" "${temp_file}"
  1953    else
  1954      sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@\1@g" "${temp_file}"
  1955    fi
  1956    # Replace the volume host path.
  1957    sed -i -e "s@/mnt/master-pd/var/etcd@/mnt/disks/master-pd/var/etcd@g" "${temp_file}"
  1958    # Replace the run as user and run as group
  1959    container_security_context=""
  1960    if [[ -n "${ETCD_RUNASUSER:-}" && -n "${ETCD_RUNASGROUP:-}" ]]; then
  1961      container_security_context="\"securityContext\": {\"runAsUser\": ${ETCD_RUNASUSER}, \"runAsGroup\": ${ETCD_RUNASGROUP}, \"allowPrivilegeEscalation\": false, \"capabilities\": {\"drop\": [\"all\"]}},"
  1962    fi
  1963    sed -i -e "s@{{security_context}}@${container_security_context}@g" "${temp_file}"
  1964    mv "${temp_file}" /etc/kubernetes/manifests
  1965  }
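
        # Example invocations (from start-etcd-servers below):
        #   prepare-etcd-manifest ""        "2379" "2380" "200m" "etcd.manifest"
        #   prepare-etcd-manifest "-events" "4002" "2381" "100m" "etcd-events.manifest"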
  1966  
  1967  # Starts etcd server pod (and etcd-events pod if needed).
  1968  # More specifically, it prepares dirs and files, sets the variable value
  1969  # in the manifests, and copies them to /etc/kubernetes/manifests.
  1970  function start-etcd-servers {
  1971    echo "Start etcd pods"
  1972    if [[ -d /etc/etcd ]]; then
  1973      rm -rf /etc/etcd
  1974    fi
  1975    if [[ -e /etc/default/etcd ]]; then
  1976      rm -f /etc/default/etcd
  1977    fi
  1978    if [[ -e /etc/systemd/system/etcd.service ]]; then
  1979      rm -f /etc/systemd/system/etcd.service
  1980    fi
  1981    if [[ -e /etc/init.d/etcd ]]; then
  1982      rm -f /etc/init.d/etcd
  1983    fi
  1984    if [[ -n "${ETCD_RUNASUSER:-}" && -n "${ETCD_RUNASGROUP:-}" ]]; then
  1985      chown -R "${ETCD_RUNASUSER}":"${ETCD_RUNASGROUP}" /mnt/disks/master-pd/var/etcd
  1986    fi
  1987    prepare-log-file /var/log/etcd.log "${ETCD_RUNASUSER:-0}"
  1988    prepare-etcd-manifest "" "2379" "2380" "200m" "etcd.manifest"
  1989  
  1990    prepare-log-file /var/log/etcd-events.log "${ETCD_RUNASUSER:-0}"
  1991    prepare-etcd-manifest "-events" "4002" "2381" "100m" "etcd-events.manifest"
  1992  }
  1993  
  1994  # Replaces the variables in the konnectivity-server manifest file with the real values, and then
  1995  # copies the file to the manifest dir.
  1996  # $1: value for variable "agent_port"
  1997  # $2: value for variable "health_port"
  1998  # $3: value for variable "admin_port"
  1999  function prepare-konnectivity-server-manifest {
  2000    local -r temp_file="/tmp/konnectivity-server.yaml"
  2001    params=()
  2002    cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/konnectivity-server.yaml" "${temp_file}"
  2003    params+=("--log-file=/var/log/konnectivity-server.log")
  2004    params+=("--logtostderr=false")
  2005    params+=("--log-file-max-size=0")
  2006    if [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'grpc' ]]; then
  2007      params+=("--uds-name=/etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket")
  2008    elif [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'http-connect' ]]; then
  2009      # HTTP-CONNECT can work with either UDS or mTLS.
  2010      # Linking them here to make sure we get good coverage with two test configurations.
  2011      params+=("--server-ca-cert=${KONNECTIVITY_SERVER_CA_CERT_PATH}")
  2012      params+=("--server-cert=${KONNECTIVITY_SERVER_CERT_PATH}")
  2013      params+=("--server-key=${KONNECTIVITY_SERVER_KEY_PATH}")
  2014      params+=("--cluster-ca-cert=${KONNECTIVITY_AGENT_CA_CERT_PATH}")
  2015    fi
  2016    params+=("--cluster-cert=/etc/srv/kubernetes/pki/apiserver.crt")
  2017    params+=("--cluster-key=/etc/srv/kubernetes/pki/apiserver.key")
  2018    if [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'grpc' ]]; then
  2019      params+=("--mode=grpc")
  2020      params+=("--server-port=0")
  2021      params+=("--agent-namespace=kube-system")
  2022      params+=("--agent-service-account=konnectivity-agent")
  2023      params+=("--authentication-audience=system:konnectivity-server")
  2024      params+=("--kubeconfig=/etc/srv/kubernetes/konnectivity-server/kubeconfig")
  2025      params+=("--proxy-strategies=default")
  2026    elif [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'http-connect' ]]; then
  2027      # HTTP-CONNECT mode; pairs with the mTLS credentials configured above.
  2028      params+=("--mode=http-connect")
  2029      params+=("--server-port=8131")
  2030      params+=("--agent-namespace=")
  2031      params+=("--agent-service-account=")
  2032      params+=("--authentication-audience=")
  2033      # Need to fix ANP code to allow kubeconfig to be set with mtls.
  2034      params+=("--kubeconfig=")
  2035      params+=("--proxy-strategies=destHost,default")
  2036    else
  2037      echo "KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE must be set to either grpc or http-connect"
  2038      exit 1
  2039    fi
  2040  
  2041    params+=("--agent-port=$1")
  2042    params+=("--health-port=$2")
  2043    params+=("--admin-port=$3")
  2044    params+=("--kubeconfig-qps=75")
  2045    params+=("--kubeconfig-burst=150")
  2046    params+=("--keepalive-time=60s")
  2047    params+=("--frontend-keepalive-time=60s")
  2048    konnectivity_args=""
  2049    for param in "${params[@]}"; do
  2050      konnectivity_args+=", \"${param}\""
  2051    done
  2052    sed -i -e "s@{{ *konnectivity_args *}}@${konnectivity_args}@g" "${temp_file}"
  2053    sed -i -e "s@{{ *agent_port *}}@$1@g" "${temp_file}"
  2054    sed -i -e "s@{{ *health_port *}}@$2@g" "${temp_file}"
  2055    sed -i -e "s@{{ *admin_port *}}@$3@g" "${temp_file}"
  2056    sed -i -e "s@{{ *liveness_probe_initial_delay *}}@30@g" "${temp_file}"
  2057    if [[ -n "${KONNECTIVITY_SERVER_RUNASUSER:-}" && -n "${KONNECTIVITY_SERVER_RUNASGROUP:-}" && -n "${KONNECTIVITY_SERVER_SOCKET_WRITER_GROUP:-}" ]]; then
  2058      sed -i -e "s@{{ *run_as_user *}}@runAsUser: ${KONNECTIVITY_SERVER_RUNASUSER}@g" "${temp_file}"
  2059      sed -i -e "s@{{ *run_as_group *}}@runAsGroup: ${KONNECTIVITY_SERVER_RUNASGROUP}@g" "${temp_file}"
  2060      sed -i -e "s@{{ *supplemental_groups *}}@supplementalGroups: [${KUBE_PKI_READERS_GROUP}]@g" "${temp_file}"
  2061      sed -i -e "s@{{ *container_security_context *}}@securityContext:@g" "${temp_file}"
  2062      sed -i -e "s@{{ *capabilities *}}@capabilities:@g" "${temp_file}"
  2063      sed -i -e "s@{{ *drop_capabilities *}}@drop: [ ALL ]@g" "${temp_file}"
  2064      sed -i -e "s@{{ *disallow_privilege_escalation *}}@allowPrivilegeEscalation: false@g" "${temp_file}"
  2065      mkdir -p /etc/srv/kubernetes/konnectivity-server/
  2066      chown -R "${KONNECTIVITY_SERVER_RUNASUSER}":"${KONNECTIVITY_SERVER_RUNASGROUP}" /etc/srv/kubernetes/konnectivity-server
  2067      chmod g+w /etc/srv/kubernetes/konnectivity-server
  2068    else
  2069      sed -i -e "s@{{ *run_as_user *}}@@g" "${temp_file}"
  2070      sed -i -e "s@{{ *run_as_group *}}@@g" "${temp_file}"
  2071      sed -i -e "s@{{ *supplemental_groups *}}@@g" "${temp_file}"
  2072      sed -i -e "s@{{ *container_security_context *}}@@g" "${temp_file}"
  2073      sed -i -e "s@{{ *capabilities *}}@@g" "${temp_file}"
  2074      sed -i -e "s@{{ *drop_capabilities *}}@@g" "${temp_file}"
  2075      sed -i -e "s@{{ *disallow_privilege_escalation *}}@@g" "${temp_file}"
  2076    fi
  2077    mv "${temp_file}" /etc/kubernetes/manifests
  2078  }
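
        # Example invocation (from start-konnectivity-server below), using the
        # standard agent, health, and admin ports:
        #   prepare-konnectivity-server-manifest "8132" "8133" "8134"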
  2079  
  2080  # Starts konnectivity server pod.
  2081  # More specifically, it prepares dirs and files, sets the variable value
  2082  # in the manifests, and copies them to /etc/kubernetes/manifests.
  2083  function start-konnectivity-server {
  2084    echo "Start konnectivity server pods"
  2085    prepare-log-file /var/log/konnectivity-server.log "${KONNECTIVITY_SERVER_RUNASUSER:-0}"
  2086    prepare-konnectivity-server-manifest "8132" "8133" "8134"
  2087  }
  2088  
  2089  # Calculates the following variables based on env variables, which will be used
  2090  # by the manifests of several kube-master components.
  2091  #   CLOUD_CONFIG_OPT
  2092  #   CLOUD_CONFIG_VOLUME
  2093  #   CLOUD_CONFIG_MOUNT
  2094  #   DOCKER_REGISTRY
  2095  #   FLEXVOLUME_HOSTPATH_MOUNT
  2096  #   FLEXVOLUME_HOSTPATH_VOLUME
  2097  #   INSECURE_PORT_MAPPING
  2098  function compute-master-manifest-variables {
  2099    CLOUD_CONFIG_OPT=""
  2100    CLOUD_CONFIG_VOLUME=""
  2101    CLOUD_CONFIG_MOUNT=""
  2102    if [[ -f /etc/gce.conf ]]; then
  2103      CLOUD_CONFIG_OPT="--cloud-config=/etc/gce.conf"
  2104      CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\", \"type\": \"FileOrCreate\"}},"
  2105      CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true},"
  2106    fi
  2107    DOCKER_REGISTRY="registry.k8s.io"
  2108    if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then
  2109      DOCKER_REGISTRY="${KUBE_DOCKER_REGISTRY}"
  2110    fi
  2111  
  2112    FLEXVOLUME_HOSTPATH_MOUNT=""
  2113    FLEXVOLUME_HOSTPATH_VOLUME=""
  2114    if [[ -n "${VOLUME_PLUGIN_DIR:-}" ]]; then
  2115      FLEXVOLUME_HOSTPATH_MOUNT="{ \"name\": \"flexvolumedir\", \"mountPath\": \"${VOLUME_PLUGIN_DIR}\", \"readOnly\": true},"
  2116      FLEXVOLUME_HOSTPATH_VOLUME="{ \"name\": \"flexvolumedir\", \"hostPath\": {\"path\": \"${VOLUME_PLUGIN_DIR}\"}},"
  2117    fi
  2118  
  2119    INSECURE_PORT_MAPPING=""
  2120    if [[ "${ENABLE_APISERVER_INSECURE_PORT:-false}" == "true" ]]; then
  2121      # INSECURE_PORT_MAPPING is used by sed
  2122      # shellcheck disable=SC2089
  2123      INSECURE_PORT_MAPPING='{ "name": "local", "containerPort": 8080, "hostPort": 8080},'
  2124    fi
  2125    # shellcheck disable=SC2090
  2126    export INSECURE_PORT_MAPPING
  2127  }
  2128  
  2129  # A helper function that bind mounts kubelet dirs for running mount in a chroot
  2130  function prepare-mounter-rootfs {
  2131    echo "Prepare containerized mounter"
  2132    mount --bind "${CONTAINERIZED_MOUNTER_HOME}" "${CONTAINERIZED_MOUNTER_HOME}"
  2133    mount -o remount,exec "${CONTAINERIZED_MOUNTER_HOME}"
  2134    CONTAINERIZED_MOUNTER_ROOTFS="${CONTAINERIZED_MOUNTER_HOME}/rootfs"
  2135    mount --rbind /var/lib/kubelet/ "${CONTAINERIZED_MOUNTER_ROOTFS}/var/lib/kubelet"
  2136    mount --make-rshared "${CONTAINERIZED_MOUNTER_ROOTFS}/var/lib/kubelet"
  2137    mount --bind -o ro /proc "${CONTAINERIZED_MOUNTER_ROOTFS}/proc"
  2138    mount --bind -o ro /dev "${CONTAINERIZED_MOUNTER_ROOTFS}/dev"
  2139    cp /etc/resolv.conf "${CONTAINERIZED_MOUNTER_ROOTFS}/etc/"
  2140  }
  2141  
  2142  # Updates node labels used by addons.
  2143  function update-legacy-addon-node-labels() {
  2144    # need kube-apiserver to be ready
  2145    until kubectl get nodes; do
  2146      sleep 5
  2147    done
  2148    update-node-label "beta.kubernetes.io/metadata-proxy-ready=true,cloud.google.com/metadata-proxy-ready!=true" "cloud.google.com/metadata-proxy-ready=true"
  2149    update-node-label "beta.kubernetes.io/kube-proxy-ds-ready=true,node.kubernetes.io/kube-proxy-ds-ready!=true" "node.kubernetes.io/kube-proxy-ds-ready=true"
  2150    update-node-label "beta.kubernetes.io/masq-agent-ds-ready=true,node.kubernetes.io/masq-agent-ds-ready!=true" "node.kubernetes.io/masq-agent-ds-ready=true"
  2151  }
  2152  
  2153  # A helper function for labeling all nodes matching a given selector.
  2154  # Runs: kubectl label --overwrite nodes -l "${1}" "${2}"
  2155  # Retries on failure
  2156  #
  2157  # $1: label selector of nodes
  2158  # $2: label to apply
  2159  function update-node-label() {
  2160    local selector="$1"
  2161    local label="$2"
  2162    local retries=5
  2163    until (( retries == 0 )); do
  2164      if kubectl label --overwrite nodes -l "${selector}" "${label}"; then
  2165        break
  2166      fi
  2167      (( retries-- ))
  2168      sleep 3
  2169    done
  2170  }
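
        # Example (from update-legacy-addon-node-labels above): relabel nodes that
        # only carry the legacy beta label:
        #   update-node-label \
        #     "beta.kubernetes.io/kube-proxy-ds-ready=true,node.kubernetes.io/kube-proxy-ds-ready!=true" \
        #     "node.kubernetes.io/kube-proxy-ds-ready=true"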
  2171  
  2172  # Starts kubernetes controller manager.
  2173  # It prepares the log file, loads the docker image, calculates variables, sets them
  2174  # in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
  2175  #
  2176  # Assumed vars (which are calculated in function compute-master-manifest-variables)
  2177  #   CLOUD_CONFIG_OPT
  2178  #   CLOUD_CONFIG_VOLUME
  2179  #   CLOUD_CONFIG_MOUNT
  2180  #   DOCKER_REGISTRY
  2181  function start-kube-controller-manager {
  2182    if [[ -e "${KUBE_HOME}/bin/gke-internal-configure-helper.sh" ]]; then
  2183      if ! deploy-kube-controller-manager-via-kube-up; then
  2184        echo "kube-controller-manager is configured to not be deployed through kube-up."
  2185        return
  2186      fi
  2187    fi
  2188    echo "Start kubernetes controller-manager"
  2189    create-kubeconfig "kube-controller-manager" "${KUBE_CONTROLLER_MANAGER_TOKEN}"
  2190    prepare-log-file /var/log/kube-controller-manager.log "${KUBE_CONTROLLER_MANAGER_RUNASUSER:-0}"
  2191    # Calculate variables and assemble the command line.
  2192    local params=("${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=4"}" "${KUBE_CONTROLLER_MANAGER_TEST_ARGS:-}" "${CONTROLLER_MANAGER_TEST_ARGS:-}" "${CLOUD_CONFIG_OPT}")
  2193    local config_path='/etc/srv/kubernetes/kube-controller-manager/kubeconfig'
  2194    params+=("--use-service-account-credentials")
  2195    params+=("--cloud-provider=${CLOUD_PROVIDER_FLAG:-external}")
  2196    params+=("--kubeconfig=${config_path}" "--authentication-kubeconfig=${config_path}" "--authorization-kubeconfig=${config_path}")
  2197    params+=("--root-ca-file=${CA_CERT_BUNDLE_PATH}")
  2198    params+=("--service-account-private-key-file=${SERVICEACCOUNT_KEY_PATH}")
  2199    params+=("--volume-host-allow-local-loopback=false")
  2200    if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then
  2201      params+=("--enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}")
  2202    fi
  2203    if [[ -n "${INSTANCE_PREFIX:-}" ]]; then
  2204      params+=("--cluster-name=${INSTANCE_PREFIX}")
  2205    fi
  2206    if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
  2207      params+=("--cluster-cidr=${CLUSTER_IP_RANGE}")
  2208    fi
  2209    if [[ -n "${CA_KEY:-}" ]]; then
  2210      params+=("--cluster-signing-cert-file=${CA_CERT_PATH}")
  2211      params+=("--cluster-signing-key-file=${CA_KEY_PATH}")
  2212    fi
  2213    if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
  2214      params+=("--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}")
  2215    fi
  2216    if [[ -n "${CONCURRENT_SERVICE_SYNCS:-}" ]]; then
  2217      params+=("--concurrent-service-syncs=${CONCURRENT_SERVICE_SYNCS}")
  2218    fi
  2219    if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]]; then
  2220      params+=("--allocate-node-cidrs=true")
  2221    elif [[ -n "${ALLOCATE_NODE_CIDRS:-}" ]]; then
  2222      params+=("--allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}")
  2223    fi
  2224    if [[ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]]; then
  2225      params+=("--terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}")
  2226    fi
  2227    if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
  2228      params+=("--cidr-allocator-type=${NODE_IPAM_MODE}")
  2229      params+=("--configure-cloud-routes=false")
  2230    fi
  2231    if [[ -n "${FEATURE_GATES:-}" ]]; then
  2232      params+=("--feature-gates=${FEATURE_GATES}")
  2233    fi
  2234    if [[ -n "${VOLUME_PLUGIN_DIR:-}" ]]; then
  2235      params+=("--flex-volume-plugin-dir=${VOLUME_PLUGIN_DIR}")
  2236    fi
  2237    if [[ -n "${CLUSTER_SIGNING_DURATION:-}" ]]; then
  2238      params+=("--cluster-signing-duration=$CLUSTER_SIGNING_DURATION")
  2239    fi
  2240    if [[ -n "${PV_RECYCLER_OVERRIDE_TEMPLATE:-}" ]]; then
  2241      params+=("--pv-recycler-pod-template-filepath-nfs=$PV_RECYCLER_OVERRIDE_TEMPLATE")
  2242      params+=("--pv-recycler-pod-template-filepath-hostpath=$PV_RECYCLER_OVERRIDE_TEMPLATE")
  2243    fi
  2244    if [[ -n "${RUN_CONTROLLERS:-}" ]]; then
  2245      params+=("--controllers=${RUN_CONTROLLERS}")
  2246    fi
  2247  
  2248    local -r kube_rc_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-controller-manager.docker_tag)
  2249    local container_env=""
  2250    if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
  2251      container_env="{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}"
  2252    fi
  2253    if [[ -n "${ENABLE_KUBE_WATCHLIST_INCONSISTENCY_DETECTOR:-}" ]]; then
  2254      if [[ -n "${container_env}" ]]; then
  2255        container_env="${container_env}, "
  2256      fi
  2257      container_env+="{\"name\": \"KUBE_WATCHLIST_INCONSISTENCY_DETECTOR\", \"value\": \"${ENABLE_KUBE_WATCHLIST_INCONSISTENCY_DETECTOR}\"}"
  2258    fi
  2259    if [[ -n "${container_env}" ]]; then
  2260      container_env="\"env\":[${container_env}],"
  2261    fi
  2262  
  2263    local paramstring
  2264    paramstring="$(convert-manifest-params "${params[*]}")"
  2265    local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest"
  2266    # Evaluate variables.
  2267    sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
  2268    sed -i -e "s@{{pillar\['kube-controller-manager_docker_tag'\]}}@${kube_rc_docker_tag}@g" "${src_file}"
  2269    sed -i -e "s@{{params}}@${paramstring}@g" "${src_file}"
  2270    sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}"
  2271    sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
  2272    sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
  2273    sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
  2274    sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
  2275    sed -i -e "s@{{pv_recycler_mount}}@${PV_RECYCLER_MOUNT}@g" "${src_file}"
  2276    sed -i -e "s@{{pv_recycler_volume}}@${PV_RECYCLER_VOLUME}@g" "${src_file}"
  2277    sed -i -e "s@{{flexvolume_hostpath_mount}}@${FLEXVOLUME_HOSTPATH_MOUNT}@g" "${src_file}"
  2278    sed -i -e "s@{{flexvolume_hostpath}}@${FLEXVOLUME_HOSTPATH_VOLUME}@g" "${src_file}"
  2279    sed -i -e "s@{{cpurequest}}@${KUBE_CONTROLLER_MANAGER_CPU_REQUEST}@g" "${src_file}"
  2280  
  2281    if [[ -n "${KUBE_CONTROLLER_MANAGER_RUNASUSER:-}" && -n "${KUBE_CONTROLLER_MANAGER_RUNASGROUP:-}" ]]; then
  2282      sed -i -e "s@{{runAsUser}}@\"runAsUser\": ${KUBE_CONTROLLER_MANAGER_RUNASUSER},@g" "${src_file}"
  2283      sed -i -e "s@{{runAsGroup}}@\"runAsGroup\":${KUBE_CONTROLLER_MANAGER_RUNASGROUP},@g" "${src_file}"
  2284      sed -i -e "s@{{supplementalGroups}}@\"supplementalGroups\": [ ${KUBE_PKI_READERS_GROUP} ],@g" "${src_file}"
  2285    else
  2286      sed -i -e "s@{{runAsUser}}@@g" "${src_file}"
  2287      sed -i -e "s@{{runAsGroup}}@@g" "${src_file}"
  2288      sed -i -e "s@{{supplementalGroups}}@@g" "${src_file}"
  2289    fi
  2290  
  2291    cp "${src_file}" /etc/kubernetes/manifests
  2292  }
  2293  
  2294  # Starts cloud controller manager.
  2295  # It prepares the log file, loads the docker image, calculates variables, sets them
  2296  # in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
  2297  #
  2298  # Assumed vars (which are calculated in function compute-master-manifest-variables)
  2299  #   CLOUD_CONFIG_OPT
  2300  #   CLOUD_CONFIG_VOLUME
  2301  #   CLOUD_CONFIG_MOUNT
  2302  #   DOCKER_REGISTRY
  2303  function start-cloud-controller-manager {
  2304    echo "Start cloud provider controller-manager"
  2305    setup-addon-manifests "addons" "cloud-controller-manager"
  2306  
  2307    create-kubeconfig "cloud-controller-manager" "${CLOUD_CONTROLLER_MANAGER_TOKEN}"
  2308    echo "Preparing cloud provider controller-manager log file"
  2309    prepare-log-file /var/log/cloud-controller-manager.log "${CLOUD_CONTROLLER_MANAGER_RUNASUSER:-0}"
  2310    # Calculate variables and assemble the command line.
  2311    local params=("${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=4"}" "${CONTROLLER_MANAGER_TEST_ARGS:-}" "${CLOUD_CONFIG_OPT}")
  2312    params+=("--secure-port=10258")
  2313    params+=("--use-service-account-credentials")
  2314    params+=("--cloud-provider=gce")
  2315    params+=("--concurrent-node-syncs=10")
  2316    params+=("--kubeconfig=/etc/srv/kubernetes/cloud-controller-manager/kubeconfig")
  2317    params+=("--authorization-kubeconfig=/etc/srv/kubernetes/cloud-controller-manager/kubeconfig")
  2318    params+=("--authentication-kubeconfig=/etc/srv/kubernetes/cloud-controller-manager/kubeconfig")
  2319    if [[ -n "${INSTANCE_PREFIX:-}" ]]; then
  2320      params+=("--cluster-name=${INSTANCE_PREFIX}")
  2321    fi
  2322    if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
  2323      params+=("--cluster-cidr=${CLUSTER_IP_RANGE}")
  2324    fi
  2325    if [[ -n "${CONCURRENT_SERVICE_SYNCS:-}" ]]; then
  2326      params+=("--concurrent-service-syncs=${CONCURRENT_SERVICE_SYNCS}")
  2327    fi
  2328    if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]]; then
  2329      params+=("--allocate-node-cidrs=true")
  2330    elif [[ -n "${ALLOCATE_NODE_CIDRS:-}" ]]; then
  2331      params+=("--allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}")
  2332    fi
  2333    if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
  2334      params+=("--cidr-allocator-type=${NODE_IPAM_MODE}")
  2335      params+=("--configure-cloud-routes=false")
  2336    fi
  2337    if [[ -n "${FEATURE_GATES:-}" ]]; then
  2338      # Remove non-GCP feature gates, since the CCM will exit early
  2339      # if given a feature gate it doesn't recognize.
  2340      echo "Setting feature gates for cloud provider controller-manager from ${CCM_FEATURE_GATES}"
  2341      local CCM_FEATURE_GATES_FILTER
  2342      CCM_FEATURE_GATES_FILTER=$(echo "${CCM_FEATURE_GATES}" | sed "s/^/(/" | sed "s/,/=[^,]*|/g" | sed "s/$/=[^,]*)/")
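            # Worked example (hypothetical gate names): CCM_FEATURE_GATES="Foo,Bar"
            # yields the filter "(Foo=[^,]*|Bar=[^,]*)", which the grep below uses
            # to keep only matching "Name=value" entries from FEATURE_GATES.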
  2343      echo "Computing safe feature gates for cloud provider controller-manager from ${FEATURE_GATES} and filter ${CCM_FEATURE_GATES_FILTER}"
  2344      local safe_feature_gates
  2345      safe_feature_gates=$(echo "${FEATURE_GATES}" | { grep -E -o "(${CCM_FEATURE_GATES_FILTER})" || true; } | tr "\n" "," | sed "s/,$//")
  2346      echo "Setting safe feature gates for cloud provider controller-manager with ${safe_feature_gates}"
  2347      if [[ -n "${safe_feature_gates:-}" ]]; then
  2348        params+=("--feature-gates=${safe_feature_gates}")
  2349        echo "Computing unsafe feature gates for cloud provider controller-manager from ${CCM_FEATURE_GATES_FILTER}"
  2350        local filtered_feature_gates
  2351        filtered_feature_gates=$(echo "${FEATURE_GATES}" | sed "s/,/\n/g" | { grep -E -v "(${CCM_FEATURE_GATES_FILTER})" || true; } | sed -z "s/\n/,/g;s/,$/\n/")
  2352        echo "Feature gates that did not pass through the GCP filter:" "${filtered_feature_gates}"
  2353      else
  2354        echo "None of the given feature gates (${FEATURE_GATES}) were found to be safe to pass to the CCM"
  2355      fi
  2356    fi
  2357    if [[ -n "${RUN_CCM_CONTROLLERS:-}" ]]; then
  2358      params+=("--controllers=${RUN_CCM_CONTROLLERS}")
  2359    fi
  2360  
  2361    echo "Converting manifest for cloud provider controller-manager"
  2362    local paramstring
  2363    paramstring="$(convert-manifest-params "${params[*]}")"
  2364    local container_env=""
  2365    if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
  2366      container_env="{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}"
  2367    fi
  2368    if [[ -n "${ENABLE_KUBE_WATCHLIST_INCONSISTENCY_DETECTOR:-}" ]]; then
  2369      if [[ -n "${container_env}" ]]; then
  2370        container_env="${container_env}, "
  2371      fi
  2372      container_env+="{\"name\": \"KUBE_WATCHLIST_INCONSISTENCY_DETECTOR\", \"value\": \"${ENABLE_KUBE_WATCHLIST_INCONSISTENCY_DETECTOR}\"}"
  2373    fi
  2374    if [[ -n "${container_env}" ]]; then
  2375      container_env="\"env\":[${container_env}],"
  2376    fi
  2377  
  2378    echo "Applying over-rides for manifest for cloud provider controller-manager"
  2379    local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cloud-controller-manager.manifest"
  2380    # Evaluate variables.
  2381    sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
  2382    sed -i -e "s@{{params}}@${paramstring}@g" "${src_file}"
  2383    sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}"
  2384    sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
  2385    sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
  2386    sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
  2387    sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
  2388    sed -i -e "s@{{pv_recycler_mount}}@${PV_RECYCLER_MOUNT}@g" "${src_file}"
  2389    sed -i -e "s@{{pv_recycler_volume}}@${PV_RECYCLER_VOLUME}@g" "${src_file}"
  2390    sed -i -e "s@{{flexvolume_hostpath_mount}}@${FLEXVOLUME_HOSTPATH_MOUNT}@g" "${src_file}"
  2391    sed -i -e "s@{{flexvolume_hostpath}}@${FLEXVOLUME_HOSTPATH_VOLUME}@g" "${src_file}"
  2392    sed -i -e "s@{{cpurequest}}@${CLOUD_CONTROLLER_MANAGER_CPU_REQUEST}@g" "${src_file}"
  2393  
  2394    if [[ -n "${CLOUD_CONTROLLER_MANAGER_RUNASUSER:-}" && -n "${CLOUD_CONTROLLER_MANAGER_RUNASGROUP:-}" ]]; then
  2395      # Run cloud-controller-manager as non-root.
  2396      sed -i -e "s@{{runAsUser}}@\"runAsUser\": ${CLOUD_CONTROLLER_MANAGER_RUNASUSER},@g" "${src_file}"
  2397      sed -i -e "s@{{runAsGroup}}@\"runAsGroup\":${CLOUD_CONTROLLER_MANAGER_RUNASGROUP},@g" "${src_file}"
  2398      sed -i -e "s@{{supplementalGroups}}@\"supplementalGroups\": [ ${KUBE_PKI_READERS_GROUP} ],@g" "${src_file}"
  2399    else
  2400      sed -i -e "s@{{runAsUser}}@@g" "${src_file}"
  2401      sed -i -e "s@{{runAsGroup}}@@g" "${src_file}"
  2402      sed -i -e "s@{{supplementalGroups}}@@g" "${src_file}"
  2403    fi
  2404  
  2405    echo "Writing manifest for cloud provider controller-manager"
  2406    cp "${src_file}" /etc/kubernetes/manifests
  2407  
  2408    setup-addon-manifests "addons" "cloud-pvl-admission"
  2409    setup-cloud-pvl-admission-manifest
  2410  }
  2411  
  2412  # Starts kubernetes scheduler.
  2413  # It prepares the log file, loads the docker image, calculates variables, sets them
  2414  # in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
  2415  #
  2416  # Assumed vars (which are calculated in compute-master-manifest-variables)
  2417  #   DOCKER_REGISTRY
  2418  function start-kube-scheduler {
  2419    if [[ -e "${KUBE_HOME}/bin/gke-internal-configure-helper.sh" ]]; then
  2420      if ! deploy-kube-scheduler-via-kube-up; then
  2421        echo "kube-scheduler is configured to not be deployed through kube-up."
  2422        return
  2423      fi
  2424    fi
  2425    echo "Start kubernetes scheduler"
  2426    create-kubeconfig "kube-scheduler" "${KUBE_SCHEDULER_TOKEN}"
  2427    # User and group should never contain characters that need to be quoted
  2428    # shellcheck disable=SC2086
  2429    prepare-log-file /var/log/kube-scheduler.log ${KUBE_SCHEDULER_RUNASUSER:-2001}
  2430  
  2431    # Calculate variables and set them in the manifest.
  2432    params=("${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"}" "${SCHEDULER_TEST_ARGS:-}")
  2433    if [[ -n "${FEATURE_GATES:-}" ]]; then
  2434      params+=("--feature-gates=${FEATURE_GATES}")
  2435    fi
  2436  
  2437    # Scheduler Component Config takes precedence over some flags.
  2438    if [[ -n "${KUBE_SCHEDULER_CONFIG:-}" ]]; then
  2439      create-kube-scheduler-config
  2440      params+=("--config=/etc/srv/kubernetes/kube-scheduler/config")
  2441    else
  2442      params+=("--kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig")
  2443      if [[ -n "${SCHEDULER_POLICY_CONFIG:-}" ]]; then
  2444        create-kubescheduler-policy-config
  2445        params+=("--use-legacy-policy-config")
  2446        params+=("--policy-config-file=/etc/srv/kubernetes/kube-scheduler/policy-config")
  2447      fi
  2448    fi
  2449  
  2450    local config_path
  2451    config_path='/etc/srv/kubernetes/kube-scheduler/kubeconfig'
  2452    params+=("--authentication-kubeconfig=${config_path}" "--authorization-kubeconfig=${config_path}")
  2453  
  2454    local paramstring
  2455    paramstring="$(convert-manifest-params "${params[*]}")"
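          # For example, params of ("--v=2" "--kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig")
          # become a quoted, comma-separated flag list that is substituted for
          # {{params}} in the manifest below.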
  2456    local -r kube_scheduler_docker_tag=$(cat "${KUBE_HOME}/kube-docker-files/kube-scheduler.docker_tag")
  2457  
  2458    # Replace the template variables in the manifest with computed values.
  2459    local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest"
  2460  
  2461    sed -i -e "s@{{params}}@${paramstring}@g" "${src_file}"
  2462    sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
  2463    sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" "${src_file}"
  2464    sed -i -e "s@{{cpurequest}}@${KUBE_SCHEDULER_CPU_REQUEST}@g" "${src_file}"
  2465    sed -i -e "s@{{runAsUser}}@${KUBE_SCHEDULER_RUNASUSER:-2001}@g" "${src_file}"
  2466    sed -i -e "s@{{runAsGroup}}@${KUBE_SCHEDULER_RUNASGROUP:-2001}@g" "${src_file}"
  2467    cp "${src_file}" /etc/kubernetes/manifests
  2468  }
  2469  
  2470  # Starts cluster autoscaler.
  2471  # Assumed vars (which are calculated in function compute-master-manifest-variables)
  2472  #   CLOUD_CONFIG_OPT
  2473  #   CLOUD_CONFIG_VOLUME
  2474  #   CLOUD_CONFIG_MOUNT
  2475  function start-cluster-autoscaler {
  2476    if [[ "${ENABLE_CLUSTER_AUTOSCALER:-}" == "true" ]]; then
  2477      echo "Start kubernetes cluster autoscaler"
  2478      setup-addon-manifests "addons" "rbac/cluster-autoscaler"
  2479      create-kubeconfig "cluster-autoscaler" "${KUBE_CLUSTER_AUTOSCALER_TOKEN}"
  2480      prepare-log-file /var/log/cluster-autoscaler.log "${CLUSTER_AUTOSCALER_RUNASUSER:-0}"
  2481  
  2482      # Remove salt comments and replace variables with values
  2483      local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest"
  2484  
  2485      local params
  2486      read -r -a params <<< "${AUTOSCALER_MIG_CONFIG}"
  2487      params+=("${CLOUD_CONFIG_OPT}" "${AUTOSCALER_EXPANDER_CONFIG:---expander=price}")
  2488      params+=("--kubeconfig=/etc/srv/kubernetes/cluster-autoscaler/kubeconfig")
  2489  
  2490      # split the params into separate arguments passed to binary
  2491      local params_split
  2492      params_split=$(printf '"%s",' "${params[@]}")
  2493      params_split=${params_split%,}  # drop the trailing comma
  2494  
  2495      sed -i -e "s@{{params}}@${params_split}@g" "${src_file}"
  2496      sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
  2497      sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
  2498      sed -i -e "s@{%.*%}@@g" "${src_file}"
  2499  
  2500      if [[ -n "${CLUSTER_AUTOSCALER_RUNASUSER:-}" && -n "${CLUSTER_AUTOSCALER_RUNASGROUP:-}" ]]; then
  2501      # run-cluster-autoscaler-as-non-root
  2502      sed -i -e "s@{{runAsUser}}@\"runAsUser\": ${CLUSTER_AUTOSCALER_RUNASUSER},@g" "${src_file}"
  2503      sed -i -e "s@{{runAsGroup}}@\"runAsGroup\": ${CLUSTER_AUTOSCALER_RUNASGROUP},@g" "${src_file}"
  2504        sed -i -e "s@{{supplementalGroups}}@\"supplementalGroups\": [ ${KUBE_PKI_READERS_GROUP} ],@g" "${src_file}"
  2505      else
  2506        sed -i -e "s@{{runAsUser}}@@g" "${src_file}"
  2507        sed -i -e "s@{{runAsGroup}}@@g" "${src_file}"
  2508        sed -i -e "s@{{supplementalGroups}}@@g" "${src_file}"
  2509      fi
  2510  
  2511      cp "${src_file}" /etc/kubernetes/manifests
  2512    fi
  2513  }
  2514  
  2515  # A helper function for setting up addon manifests.
  2516  #
  2517  # $1: addon category under /etc/kubernetes
  2518  # $2: manifest source dir
  2519  # $3: (optional) auxiliary manifest source dir (accepted by callers for compatibility, currently unused)
  2520  function setup-addon-manifests {
  2521    local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
  2522    local -r dst_dir="/etc/kubernetes/$1/$2"
  2523  
  2524    copy-manifests "${src_dir}/$2" "${dst_dir}"
  2525  }
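
        # Example: `setup-addon-manifests "addons" "ip-masq-agent"` copies the
        # manifests under ${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/ip-masq-agent
        # into /etc/kubernetes/addons/ip-masq-agent (see copy-manifests below for
        # the file types that are copied).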
  2526  
  2527  # A function that downloads extra addons from a URL and puts them in the GCI
  2528  # manifests directory.
  2529  function download-extra-addons {
  2530    local -r out_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/gce-extras"
  2531  
  2532    mkdir -p "${out_dir}"
  2533  
  2534    # shellcheck disable=SC2206
  2535    local curl_cmd=(
  2536      "curl"
  2537      ${CURL_FLAGS}
  2538    )
  2539    if [[ -n "${EXTRA_ADDONS_HEADER:-}" ]]; then
  2540      curl_cmd+=("-H" "${EXTRA_ADDONS_HEADER}")
  2541    fi
  2542    curl_cmd+=("-o" "${out_dir}/extras.json")
  2543    curl_cmd+=("${EXTRA_ADDONS_URL}")
  2544  
  2545    "${curl_cmd[@]}"
  2546  }
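
        # For example, with a hypothetical EXTRA_ADDONS_URL of
        # https://example.com/extras.json and no EXTRA_ADDONS_HEADER set, this runs:
        #   curl ${CURL_FLAGS} -o "${out_dir}/extras.json" https://example.com/extras.json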
  2547  
  2548  # A function that fetches a GCE metadata value and echoes it out.
  2549  # Args:
  2550  #   $1 : URL path after /computeMetadata/v1/ (without leading slash).
  2551  #   $2 : An optional default value to echo out if the fetch fails.
  2552  #
  2553  # NOTE: this function is duplicated in configure.sh, any changes here should be
  2554  # duplicated there as well.
  2555  function get-metadata-value {
  2556    local default="${2:-}"
  2557  
  2558    local status
  2559    # shellcheck disable=SC2086
  2560    curl ${CURL_FLAGS} \
  2561      -H 'Metadata-Flavor: Google' \
  2562      "http://metadata/computeMetadata/v1/${1}" \
  2563    || status="$?"
  2564    status="${status:-0}"
  2565  
  2566    if [[ "${status}" -eq 0 || -z "${default}" ]]; then
  2567      return "${status}"
  2568    else
  2569      echo "${default}"
  2570    fi
  2571  }
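
        # Example usage (hypothetical attribute name), falling back to a default
        # when the metadata server is unreachable:
        #   location="$(get-metadata-value 'instance/attributes/cluster-location' 'unknown')"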
  2572  
  2573  # A helper function for copying manifests and setting dir/files
  2574  # permissions.
  2575  #
  2576  # $1: absolute source dir
  2577  # $2: absolute destination dir
  2578  function copy-manifests {
  2579    local -r src_dir="$1"
  2580    local -r dst_dir="$2"
  2581    if [[ ! -d "${dst_dir}" ]]; then
  2582      mkdir -p "${dst_dir}"
  2583    fi
  2584    if [[ -n "$(ls "${src_dir}"/*.yaml 2>/dev/null)" ]]; then
  2585      cp "${src_dir}/"*.yaml "${dst_dir}"
  2586    fi
  2587    if [[ -n "$(ls "${src_dir}"/*.json 2>/dev/null)" ]]; then
  2588      cp "${src_dir}/"*.json "${dst_dir}"
  2589    fi
  2590    if [[ -n "$(ls "${src_dir}"/*.yaml.in 2>/dev/null)" ]]; then
  2591      cp "${src_dir}/"*.yaml.in "${dst_dir}"
  2592    fi
  2593    chown -R root:root "${dst_dir}"
  2594    chmod 755 "${dst_dir}"
  2595    chmod 644 "${dst_dir}"/*
  2596  }
  2597  
  2598  # Fluentd resources are modified using ScalingPolicy CR, which may not be
  2599  # available at this point. Run this as a background process.
  2600  function wait-for-apiserver-and-update-fluentd {
  2601    local any_overrides=false
  2602    if [[ -n "${FLUENTD_GCP_MEMORY_LIMIT:-}" ]]; then
  2603      any_overrides=true
  2604    fi
  2605    if [[ -n "${FLUENTD_GCP_CPU_REQUEST:-}" ]]; then
  2606      any_overrides=true
  2607    fi
  2608    if [[ -n "${FLUENTD_GCP_MEMORY_REQUEST:-}" ]]; then
  2609      any_overrides=true
  2610    fi
  2611    if ! $any_overrides; then
  2612      # Nothing to do here.
  2613      exit
  2614    fi
  2615  
  2616    # Wait until ScalingPolicy CRD is in place.
  2617    until kubectl get scalingpolicies.scalingpolicy.kope.io
  2618    do
  2619      sleep 10
  2620    done
  2621  
  2622    # Single-shot, not managed by addon manager. Can be later modified or removed
  2623    # at will.
  2624    cat <<EOF | kubectl apply -f -
  2625  apiVersion: scalingpolicy.kope.io/v1alpha1
  2626  kind: ScalingPolicy
  2627  metadata:
  2628    name: fluentd-gcp-scaling-policy
  2629    namespace: kube-system
  2630  spec:
  2631    containers:
  2632    - name: fluentd-gcp
  2633      resources:
  2634        requests:
  2635        - resource: cpu
  2636          base: ${FLUENTD_GCP_CPU_REQUEST:-}
  2637        - resource: memory
  2638          base: ${FLUENTD_GCP_MEMORY_REQUEST:-}
  2639        limits:
  2640        - resource: memory
  2641          base: ${FLUENTD_GCP_MEMORY_LIMIT:-}
  2642  EOF
  2643  }
  2644  
  2645  # Trigger background process that will ultimately update fluentd resource
  2646  # requirements.
  2647  function start-fluentd-resource-update {
  2648    wait-for-apiserver-and-update-fluentd &
  2649  }
  2650  
  2651  # VolumeSnapshot CRDs and controller are installed by cluster addon manager,
  2652  # which may not be available at this point. Run this as a background process.
  2653  function wait-for-volumesnapshot-crd-and-controller {
  2654    # Wait until volumesnapshot CRDs and controller are in place.
  2655    echo "Wait until volume snapshot CRDs are installed"
  2656    until kubectl get volumesnapshotclasses.snapshot.storage.k8s.io
  2657    do
  2658      sleep 10
  2659    done
  2660  
  2661    until kubectl get volumesnapshotcontents.snapshot.storage.k8s.io
  2662    do
  2663      sleep 10
  2664    done
  2665  
  2666    until kubectl get volumesnapshots.snapshot.storage.k8s.io
  2667    do
  2668      sleep 10
  2669    done
  2670  
  2671    echo "Wait until volume snapshot RBAC rules are installed"
  2672    until kubectl get clusterrolebinding volume-snapshot-controller-role
  2673    do
  2674      sleep 10
  2675    done
  2676  
  2677    echo "Wait until volume snapshot controller is installed"
  2678    until kubectl get statefulset volume-snapshot-controller | grep volume-snapshot-controller | grep "1/1"
  2679    do
  2680      sleep 10
  2681    done
  2682  }
  2683  
  2684  # Trigger background process that will wait for volumesnapshot CRDs
  2685  # and snapshot-controller to be installed
  2686  function start-volumesnapshot-crd-and-controller {
  2687    wait-for-volumesnapshot-crd-and-controller &
  2688  }
  2689  
  2690  # Update {{ fluentd_container_runtime_service }} with actual container runtime name,
  2691  # and {{ container_runtime_endpoint }} with actual container runtime
  2692  # endpoint.
  2693  function update-container-runtime {
  2694    local -r file="$1"
  2695    local -r container_runtime_endpoint="${CONTAINER_RUNTIME_ENDPOINT:-unix:///run/containerd/containerd.sock}"
  2696    sed -i \
  2697      -e "s@{{ *fluentd_container_runtime_service *}}@${FLUENTD_CONTAINER_RUNTIME_SERVICE:-${CONTAINER_RUNTIME_NAME:-containerd}}@g" \
  2698      -e "s@{{ *container_runtime_endpoint *}}@${container_runtime_endpoint#unix://}@g" \
  2699      "${file}"
  2700  }
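
        # For example, with the default CONTAINER_RUNTIME_ENDPOINT of
        # unix:///run/containerd/containerd.sock, the ${container_runtime_endpoint#unix://}
        # expansion above substitutes /run/containerd/containerd.sock for
        # {{ container_runtime_endpoint }}.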
  2701  
  2702  # Remove configuration in yaml file if node journal is not enabled.
  2703  function update-node-journal {
  2704    local -r configmap_yaml="$1"
  2705    if [[ "${ENABLE_NODE_JOURNAL:-}" != "true" ]]; then
  2706      # Removes all lines between two patterns (throws away node-journal)
  2707      sed -i -e "/# BEGIN_NODE_JOURNAL/,/# END_NODE_JOURNAL/d" "${configmap_yaml}"
  2708    fi
  2709  }
  2710  
  2711  # Updates parameters in yaml file for prometheus-to-sd configuration, or
  2712  # removes component if it is disabled.
  2713  function update-prometheus-to-sd-parameters {
  2714    if [[ "${ENABLE_PROMETHEUS_TO_SD:-}" == "true" ]]; then
  2715      sed -i -e "s@{{ *prometheus_to_sd_prefix *}}@${PROMETHEUS_TO_SD_PREFIX}@g" "$1"
  2716      sed -i -e "s@{{ *prometheus_to_sd_endpoint *}}@${PROMETHEUS_TO_SD_ENDPOINT}@g" "$1"
  2717    else
  2718      # Removes all lines between two patterns (throws away prometheus-to-sd)
  2719      sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" "$1"
  2720    fi
  2721  }
  2722  
  2723  # Updates parameters in yaml file for prometheus-to-sd configuration in daemon sets, or
  2724  # removes component if it is disabled.
  2725  function update-daemon-set-prometheus-to-sd-parameters {
  2726    if [[ "${DISABLE_PROMETHEUS_TO_SD_IN_DS:-}" == "true" ]]; then
  2727      # Removes all lines between two patterns (throws away prometheus-to-sd)
  2728      sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" "$1"
  2729    else
  2730      update-prometheus-to-sd-parameters "$1"
  2731    fi
  2732  }
  2733  
  2734  # Updates parameters in yaml file for event-exporter configuration
  2735  function update-event-exporter {
  2736      local -r stackdriver_resource_model="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"
  2737      sed -i -e "s@{{ exporter_sd_resource_model }}@${stackdriver_resource_model}@g" "$1"
  2738      sed -i -e "s@{{ exporter_sd_endpoint }}@${STACKDRIVER_ENDPOINT:-}@g" "$1"
  2739  }
  2740  
  2741  # Sets up the manifests of coreDNS for k8s addons.
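        # NOTE: this helper (like setup-kube-dns-manifest, setup-nodelocaldns-manifest
        # and setup-netd-manifest below) reads ${dst_dir} without declaring it; the
        # value is visible here via bash dynamic scoping because the caller,
        # start-kube-addons, declares dst_dir as a local.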
  2742  function setup-coredns-manifest {
  2743    setup-addon-manifests "addons" "0-dns/coredns"
  2744    local -r coredns_file="${dst_dir}/0-dns/coredns/coredns.yaml"
  2745    mv "${dst_dir}/0-dns/coredns/coredns.yaml.in" "${coredns_file}"
  2746    # Replace the salt configurations with variable values.
  2747    sed -i -e "s@dns_domain@${DNS_DOMAIN}@g" "${coredns_file}"
  2748    sed -i -e "s@dns_server@${DNS_SERVER_IP}@g" "${coredns_file}"
  2749    sed -i -e "s@{{ *pillar\['service_cluster_ip_range'\] *}}@${SERVICE_CLUSTER_IP_RANGE}@g" "${coredns_file}"
  2750    sed -i -e "s@dns_memory_limit@${DNS_MEMORY_LIMIT:-170Mi}@g" "${coredns_file}"
  2751  
  2752    if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
  2753      setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce"
  2754      local -r dns_autoscaler_file="${dst_dir}/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml"
  2755      sed -i'' -e "s@{{.Target}}@${COREDNS_AUTOSCALER}@g" "${dns_autoscaler_file}"
  2756    fi
  2757  }
  2758  
  2759  # Sets up the manifests of Fluentd configmap and yamls for k8s addons.
  2760  function setup-fluentd {
  2761    local -r dst_dir="$1"
  2762    local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
  2763    local -r fluentd_gcp_scaler_yaml="${dst_dir}/fluentd-gcp/scaler-deployment.yaml"
  2764    # Ingest logs against new resources like "k8s_container" and "k8s_node" if
  2765    # LOGGING_STACKDRIVER_RESOURCE_TYPES is "new".
  2766    # Ingest logs against old resources like "gke_container" and "gce_instance" if
  2767    # LOGGING_STACKDRIVER_RESOURCE_TYPES is "old".
  2768    if [[ "${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}" == "new" ]]; then
  2769      local -r fluentd_gcp_configmap_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-configmap.yaml"
  2770      fluentd_gcp_configmap_name="fluentd-gcp-config"
  2771    else
  2772      local -r fluentd_gcp_configmap_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-configmap-old.yaml"
  2773      fluentd_gcp_configmap_name="fluentd-gcp-config-old"
  2774    fi
  2775    sed -i -e "s@{{ fluentd_gcp_configmap_name }}@${fluentd_gcp_configmap_name}@g" "${fluentd_gcp_yaml}"
  2776    fluentd_gcp_yaml_version="${FLUENTD_GCP_YAML_VERSION:-v3.2.0}"
  2777    sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_yaml}"
  2778    sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_scaler_yaml}"
  2779    fluentd_gcp_version="${FLUENTD_GCP_VERSION:-1.6.17}"
  2780    sed -i -e "s@{{ fluentd_gcp_version }}@${fluentd_gcp_version}@g" "${fluentd_gcp_yaml}"
  2781    update-daemon-set-prometheus-to-sd-parameters "${fluentd_gcp_yaml}"
  2782    start-fluentd-resource-update "${fluentd_gcp_yaml}"
  2783    update-container-runtime "${fluentd_gcp_configmap_yaml}"
  2784    update-node-journal "${fluentd_gcp_configmap_yaml}"
  2785  }
  2786  
  2787  # Sets up the manifests of kube-dns for k8s addons.
  2788  function setup-kube-dns-manifest {
  2789    setup-addon-manifests "addons" "0-dns/kube-dns"
  2790    local -r kubedns_file="${dst_dir}/0-dns/kube-dns/kube-dns.yaml"
  2791    mv "${dst_dir}/0-dns/kube-dns/kube-dns.yaml.in" "${kubedns_file}"
  2792    if [ -n "${CUSTOM_KUBE_DNS_YAML:-}" ]; then
  2793      # Replace with custom GKE kube-dns deployment.
  2794      cat > "${kubedns_file}" <<EOF
  2795  $CUSTOM_KUBE_DNS_YAML
  2796  EOF
  2797      update-prometheus-to-sd-parameters "${kubedns_file}"
  2798    fi
  2799    # Replace the salt configurations with variable values.
  2800    sed -i -e "s@dns_domain@${DNS_DOMAIN}@g" "${kubedns_file}"
  2801    sed -i -e "s@dns_server@${DNS_SERVER_IP}@g" "${kubedns_file}"
  2802    sed -i -e "s@dns_memory_limit@${DNS_MEMORY_LIMIT:-170Mi}@g" "${kubedns_file}"
  2803  
  2804    if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
  2805      setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce"
  2806      local -r dns_autoscaler_file="${dst_dir}/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml"
  2807      sed -i'' -e "s@{{.Target}}@${KUBEDNS_AUTOSCALER}@g" "${dns_autoscaler_file}"
  2808    fi
  2809  }
  2810  
  2811  # Sets up the manifests of local dns cache agent for k8s addons.
  2812  function setup-nodelocaldns-manifest {
  2813    setup-addon-manifests "addons" "0-dns/nodelocaldns"
  2814    local -r localdns_file="${dst_dir}/0-dns/nodelocaldns/nodelocaldns.yaml"
  2815    setup-addon-custom-yaml "addons" "0-dns/nodelocaldns" "nodelocaldns.yaml" "${CUSTOM_NODELOCAL_DNS_YAML:-}"
  2816    # Eventually all the __PILLAR__ placeholders will be gone, but they're still in nodelocaldns for backward compatibility.
  2817    sed -i -e "s/__PILLAR__DNS__DOMAIN__/${DNS_DOMAIN}/g" "${localdns_file}"
  2818    sed -i -e "s/__PILLAR__DNS__SERVER__/${DNS_SERVER_IP}/g" "${localdns_file}"
  2819    sed -i -e "s/__PILLAR__LOCAL__DNS__/${LOCAL_DNS_IP}/g" "${localdns_file}"
  2820  }
  2821  
  2822  # Sets up the manifests of netd for k8s addons.
  2823  function setup-netd-manifest {
  2824    local -r netd_file="${dst_dir}/netd/netd.yaml"
  2825    mkdir -p "${dst_dir}/netd"
  2826    touch "${netd_file}"
  2827    if [ -n "${CUSTOM_NETD_YAML:-}" ]; then
  2828      # Replace with custom GCP netd deployment.
  2829      cat > "${netd_file}" <<EOF
  2830  $CUSTOM_NETD_YAML
  2831  EOF
  2832    fi
  2833  }
  2834  
  2835  # A helper function to set up a custom yaml for a k8s addon.
  2836  #
  2837  # $1: addon category under /etc/kubernetes
  2838  # $2: manifest source dir
  2839  # $3: manifest file
  2840  # $4: custom yaml
  2841  function setup-addon-custom-yaml {
  2842    local -r manifest_path="/etc/kubernetes/$1/$2/$3"
  2843    local -r custom_yaml="$4"
  2844    if [ -n "${custom_yaml:-}" ]; then
  2845      # Replace with custom manifest.
  2846      cat > "${manifest_path}" <<EOF
  2847  $custom_yaml
  2848  EOF
  2849    fi
  2850  }
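
        # Example usage (taken from the calico setup in start-kube-addons below):
        #   setup-addon-custom-yaml "addons" "calico-policy-controller" \
        #     "typha-deployment.yaml" "${CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"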
  2851  
  2852  # Prepares the manifests of k8s addons, and starts the addon manager.
  2853  # Vars assumed:
  2854  #   CLUSTER_NAME
  2855  function start-kube-addons {
  2856    echo "Prepare kube-addons manifests and start kube addon manager"
  2857    local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
  2858    local -r dst_dir="/etc/kubernetes/addons"
  2859  
  2860    create-kubeconfig "addon-manager" "${ADDON_MANAGER_TOKEN}"
  2861    # User and group should never contain characters that need to be quoted
  2862    # shellcheck disable=SC2086
  2863    prepare-log-file /var/log/kube-addon-manager.log ${KUBE_ADDON_MANAGER_RUNASUSER:-2002}
  2864  
  2865    # Prep additional kube-up-specific RBAC objects.
  2866    setup-addon-manifests "addons" "rbac/kubelet-api-auth"
  2867    setup-addon-manifests "addons" "rbac/kubelet-cert-rotation"
  2868    if [[ "${REGISTER_MASTER_KUBELET:-false}" == "true" ]]; then
  2869      setup-addon-manifests "addons" "rbac/legacy-kubelet-user"
  2870    else
  2871      setup-addon-manifests "addons" "rbac/legacy-kubelet-user-disable"
  2872    fi
  2873  
  2874    # Set up manifests of other addons.
  2875    if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" ]] && [[ "${KUBE_PROXY_DISABLE:-}" != "true" ]]; then
  2876      if [ -n "${CUSTOM_KUBE_PROXY_YAML:-}" ]; then
  2877        # Replace with custom GKE kube proxy.
  2878        cat > "$src_dir/kube-proxy/kube-proxy-ds.yaml" <<EOF
  2879  $CUSTOM_KUBE_PROXY_YAML
  2880  EOF
  2881        update-daemon-set-prometheus-to-sd-parameters "$src_dir/kube-proxy/kube-proxy-ds.yaml"
  2882      fi
  2883      prepare-kube-proxy-manifest-variables "$src_dir/kube-proxy/kube-proxy-ds.yaml"
  2884      setup-addon-manifests "addons" "kube-proxy"
  2885    fi
  2886    if [[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]] &&
  2887       [[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then
  2888      if [[ "${ENABLE_METADATA_AGENT:-}" == "stackdriver" ]]; then
  2889        metadata_agent_cpu_request="${METADATA_AGENT_CPU_REQUEST:-40m}"
  2890        metadata_agent_memory_request="${METADATA_AGENT_MEMORY_REQUEST:-50Mi}"
  2891        metadata_agent_cluster_level_cpu_request="${METADATA_AGENT_CLUSTER_LEVEL_CPU_REQUEST:-40m}"
  2892        metadata_agent_cluster_level_memory_request="${METADATA_AGENT_CLUSTER_LEVEL_MEMORY_REQUEST:-50Mi}"
  2893        setup-addon-manifests "addons" "metadata-agent/stackdriver"
  2894        metadata_agent_yaml="${dst_dir}/metadata-agent/stackdriver/metadata-agent.yaml"
  2895        sed -i -e "s@{{ metadata_agent_cpu_request }}@${metadata_agent_cpu_request}@g" "${metadata_agent_yaml}"
  2896        sed -i -e "s@{{ metadata_agent_memory_request }}@${metadata_agent_memory_request}@g" "${metadata_agent_yaml}"
  2897        sed -i -e "s@{{ metadata_agent_cluster_level_cpu_request }}@${metadata_agent_cluster_level_cpu_request}@g" "${metadata_agent_yaml}"
  2898        sed -i -e "s@{{ metadata_agent_cluster_level_memory_request }}@${metadata_agent_cluster_level_memory_request}@g" "${metadata_agent_yaml}"
  2899      fi
  2900    fi
  2901    if [[ "${ENABLE_METRICS_SERVER:-}" == "true" ]]; then
  2902      setup-addon-manifests "addons" "metrics-server"
  2903      base_metrics_server_cpu="40m"
  2904      base_metrics_server_memory="40Mi"
  2905      metrics_server_memory_per_node="4"
  2906      metrics_server_min_cluster_size="16"
  2907      if [[ "${ENABLE_SYSTEM_ADDON_RESOURCE_OPTIMIZATIONS:-}" == "true" ]]; then
  2908        base_metrics_server_cpu="40m"
  2909        base_metrics_server_memory="35Mi"
  2910        metrics_server_memory_per_node="4"
  2911        metrics_server_min_cluster_size="5"
  2912      fi
  2913      local -r metrics_server_yaml="${dst_dir}/metrics-server/metrics-server-deployment.yaml"
  2914      sed -i -e "s@{{ base_metrics_server_cpu }}@${base_metrics_server_cpu}@g" "${metrics_server_yaml}"
  2915      sed -i -e "s@{{ base_metrics_server_memory }}@${base_metrics_server_memory}@g" "${metrics_server_yaml}"
  2916      sed -i -e "s@{{ metrics_server_memory_per_node }}@${metrics_server_memory_per_node}@g" "${metrics_server_yaml}"
  2917      sed -i -e "s@{{ metrics_server_min_cluster_size }}@${metrics_server_min_cluster_size}@g" "${metrics_server_yaml}"
  2918    fi
  2919    if [[ "${ENABLE_NVIDIA_GPU_DEVICE_PLUGIN:-}" == "true" ]]; then
  2920      setup-addon-manifests "addons" "device-plugins/nvidia-gpu"
  2921    fi
  2922    # Setting up the konnectivity-agent daemonset
  2923    if [[ "${RUN_KONNECTIVITY_PODS:-false}" == "true" ]]; then
  2924      setup-addon-manifests "addons" "konnectivity-agent"
  2925      setup-konnectivity-agent-manifest
  2926    fi
  2927    if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]]; then
  2928      # Create a new directory for the DNS addon and prepend a "0" on the name.
  2929      # Prepending "0" to the directory ensures that add-on manager
  2930      # creates the dns service first. This ensures no other add-on
  2931      # can "steal" the designated DNS clusterIP.
  2932      BASE_ADDON_DIR=${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty
  2933      BASE_DNS_DIR=${BASE_ADDON_DIR}/dns
  2934      NEW_DNS_DIR=${BASE_ADDON_DIR}/0-dns
  2935      mkdir "${NEW_DNS_DIR}" && mv "${BASE_DNS_DIR}"/* "${NEW_DNS_DIR}" && rm -r "${BASE_DNS_DIR}"
  2936      if [[ "${CLUSTER_DNS_CORE_DNS:-}" == "true" ]]; then
  2937        setup-coredns-manifest
  2938      else
  2939        setup-kube-dns-manifest
  2940      fi
  2941      if [[ "${ENABLE_NODELOCAL_DNS:-}" == "true" ]]; then
  2942        setup-nodelocaldns-manifest
  2943      fi
  2944    fi
  2945    if [[ "${ENABLE_NETD:-}" == "true" ]]; then
  2946      setup-netd-manifest
  2947    fi
  2948    if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
  2949       [[ "${LOGGING_DESTINATION:-}" == "elasticsearch" ]] && \
  2950       [[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]]; then
  2951    echo "fluentd-elasticsearch addon is no longer included here. Terminating cluster initialization."
  2952      echo "The addon can be installed from https://github.com/kubernetes-sigs/instrumentation-addons"
  2953      exit 1
  2954    fi
  2955    if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
  2956       [[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then
  2957      setup-addon-manifests "addons" "fluentd-gcp"
  2958      setup-fluentd "${dst_dir}"
  2959      local -r event_exporter_yaml="${dst_dir}/fluentd-gcp/event-exporter.yaml"
  2960      update-event-exporter "${event_exporter_yaml}"
  2961      update-prometheus-to-sd-parameters "${event_exporter_yaml}"
  2962    fi
  2963    if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "daemonset" ]]; then
  2964      setup-addon-manifests "addons" "node-problem-detector"
  2965    fi
  2966    if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
  2967      # Setup role binding(s) for standalone node problem detector.
  2968      if [[ -n "${NODE_PROBLEM_DETECTOR_TOKEN:-}" ]]; then
  2969        setup-addon-manifests "addons" "node-problem-detector/standalone"
  2970      fi
  2971      setup-addon-manifests "addons" "node-problem-detector/kubelet-user-standalone" "node-problem-detector"
  2972    fi
  2973    if echo "${ADMISSION_CONTROL:-}" | grep -q "LimitRanger"; then
  2974      setup-addon-manifests "admission-controls" "limit-range" "gce"
  2975    fi
  2976    setup-addon-manifests "addons" "admission-resource-quota-critical-pods"
  2977    if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
  2978      setup-addon-manifests "addons" "calico-policy-controller"
  2979  
  2980      setup-addon-custom-yaml "addons" "calico-policy-controller" "calico-node-daemonset.yaml" "${CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"
  2981      setup-addon-custom-yaml "addons" "calico-policy-controller" "typha-deployment.yaml" "${CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"
  2982  
  2983      # Configure Calico CNI directory.
  2984      local -r ds_file="${dst_dir}/calico-policy-controller/calico-node-daemonset.yaml"
  2985      sed -i -e "s@__CALICO_CNI_DIR__@/home/kubernetes/bin@g" "${ds_file}"
  2986    fi
  2987    if [[ "${NETWORK_POLICY_PROVIDER:-}" == "kube-network-policies" ]]; then
  2988      setup-addon-manifests "addons" "kube-network-policies"
  2989    fi
  2990    if [[ "${ENABLE_DEFAULT_STORAGE_CLASS:-}" == "true" ]]; then
  2991      setup-addon-manifests "addons" "storage-class/gce"
  2992    fi
  2993    if [[ "${ENABLE_VOLUME_SNAPSHOTS:-}" == "true" ]]; then
  2994      setup-addon-manifests "addons" "volumesnapshots/crd"
  2995      setup-addon-manifests "addons" "volumesnapshots/volume-snapshot-controller"
  2996      start-volumesnapshot-crd-and-controller
  2997    fi
  2998    if [[ "${ENABLE_IP_MASQ_AGENT:-}" == "true" ]]; then
  2999      setup-addon-manifests "addons" "ip-masq-agent"
  3000    fi
  3001    if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]]; then
  3002      setup-addon-manifests "addons" "metadata-proxy/gce"
  3003      local -r metadata_proxy_yaml="${dst_dir}/metadata-proxy/gce/metadata-proxy.yaml"
  3004      update-daemon-set-prometheus-to-sd-parameters "${metadata_proxy_yaml}"
  3005    fi
  3006    if [[ "${ENABLE_ISTIO:-}" == "true" ]]; then
  3007      if [[ "${ISTIO_AUTH_TYPE:-}" == "MUTUAL_TLS" ]]; then
  3008        setup-addon-manifests "addons" "istio/auth"
  3009      else
  3010        setup-addon-manifests "addons" "istio/noauth"
  3011      fi
  3012    fi
  3013    if [[ -n "${EXTRA_ADDONS_URL:-}" ]]; then
  3014      download-extra-addons
  3015      setup-addon-manifests "addons" "gce-extras"
  3016    fi
  3017  
  3018  
  3019    # Place addon manager pod manifest.
  3020    src_file="${src_dir}/kube-addon-manager.yaml"
  3021    sed -i -e "s@{{kubectl_prune_whitelist_override}}@${KUBECTL_PRUNE_WHITELIST_OVERRIDE:-}@g" "${src_file}"
  3022    sed -i -e "s@{{kubectl_extra_prune_whitelist}}@${ADDON_MANAGER_PRUNE_WHITELIST:-}@g" "${src_file}"
  3023    sed -i -e "s@{{runAsUser}}@${KUBE_ADDON_MANAGER_RUNASUSER:-2002}@g" "${src_file}"
  3024    sed -i -e "s@{{runAsGroup}}@${KUBE_ADDON_MANAGER_RUNASGROUP:-2002}@g" "${src_file}"
  3025    cp "${src_file}" /etc/kubernetes/manifests
  3026  }
  3027  
  3028  function setup-konnectivity-agent-manifest {
  3029      local -r manifest="/etc/kubernetes/addons/konnectivity-agent/konnectivity-agent-ds.yaml"
  3030      sed -i "s|__APISERVER_IP__|${KUBERNETES_MASTER_NAME}|g" "${manifest}"
  3031      if [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'http-connect' ]]; then
  3032        sed -i "s|__EXTRA_PARAMS__|\t\t\"--agent-cert=/etc/srv/kubernetes/pki/konnectivity-agent/client.crt\",\n\t\t\"--agent-key=/etc/srv/kubernetes/pki/konnectivity-agent/client.key\",|g" "${manifest}"
  3033        sed -i "s|__EXTRA_VOL_MNTS__|            - name: pki\n              mountPath: /etc/srv/kubernetes/pki/konnectivity-agent|g" "${manifest}"
  3034        sed -i "s|__EXTRA_VOLS__|        - name: pki\n          hostPath:\n            path: /etc/srv/kubernetes/pki/konnectivity-agent|g" "${manifest}"
  3035      else
  3036        sed -i "s|__EXTRA_PARAMS__||g" "${manifest}"
  3037        sed -i "s|__EXTRA_VOL_MNTS__||g" "${manifest}"
  3038        sed -i "s|__EXTRA_VOLS__||g" "${manifest}"
  3039      fi
  3040  }
  3041  
  3042  function setup-cloud-pvl-admission-manifest {
  3043    local -r manifest="/etc/kubernetes/addons/cloud-pvl-admission/mutating-webhook-configuration.yaml"
  3044    sed -i "s|__CLOUD_PVL_ADMISSION_CA_CERT__|${CLOUD_PVL_ADMISSION_CA_CERT}|g" "${manifest}"
  3045  }
  3046  
  3047  # Sets up manifests for the ingress controller and GCE-specific policies for the service controller.
  3048  function start-lb-controller {
  3049    setup-addon-manifests "addons" "loadbalancing"
  3050  
  3051    # Starts an L7 load-balancing controller for ingress.
  3052    if [[ "${ENABLE_L7_LOADBALANCING:-}" == "glbc" ]]; then
  3053      echo "Start GCE L7 pod"
  3054      prepare-log-file /var/log/glbc.log
  3055      setup-addon-manifests "addons" "cluster-loadbalancing/glbc"
  3056      setup-addon-manifests "addons" "rbac/cluster-loadbalancing/glbc"
  3057      create-kubeconfig "l7-lb-controller" "${GCE_GLBC_TOKEN}"
  3058  
  3059      local -r src_manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/glbc.manifest"
  3060      local -r dest_manifest="/etc/kubernetes/manifests/glbc.manifest"
  3061  
  3062      if [[ -n "${CUSTOM_INGRESS_YAML:-}" ]]; then
  3063        echo "${CUSTOM_INGRESS_YAML}" > "${dest_manifest}"
  3064      else
  3065        cp "${src_manifest}" "${dest_manifest}"
  3066      fi
  3067  
  3068      # Override the glbc image if GCE_GLBC_IMAGE is specified.
  3069      if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
  3070        sed -i "s|image:.*|image: ${GCE_GLBC_IMAGE}|" "${dest_manifest}"
  3071      fi
  3072    fi
  3073  }
  3074  
  3075  # Set up the working directory for the kubelet.
  3076  function setup-kubelet-dir {
  3077      echo "Making /var/lib/kubelet executable for kubelet"
  3078      mount -B /var/lib/kubelet /var/lib/kubelet/
  3079      mount -B -o remount,exec,suid,dev /var/lib/kubelet
  3080  
  3081      # TODO(#60123): The kubelet should create the cert-dir directory if it doesn't exist
  3082      mkdir -p /var/lib/kubelet/pki/
  3083  
  3084      # Mount /var/lib/kubelet/pki on a tmpfs so it doesn't persist across
  3085      # reboots. This can help avoid some rare instances of corrupt cert files
  3086      # (e.g. created but not written during a shutdown). Kubelet crash-loops
  3087      # in these cases. Do this after above mount calls so it isn't overwritten.
  3088      echo "Mounting /var/lib/kubelet/pki on tmpfs"
  3089      mount -t tmpfs tmpfs /var/lib/kubelet/pki
  3090  }
  3091  
  3092  # Override for GKE custom master setup scripts (no-op outside of GKE).
  3093  function gke-master-start {
  3094    if [[ -e "${KUBE_HOME}/bin/gke-internal-configure-helper.sh" ]]; then
  3095      echo "Running GKE internal configuration script"
  3096      . "${KUBE_HOME}/bin/gke-internal-configure-helper.sh"
  3097      gke-internal-master-start
  3098    elif [[ -n "${KUBE_BEARER_TOKEN:-}" ]]; then
  3099      echo "setting up local admin kubeconfig"
  3100      create-kubeconfig "local-admin" "${KUBE_BEARER_TOKEN}"
  3101      echo "export KUBECONFIG=/etc/srv/kubernetes/local-admin/kubeconfig" > /etc/profile.d/kubeconfig.sh
  3102    fi
  3103  }
  3104  
  3105  function reset-motd {
  3106    # kubelet is installed both on the master and nodes, and the version is easy to parse (unlike kubectl)
  3107    local -r version="$("${KUBE_HOME}"/bin/kubelet --version=true | cut -f2 -d " ")"
  3108    # This logic grabs either a release tag (v1.2.1 or v1.2.1-alpha.1),
  3109    # or the git hash that's in the build info.
  3110    local gitref
  3111    gitref="$(echo "${version}" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g")"
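          # For example, a hypothetical version of "v1.2.1-alpha.1.100+abc0123"
          # yields a gitref of "v1.2.1-alpha.1" here, and the fallback below then
          # resets gitref to the build hash "abc0123".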
  3112    local devel=""
  3113    if [[ "${gitref}" != "${version}" ]]; then
  3114      devel="
  3115  Note: This looks like a development version, which might not be present on GitHub.
  3116  If it isn't, the closest tag is at:
  3117    https://github.com/kubernetes/kubernetes/tree/${gitref}
  3118  "
  3119      gitref="${version//*+/}"
  3120    fi
  3121    cat > /etc/motd <<EOF
  3122  
  3123  Welcome to Kubernetes ${version}!
  3124  
  3125  You can find documentation for Kubernetes at:
  3126    http://docs.kubernetes.io/
  3127  
  3128  The source for this release can be found at:
  3129    /home/kubernetes/kubernetes-src.tar.gz
  3130  Or you can download it at:
  3131    https://storage.googleapis.com/gke-release/kubernetes/release/${version}/kubernetes-src.tar.gz
  3132  
  3133  It is based on the Kubernetes source at:
  3134    https://github.com/kubernetes/kubernetes/tree/${gitref}
  3135  ${devel}
  3136  For Kubernetes copyright and licensing information, see:
  3137    /home/kubernetes/LICENSES
  3138  
  3139  EOF
  3140  }
  3141  
  3142  function override-kubectl {
  3143      echo "overriding kubectl"
  3144      echo "export PATH=${KUBE_HOME}/bin:\$PATH" > /etc/profile.d/kube_env.sh
  3145  
  3146      # Source the file explicitly, otherwise we have
  3147      # issues finding kubectl on an Ubuntu OS image.
  3148      # shellcheck disable=SC1091
  3149      source /etc/profile.d/kube_env.sh
  3150  
  3151      # Add ${KUBE_HOME}/bin into sudoer secure path.
  3152      local sudo_path
  3153      sudo_path=$(sudo env | grep "^PATH=")
  3154      if [[ -n "${sudo_path}" ]]; then
  3155        sudo_path=${sudo_path#PATH=}
  3156        (
  3157          umask 027
  3158          echo "Defaults secure_path=\"${KUBE_HOME}/bin:${sudo_path}\"" > /etc/sudoers.d/kube_secure_path
  3159        )
  3160      fi
  3161  }
  3162  
  3163  function detect-cgroup-config {
  3164    CGROUP_CONFIG=$(stat -fc %T /sys/fs/cgroup/)
  3165    echo "Detected cgroup config as ${CGROUP_CONFIG}"
  3166  }
  3167  
  3168  function override-pv-recycler {
  3169    if [[ -z "${PV_RECYCLER_OVERRIDE_TEMPLATE:-}" ]]; then
  3170      echo "PV_RECYCLER_OVERRIDE_TEMPLATE is not set"
  3171      exit 1
  3172    fi
  3173  
  3174    PV_RECYCLER_VOLUME="{\"name\": \"pv-recycler-mount\",\"hostPath\": {\"path\": \"${PV_RECYCLER_OVERRIDE_TEMPLATE}\", \"type\": \"FileOrCreate\"}},"
  3175    PV_RECYCLER_MOUNT="{\"name\": \"pv-recycler-mount\",\"mountPath\": \"${PV_RECYCLER_OVERRIDE_TEMPLATE}\", \"readOnly\": true},"
  3176  
  3177    cat > "${PV_RECYCLER_OVERRIDE_TEMPLATE}" <<\EOF
  3178  apiVersion: v1
  3179  kind: Pod
  3180  metadata:
  3181    generateName: pv-recycler-
  3182    namespace: default
  3183  spec:
  3184    activeDeadlineSeconds: 60
  3185    restartPolicy: Never
  3186    volumes:
  3187    - name: vol
  3188    containers:
  3189    - name: pv-recycler
  3190      image: registry.k8s.io/build-image/debian-base:bookworm-v1.0.2
  3191      command:
  3192      - /bin/sh
  3193      args:
  3194      - -c
  3195      - test -e /scrub && find /scrub -mindepth 1 -delete && test -z $(ls -A /scrub) || exit 1
  3196      volumeMounts:
  3197      - name: vol
  3198        mountPath: /scrub
  3199  EOF
  3200  
  3201    # Fix up the alternate registry if specified.
  3202    if [[ -n "${KUBE_ADDON_REGISTRY:-}" ]]; then
  3203      sed -i -e "s@registry.k8s.io@${KUBE_ADDON_REGISTRY}@g" "${PV_RECYCLER_OVERRIDE_TEMPLATE}"
  3204    fi
  3205  }
  3206  
  3207  function wait-till-apiserver-ready() {
  3208    until kubectl get nodes; do
  3209      sleep 5
  3210    done
  3211  }
  3212  
  3213  function ensure-master-bootstrap-kubectl-auth {
  3214    # By default, `kubectl` uses http://localhost:8080
  3215    # If the insecure port is disabled, kubectl will need to use an admin-authenticated kubeconfig.
  3216    if [[ -n "${KUBE_BOOTSTRAP_TOKEN:-}" ]]; then
  3217      create-kubeconfig "kube-bootstrap" "${KUBE_BOOTSTRAP_TOKEN}"
  3218      export KUBECONFIG=/etc/srv/kubernetes/kube-bootstrap/kubeconfig
  3219    fi
  3220  }
  3221  
  3222  function setup-containerd {
  3223    echo "Generate containerd config"
  3224    local config_path="${CONTAINERD_CONFIG_PATH:-"/etc/containerd/config.toml"}"
  3225    mkdir -p "$(dirname "${config_path}")"
  3226    local cni_template_path="${KUBE_HOME}/cni.template"
  3227    cat > "${cni_template_path}" <<EOF
  3228  {
  3229    "name": "k8s-pod-network",
  3230    "cniVersion": "0.3.1",
  3231    "plugins": [
  3232      {
  3233        "type": "ptp",
  3234        "mtu": 1460,
  3235        "ipam": {
  3236          "type": "host-local",
  3237          "subnet": "{{.PodCIDR}}",
  3238          "routes": [
  3239            {
  3240              "dst": "0.0.0.0/0"
  3241            }
  3242          ]
  3243        }
  3244      },
  3245      {
  3246        "type": "portmap",
  3247        "capabilities": {
  3248          "portMappings": true
  3249        }
  3250      }
  3251    ]
  3252  }
  3253  EOF
  3254    if [[ "${KUBERNETES_MASTER:-}" != "true" ]]; then
  3255      if [[ "${NETWORK_POLICY_PROVIDER:-"none"}" == "calico" || "${ENABLE_NETD:-}" == "true" ]]; then
  3256        # Use Kubernetes cni daemonset on node if network policy provider calico is specified
  3257        # or netd is enabled.
  3258        cni_template_path=""
  3259      fi
  3260    fi
  3261  
  3262    # Use systemd cgroup driver when running on cgroupv2
  3263    local systemdCgroup="false"
  3264    if [[ "${CGROUP_CONFIG-}" == "cgroup2fs" ]]; then
  3265      systemdCgroup="true"
  3266    fi
  3267  
  3268    cat > "${config_path}" <<EOF
  3269  version = 2
  3270  # Kubernetes requires the cri plugin.
  3271  required_plugins = ["io.containerd.grpc.v1.cri"]
  3272  # Kubernetes doesn't use containerd restart manager.
  3273  disabled_plugins = ["io.containerd.internal.v1.restart"]
  3274  oom_score = -999
  3275  
  3276  [debug]
  3277    level = "${CONTAINERD_LOG_LEVEL:-"info"}"
  3278  
  3279  [plugins."io.containerd.grpc.v1.cri"]
  3280    stream_server_address = "127.0.0.1"
  3281    max_container_log_line_size = ${CONTAINERD_MAX_CONTAINER_LOG_LINE:-262144}
  3282    sandbox_image = "${CONTAINERD_INFRA_CONTAINER:-"registry.k8s.io/pause:3.9"}"
  3283  [plugins."io.containerd.grpc.v1.cri".cni]
  3284    bin_dir = "${KUBE_HOME}/bin"
  3285    conf_dir = "/etc/cni/net.d"
  3286    conf_template = "${cni_template_path}"
  3287  [plugins."io.containerd.grpc.v1.cri".containerd]
  3288    default_runtime_name = "runc"
  3289  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
  3290    runtime_type = "io.containerd.runc.v2"
  3291  [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
  3292    endpoint = ["https://mirror.gcr.io","https://registry-1.docker.io"]
  3293  # Enable registry.k8s.io as the primary mirror for k8s.gcr.io
  3294  # See: https://github.com/kubernetes/k8s.io/issues/3411
  3295  [plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]
  3296    endpoint = ["https://registry.k8s.io", "https://k8s.gcr.io",]
  3297  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  3298    SystemdCgroup = ${systemdCgroup}
  3299  EOF
  3300  
  3301    if [[ "${CONTAINER_RUNTIME_TEST_HANDLER:-}" == "true" ]]; then
  3302      cat >> "${config_path}" <<EOF
  3303  # Setup a runtime with the magic name ("test-handler") used for Kubernetes
  3304  # runtime class tests ...
  3305  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.test-handler]
  3306    runtime_type = "io.containerd.runc.v2"
  3307  EOF
  3308    fi
  3309  
  3310    # Reuse docker group for containerd.
  3311    local -r containerd_gid="$(grep ^docker: /etc/group | cut -d: -f 3)"
  3312    if [[ -n "${containerd_gid:-}" ]]; then
  3313      cat >> "${config_path}" <<EOF
  3314  # reuse id of the docker group
  3315  [grpc]
  3316    gid = ${containerd_gid}
  3317  EOF
  3318    fi
  3319    chmod 644 "${config_path}"
  3320  
  3321    echo "Restart containerd to load the config change"
  3322    systemctl restart containerd
  3323  }
  3324  
  3325  # This function detects the platform/arch of the machine where the script runs,
  3326  # and sets the HOST_PLATFORM and HOST_ARCH environment variables accordingly.
  3327  # Callers can specify HOST_PLATFORM_OVERRIDE and HOST_ARCH_OVERRIDE to skip the detection.
  3328  # This function is adapted from the detect_client_info function in cluster/get-kube-binaries.sh
  3329  # and kube::util::host_os, kube::util::host_arch functions in hack/lib/util.sh
  3330  # This function should be synced with detect_host_info in ./configure.sh
  3331  function detect_host_info() {
  3332    HOST_PLATFORM=${HOST_PLATFORM_OVERRIDE:-"$(uname -s)"}
  3333    case "${HOST_PLATFORM}" in
  3334      Linux|linux)
  3335        HOST_PLATFORM="linux"
  3336        ;;
  3337      *)
  3338        echo "Unknown, unsupported platform: ${HOST_PLATFORM}." >&2
  3339        echo "Supported platform(s): linux." >&2
  3340        echo "Bailing out." >&2
  3341        exit 2
  3342    esac
  3343  
  3344    HOST_ARCH=${HOST_ARCH_OVERRIDE:-"$(uname -m)"}
  3345    case "${HOST_ARCH}" in
  3346      x86_64*|i?86_64*|amd64*)
  3347        HOST_ARCH="amd64"
  3348        ;;
  3349      aarch64*|arm64*)
  3350        HOST_ARCH="arm64"
  3351        ;;
  3352      *)
  3353        echo "Unknown, unsupported architecture (${HOST_ARCH})." >&2
  3354        echo "Supported architecture(s): amd64 and arm64." >&2
  3355        echo "Bailing out." >&2
  3356        exit 2
  3357        ;;
  3358    esac
  3359  }
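
        # For example, on an arm64 VM `uname -m` prints "aarch64", so HOST_ARCH
        # becomes "arm64"; setting HOST_ARCH_OVERRIDE=arm64 skips detection entirely.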
  3360  
  3361  # Initializes variables used by the log-* functions.
  3362  #
  3363  # get-metadata-value must be defined before calling this function.
  3364  #
  3365  # NOTE: this function is duplicated in configure.sh, any changes here should be
  3366  # duplicated there as well.
  3367  function log-init {
  3368    # Used by log-* functions.
  3369    LOG_CLUSTER_ID=$(get-metadata-value 'instance/attributes/cluster-uid' 'get-metadata-value-error')
  3370    LOG_INSTANCE_NAME=$(hostname)
  3371    LOG_BOOT_ID=$(journalctl --list-boots | grep -E '^ *0' | awk '{print $2}')
  3372    declare -Ag LOG_START_TIMES
  3373    declare -ag LOG_TRAP_STACK
  3374  
  3375    LOG_STATUS_STARTED='STARTED'
  3376    LOG_STATUS_COMPLETED='COMPLETED'
  3377    LOG_STATUS_ERROR='ERROR'
  3378  }
  3379  
  3380  # Sets an EXIT trap.
  3381  # Args:
  3382  #   $1:... : the trap command.
  3383  #
  3384  # NOTE: this function is duplicated in configure.sh, any changes here should be
  3385  # duplicated there as well.
  3386  function log-trap-push {
  3387    local t="${*:1}"
  3388    LOG_TRAP_STACK+=("${t}")
  3389    # shellcheck disable=2064
  3390    trap "${t}" EXIT
  3391  }
  3392  
  3393  # Removes and restores an EXIT trap.
  3394  #
  3395  # NOTE: this function is duplicated in configure.sh, any changes here should be
  3396  # duplicated there as well.
  3397  function log-trap-pop {
  3398    # Remove current trap.
  3399    unset 'LOG_TRAP_STACK[-1]'
  3400  
  3401    # Restore previous trap.
  3402    if [ ${#LOG_TRAP_STACK[@]} -ne 0 ]; then
  3403      local t="${LOG_TRAP_STACK[-1]}"
  3404      # shellcheck disable=2064
  3405      trap "${t}" EXIT
  3406    else
  3407      # If no traps in stack, clear.
  3408      trap EXIT
  3409    fi
  3410  }
  3411  
  3412  # Logs the end of a bootstrap step that errored.
  3413  # Args:
  3414  #  $1 : bootstrap step name.
  3415  #
  3416  # NOTE: this function is duplicated in configure.sh, any changes here should be
  3417  # duplicated there as well.
  3418  function log-error {
  3419    local bootstep="$1"
  3420  
  3421    log-proto "${bootstep}" "${LOG_STATUS_ERROR}" "encountered non-zero exit code"
  3422  }
  3423  
  3424  # Wraps a command with bootstrap logging.
  3425  # Args:
  3426  #   $1    : bootstrap step name.
  3427  #   $2... : the command to run.
  3428  #
  3429  # NOTE: this function is duplicated in configure.sh, any changes here should be
  3430  # duplicated there as well.
  3431  function log-wrap {
  3432    local bootstep="$1"
  3433    local command="${*:2}"
  3434  
  3435    log-trap-push "log-error ${bootstep}"
  3436    log-proto "${bootstep}" "${LOG_STATUS_STARTED}"
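          # NOTE: ${command} is expanded unquoted on purpose so that the step's
          # command line is re-split into words; arguments containing whitespace
          # will not survive this round-trip.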
  3437    $command
  3438    log-proto "${bootstep}" "${LOG_STATUS_COMPLETED}"
  3439    log-trap-pop
  3440  }
  3441  
  3442  # Logs a bootstrap step start. Prefer log-wrap.
  3443  # Args:
  3444  #   $1 : bootstrap step name.
  3445  #
  3446  # NOTE: this function is duplicated in configure.sh, any changes here should be
  3447  # duplicated there as well.
  3448  function log-start {
  3449    local bootstep="$1"
  3450  
  3451    log-trap-push "log-error ${bootstep}"
  3452    log-proto "${bootstep}" "${LOG_STATUS_STARTED}"
  3453  }
  3454  
  3455  # Logs a bootstrap step end. Prefer log-wrap.
  3456  # Args:
  3457  #   $1 : bootstrap step name.
  3458  #
  3459  # NOTE: this function is duplicated in configure.sh, any changes here should be
  3460  # duplicated there as well.
  3461  function log-end {
  3462    local bootstep="$1"
  3463  
  3464    log-proto "${bootstep}" "${LOG_STATUS_COMPLETED}"
  3465    log-trap-pop
  3466  }
  3467  
  3468  # Writes a log proto to stdout.
  3469  # Args:
  3470  #   $1: bootstrap step name.
  3471  #   $2: status. Either 'STARTED', 'COMPLETED', or 'ERROR'.
  3472  #   $3: optional status reason.
  3473  #
  3474  # NOTE: this function is duplicated in configure.sh, any changes here should be
  3475  # duplicated there as well.
  3476  function log-proto {
  3477    local bootstep="$1"
  3478    local status="$2"
  3479    local status_reason="${3:-}"
  3480  
  3481    # Get current time.
  3482    local current_time
  3483    current_time="$(date --utc '+%s.%N')"
  3484    # ...formatted as UTC RFC 3339.
  3485    local timestamp
  3486    timestamp="$(date --utc --date="@${current_time}" '+%FT%T.%NZ')"
  3487  
  3488    # Calculate latency.
  3489    local latency='null'
  3490    if [ "${status}" == "${LOG_STATUS_STARTED}" ]; then
  3491      LOG_START_TIMES["${bootstep}"]="${current_time}"
  3492    else
  3493      local start_time="${LOG_START_TIMES["${bootstep}"]}"
  3494      unset 'LOG_START_TIMES['"${bootstep}"']'
  3495  
  3496      # Bash cannot do non-integer math, shell out to awk.
  3497      latency="$(echo "${current_time} ${start_time}" | awk '{print $1 - $2}')s"
  3498  
  3499      # The default latency is null which cannot be wrapped as a string so we must
  3500      # do it here instead of the printf.
  3501      latency="\"${latency}\""
  3502    fi
  3503  
  3504    printf '[cloud.kubernetes.monitoring.proto.SerialportLog] {"cluster_hash":"%s","vm_instance_name":"%s","boot_id":"%s","timestamp":"%s","bootstrap_status":{"step_name":"%s","status":"%s","status_reason":"%s","latency":%s}}\n' \
  3505    "${LOG_CLUSTER_ID}" "${LOG_INSTANCE_NAME}" "${LOG_BOOT_ID}" "${timestamp}" "${bootstep}" "${status}" "${status_reason}" "${latency}"
  3506  }
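
        # Example output line (hypothetical values):
        #   [cloud.kubernetes.monitoring.proto.SerialportLog] {"cluster_hash":"abc123","vm_instance_name":"master-1","boot_id":"b1","timestamp":"2024-05-20T17:17:57.000000000Z","bootstrap_status":{"step_name":"SetupOSParams","status":"COMPLETED","status_reason":"","latency":"0.42s"}}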
  3507  
  3508  ########### Main Function ###########
  3509  function main() {
  3510    echo "Starting to configure instance for kubernetes"
  3511    log-wrap 'DetectHostInfo' detect_host_info
  3512  
  3513    readonly UUID_MNT_PREFIX="/mnt/disks/by-uuid/google-local-ssds"
  3514    readonly UUID_BLOCK_PREFIX="/dev/disk/by-uuid/google-local-ssds"
  3515    readonly COREDNS_AUTOSCALER="Deployment/coredns"
  3516    readonly KUBEDNS_AUTOSCALER="Deployment/kube-dns"
  3517  
  3518    # Resource requests of master components.
  3519    CLOUD_CONTROLLER_MANAGER_CPU_REQUEST="${CLOUD_CONTROLLER_MANAGER_CPU_REQUEST:-50m}"
  3520    KUBE_CONTROLLER_MANAGER_CPU_REQUEST="${KUBE_CONTROLLER_MANAGER_CPU_REQUEST:-200m}"
  3521    KUBE_SCHEDULER_CPU_REQUEST="${KUBE_SCHEDULER_CPU_REQUEST:-75m}"
  3522  
  3523    KUBE_HOME="/home/kubernetes"
  3524    KUBE_BIN=${KUBE_HOME}/bin
  3525    CONTAINERIZED_MOUNTER_HOME="${KUBE_HOME}/containerized_mounter"
  3526    PV_RECYCLER_OVERRIDE_TEMPLATE="${KUBE_HOME}/kube-manifests/kubernetes/pv-recycler-template.yaml"
  3527  
  3528    log-start 'SourceKubeEnv'
  3529    if [[ ! -e "${KUBE_HOME}/kube-env" ]]; then
  3530      echo "The ${KUBE_HOME}/kube-env file does not exist! Terminating cluster initialization."
  3531      exit 1
  3532    fi
  3533    source "${KUBE_HOME}/kube-env"
  3534    log-end 'SourceKubeEnv'
  3535  
  3536    if [[ -f "${KUBE_HOME}/kubelet-config.yaml" ]]; then
  3537      echo "Found Kubelet config file at ${KUBE_HOME}/kubelet-config.yaml"
  3538      KUBELET_CONFIG_FILE_ARG="--config ${KUBE_HOME}/kubelet-config.yaml"
  3539    fi
  3540  
  3541    if [[ -e "${KUBE_HOME}/kube-master-certs" ]]; then
  3542      log-wrap 'SourceKubeMasterCerts' source "${KUBE_HOME}/kube-master-certs"
  3543    fi
  3544  
  3545    log-start 'VerifyKubeUser'
  3546    if [[ -n "${KUBE_USER:-}" ]]; then
  3547      if ! [[ "${KUBE_USER}" =~ ^[-._@a-zA-Z0-9]+$ ]]; then
  3548        echo "Bad KUBE_USER format."
  3549        exit 1
  3550      fi
  3551    fi
  3552    log-end 'VerifyKubeUser'
  3553  
  3554    log-start 'GenerateTokens'
  3555    KUBE_CONTROLLER_MANAGER_TOKEN="$(secure_random 32)"
  3556    CLOUD_CONTROLLER_MANAGER_TOKEN="$(secure_random 32)"
  3557    KUBE_SCHEDULER_TOKEN="$(secure_random 32)"
  3558    KUBE_CLUSTER_AUTOSCALER_TOKEN="$(secure_random 32)"
  3559    if [[ "${ENABLE_L7_LOADBALANCING:-}" == "glbc" ]]; then
  3560      GCE_GLBC_TOKEN="$(secure_random 32)"
  3561    fi
  3562    ADDON_MANAGER_TOKEN="$(secure_random 32)"
  3563    if [[ "${ENABLE_APISERVER_INSECURE_PORT:-false}" != "true" ]]; then
  3564      KUBE_BOOTSTRAP_TOKEN="$(secure_random 32)"
  3565    fi
  3566    if [[ "${PREPARE_KONNECTIVITY_SERVICE:-false}" == "true" ]]; then
  3567      KONNECTIVITY_SERVER_TOKEN="$(secure_random 32)"
  3568    fi
  3569    if [[ "${ENABLE_MONITORING_TOKEN:-false}" == "true" ]]; then
  3570      MONITORING_TOKEN="$(secure_random 32)"
  3571    fi
  3572    log-end 'GenerateTokens'
  3573  
  3574    log-wrap 'SetupOSParams' setup-os-params
  3575    log-wrap 'ConfigIPFirewall' config-ip-firewall
  3576    log-wrap 'CreateDirs' create-dirs
  3577    log-wrap 'EnsureLocalSSDs' ensure-local-ssds
  3578    log-wrap 'SetupKubeletDir' setup-kubelet-dir
  3579    log-wrap 'SetupJournald' setup-journald
  3580    log-wrap 'SetupLogrotate' setup-logrotate
  3581    if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
  3582      log-wrap 'MountMasterPD' mount-master-pd
  3583      log-wrap 'CreateNodePKI' create-node-pki
  3584      log-wrap 'CreateMasterPKI' create-master-pki
  3585      log-wrap 'CreateMasterAuth' create-master-auth
  3586      log-wrap 'EnsureMasterBootstrapKubectlAuth' ensure-master-bootstrap-kubectl-auth
  3587      log-wrap 'CreateMasterKubeletAuth' create-master-kubelet-auth
  3588      log-wrap 'CreateMasterEtcdAuth' create-master-etcd-auth
  3589      log-wrap 'CreateMasterEtcdApiserverAuth' create-master-etcd-apiserver-auth
  3590      log-wrap 'OverridePVRecycler' override-pv-recycler
  3591      log-wrap 'GKEMasterStart' gke-master-start
  3592    else
  3593      log-wrap 'CreateNodePKI' create-node-pki
  3594      log-wrap 'CreateKubeletKubeconfig' create-kubelet-kubeconfig "${KUBERNETES_MASTER_NAME}"
  3595      if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]] && [[ "${KUBE_PROXY_DISABLE:-}" != "true" ]]; then
  3596        log-wrap 'CreateKubeproxyUserKubeconfig' create-kubeproxy-user-kubeconfig
  3597      fi
  3598      if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
  3599        if [[ -n "${NODE_PROBLEM_DETECTOR_TOKEN:-}" ]]; then
  3600          log-wrap 'CreateNodeProblemDetectorKubeconfig' create-node-problem-detector-kubeconfig "${KUBERNETES_MASTER_NAME}"
  3601        elif [[ -f "/var/lib/kubelet/kubeconfig" ]]; then
  3602          log-wrap 'CreateNodeProblemDetectorKubeconfigFromKubelet' create-node-problem-detector-kubeconfig-from-kubelet
  3603        else
  3604          echo "Either NODE_PROBLEM_DETECTOR_TOKEN or /var/lib/kubelet/kubeconfig must be set"
  3605          exit 1
  3606        fi
  3607      fi
  3608    fi
  3609  
  3610    log-wrap 'DetectCgroupConfig' detect-cgroup-config
  3611    log-wrap 'OverrideKubectl' override-kubectl
  3612    if docker-installed; then
  3613      # We still need to configure docker so it doesn't reserve the 172.17.0.0/16 subnet,
  3614      # and so that logging is set up if somebody starts docker to build or pull something.
  3615      log-wrap 'AssembleDockerFlags' assemble-docker-flags
  3616      # Stop docker if it is present, as we want to use just containerd.
  3617      log-wrap 'StopDocker' systemctl stop docker || echo "unable to stop docker"
  3618    fi
  3619  
  3620    if [[ ! -e "/etc/profile.d/containerd_env.sh" ]]; then
  3621      log-wrap 'SetupContainerd' setup-containerd
  3622    else
  3623      echo "Skipping SetupContainerd step because containerd has already been set up by containerd's configure.sh script"
  3624    fi
  3625  
  3626    log-start 'SetupKubePodLogReadersGroupDir'
  3627    if [[ -n "${KUBE_POD_LOG_READERS_GROUP:-}" ]]; then
  3628       mkdir -p /var/log/pods/
  3629       chgrp -R "${KUBE_POD_LOG_READERS_GROUP:-}" /var/log/pods/
  3630       chmod -R g+s /var/log/pods/
  3631    fi
  3632    log-end 'SetupKubePodLogReadersGroupDir'
  3633  
  3634    # Note prepare-mounter-rootfs must be called before the kubelet starts, as
  3635    # kubelet startup updates its nameserver.
  3636    log-wrap 'PrepareMounterRootfs' prepare-mounter-rootfs
  3637  
  3638    log-wrap 'StartKubelet' start-kubelet
  3639  
  3640    if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
  3641      log-wrap 'ComputeMasterManifestVariables' compute-master-manifest-variables
  3642      if [[ -z "${ETCD_SERVERS:-}" ]]; then
  3643        log-wrap 'StartEtcdServers' start-etcd-servers
  3644      fi
  3645      log-wrap 'SourceConfigureKubeApiserver' source ${KUBE_BIN}/configure-kubeapiserver.sh
  3646      log-wrap 'StartKubeApiserver' start-kube-apiserver
  3647      if [[ "${RUN_KONNECTIVITY_PODS:-false}" == "true" ]]; then
  3648        log-wrap 'StartKonnectivityServer' start-konnectivity-server
  3649      fi
  3650      log-wrap 'StartKubeControllerManager' start-kube-controller-manager
  3651      if [[ "${CLOUD_PROVIDER_FLAG:-external}" == "external" ]]; then
  3652        log-wrap 'StartCloudControllerManager' start-cloud-controller-manager
  3653      fi
  3654      log-wrap 'StartKubeScheduler' start-kube-scheduler
  3655      log-wrap 'WaitTillApiserverReady' wait-till-apiserver-ready
  3656      log-wrap 'StartKubeAddons' start-kube-addons
  3657      log-wrap 'StartClusterAutoscaler' start-cluster-autoscaler
  3658      log-wrap 'StartLBController' start-lb-controller
  3659      log-wrap 'UpdateLegacyAddonNodeLabels' update-legacy-addon-node-labels &
  3660    else
  3661      if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]] && [[ "${KUBE_PROXY_DISABLE:-}" != "true" ]]; then
  3662        log-wrap 'StartKubeProxy' start-kube-proxy
  3663      fi
  3664      if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
  3665        log-wrap 'StartNodeProblemDetector' start-node-problem-detector
  3666      fi
  3667    fi
  3668    log-wrap 'ResetMotd' reset-motd
  3669  
  3670    # Wait for all background jobs to finish.
  3671    wait
  3672    echo "Done with the configuration for kubernetes"
  3673  }
  3674  
  3675  if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
  3676    log-init
  3677    log-wrap 'ConfigureHelperMain' main "${@}"
  3678  
  3679    if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
  3680      # Give kube-bootstrap-logs-forwarder.service some time to write all logs.
  3681      sleep 3
  3682    fi
  3683  fi