k8s.io/kubernetes@v1.29.3/test/kubemark/resources/start-kubemark-master.sh

#!/usr/bin/env bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Script that starts kubelet on the kubemark master as a systemd service
# and then runs the master components as static pods via kubelet.

set -o errexit
set -o nounset
set -o pipefail

# Define key path variables.
KUBE_ROOT="/home/kubernetes"
KUBE_BINDIR="${KUBE_ROOT}/kubernetes/server/bin"

function config-ip-firewall {
  echo "Configuring IP firewall rules"
  # The GCI image has a host firewall that drops most inbound/forwarded packets.
  # We need to add rules to accept all TCP/UDP/ICMP packets.
  if iptables -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then
    echo "Adding rules to accept all inbound TCP/UDP/ICMP packets"
    iptables -A INPUT -w -p TCP -j ACCEPT
    iptables -A INPUT -w -p UDP -j ACCEPT
    iptables -A INPUT -w -p ICMP -j ACCEPT
  fi
  if iptables -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then
    echo "Adding rules to accept all forwarded TCP/UDP/ICMP packets"
    iptables -A FORWARD -w -p TCP -j ACCEPT
    iptables -A FORWARD -w -p UDP -j ACCEPT
    iptables -A FORWARD -w -p ICMP -j ACCEPT
  fi
}
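# Note: the -w flag above makes iptables wait for the xtables lock instead of
# failing immediately if another process is modifying the rules concurrently.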

function create-dirs {
	echo "Creating required directories"
	mkdir -p /var/lib/kubelet
	mkdir -p /etc/kubernetes/manifests
	mkdir -p /etc/kubernetes/addons
}

# Setup working directory for kubelet.
function setup-kubelet-dir {
	echo "Making /var/lib/kubelet executable for kubelet"
	mount -B /var/lib/kubelet /var/lib/kubelet/
	mount -B -o remount,exec,suid,dev /var/lib/kubelet
}
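# The self bind-mount above is needed because mount flags like exec can only
# be changed by remounting an existing mount point; bind-mounting
# /var/lib/kubelet onto itself makes it one, after which the remount can
# re-enable exec, suid, and dev.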

# Remove any default etcd config dirs/files.
function delete-default-etcd-configs {
	if [[ -d /etc/etcd ]]; then
		rm -rf /etc/etcd
	fi
	if [[ -e /etc/default/etcd ]]; then
		rm -f /etc/default/etcd
	fi
	if [[ -e /etc/systemd/system/etcd.service ]]; then
		rm -f /etc/systemd/system/etcd.service
	fi
	if [[ -e /etc/init.d/etcd ]]; then
		rm -f /etc/init.d/etcd
	fi
}

# Compute etcd related variables.
function compute-etcd-variables {
	ETCD_IMAGE="${ETCD_IMAGE:-}"
	ETCD_QUOTA_BYTES=""
	if [ "${ETCD_VERSION:0:2}" == "3." ]; then
		# TODO: Set larger quota to see if that helps with
		# 'mvcc: database space exceeded' errors. If so, pipe
		# through our setup scripts.
		ETCD_QUOTA_BYTES=" --quota-backend-bytes=4294967296 "
	fi
}
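# For reference: 4294967296 bytes = 4 * 1024^3 = 4 GiB, i.e. double etcd's
# default 2 GiB backend quota.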

# Formats the given device ($1) if needed and mounts it at the given mount
# point ($2).
function safe-format-and-mount() {
	device=$1
	mountpoint=$2

	# Format only if the disk is not already formatted.
	if ! tune2fs -l "${device}" ; then
		echo "Formatting '${device}'"
		mkfs.ext4 -F "${device}"
	fi

	echo "Mounting '${device}' at '${mountpoint}'"
	mount -o discard,defaults "${device}" "${mountpoint}"
}
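# Example usage (hypothetical device path):
#   safe-format-and-mount "/dev/sdb" "/mnt/disks/master-pd"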

# Finds a PD device with name '$1' attached to the master.
function find-attached-pd() {
	local -r pd_name=$1
	if [[ ! -e /dev/disk/by-id/${pd_name} ]]; then
		# Return the empty string rather than falling through to ls,
		# which would fail on the nonexistent path.
		echo ""
		return
	fi
	device_info=$(ls -l "/dev/disk/by-id/${pd_name}")
	relative_path=${device_info##* }
	echo "/dev/disk/by-id/${relative_path}"
}
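# Illustration of the parsing above: for a symlink, `ls -l` ends with
# something like "google-master-pd -> ../../sdb" (hypothetical target), so
# the ${device_info##* } expansion strips everything up to the last space,
# leaving the relative link target, here "../../sdb".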

# Mounts a persistent disk (formatting if needed) to store the persistent data
# on the master. safe-format-and-mount only formats an unformatted disk, and
# mkdir -p will leave a directory be if it already exists.
function mount-pd() {
	local -r pd_name=$1
	local -r mount_point=$2

	if [[ -z "$(find-attached-pd "${pd_name}")" ]]; then
		echo "Can't find ${pd_name}. Skipping mount."
		return
	fi

	local -r pd_path="/dev/disk/by-id/${pd_name}"
	echo "Mounting PD '${pd_path}' at '${mount_point}'"
	# Format and mount the disk, create directories on it for all of the master's
	# persistent data, and link them to where they're used.
	mkdir -p "${mount_point}"
	safe-format-and-mount "${pd_path}" "${mount_point}"
	echo "Mounted PD '${pd_path}' at '${mount_point}'"

	# NOTE: These locations on the PD store persistent data, so to maintain
	# upgradeability, these locations should not change. If they do, take care
	# to maintain a migration path from these locations to whatever new
	# locations.
}

# Create kubeconfig for controller-manager's service account authentication.
function create-kubecontrollermanager-kubeconfig {
	echo "Creating kube-controller-manager kubeconfig file"
	mkdir -p "${KUBE_ROOT}/k8s_auth_data/kube-controller-manager"
	cat <<EOF >"${KUBE_ROOT}/k8s_auth_data/kube-controller-manager/kubeconfig"
apiVersion: v1
kind: Config
users:
- name: kube-controller-manager
  user:
    token: ${KUBE_CONTROLLER_MANAGER_TOKEN}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
    server: https://localhost:443
contexts:
- context:
    cluster: local
    user: kube-controller-manager
  name: service-account-context
current-context: service-account-context
EOF
}

function create-kubescheduler-kubeconfig {
  echo "Creating kube-scheduler kubeconfig file"
  mkdir -p "${KUBE_ROOT}/k8s_auth_data/kube-scheduler"
  cat <<EOF >"${KUBE_ROOT}/k8s_auth_data/kube-scheduler/kubeconfig"
apiVersion: v1
kind: Config
users:
- name: kube-scheduler
  user:
    token: ${KUBE_SCHEDULER_TOKEN}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
    server: https://localhost:443
contexts:
- context:
    cluster: local
    user: kube-scheduler
  name: kube-scheduler
current-context: kube-scheduler
EOF
}

function create-addonmanager-kubeconfig {
  echo "Creating addonmanager kubeconfig file"
  mkdir -p "${KUBE_ROOT}/k8s_auth_data/addon-manager"
  cat <<EOF >"${KUBE_ROOT}/k8s_auth_data/addon-manager/kubeconfig"
apiVersion: v1
kind: Config
users:
- name: addon-manager
  user:
    token: ${ADDON_MANAGER_TOKEN}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
    server: https://localhost:443
contexts:
- context:
    cluster: local
    user: addon-manager
  name: addon-manager
current-context: addon-manager
EOF
}

function assemble-docker-flags {
	echo "Assembling docker command line flags"
	local docker_opts="-p /var/run/docker.pid --iptables=false --ip-masq=false"
	docker_opts+=" --log-level=debug"  # Since it's a test cluster
	# TODO(shyamjvs): Incorporate network plugin options, etc later.
	echo "DOCKER_OPTS=\"${docker_opts}\"" > /etc/default/docker
	echo "DOCKER_NOFILE=65536" >> /etc/default/docker  # For setting ulimit -n
	systemctl restart docker
}

# A helper function for loading a docker image. It keeps trying up to 5 times.
#
# $1: Full path of the docker image
function try-load-docker-image {
	local -r img=$1
	echo "Trying to load docker image file ${img}"
	# Temporarily turn off errexit, because we don't want to exit on first failure.
	set +e
	local -r max_attempts=5
	local -i attempt_num=1
	until timeout 30 docker load -i "${img}"; do
		if [[ "${attempt_num}" == "${max_attempts}" ]]; then
			echo "Failed to load docker image file ${img} after ${max_attempts} attempts. Exiting!"
			exit 1
		else
			attempt_num=$((attempt_num+1))
			sleep 5
		fi
	done
	# Re-enable errexit.
	set -e
}
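# Worst case, the loop above runs for just under three minutes: 5 attempts
# capped at 30s each by timeout(1), plus 4 sleeps of 5s in between
# (5 * 30s + 4 * 5s = 170s).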

# Loads kube-system docker images. It is better to do this before starting
# kubelet, as kubelet will restart the docker daemon, which may interfere
# with loading images.
function load-docker-images {
	echo "Loading kube-system docker images"
	local -r img_dir="${KUBE_BINDIR}"
	try-load-docker-image "${img_dir}/kube-apiserver.tar"
	try-load-docker-image "${img_dir}/kube-controller-manager.tar"
	try-load-docker-image "${img_dir}/kube-scheduler.tar"
}

# Computes command line arguments to be passed to kubelet.
function compute-kubelet-params {
	local params="${KUBELET_TEST_ARGS:-}"
	params+=" --cgroup-root=/"
	params+=" --cloud-provider=gce"
	params+=" --pod-manifest-path=/etc/kubernetes/manifests"
	if [[ -n "${KUBELET_PORT:-}" ]]; then
		params+=" --port=${KUBELET_PORT}"
	fi
	params+=" --enable-debugging-handlers=false"
	params+=" --hairpin-mode=none"
	echo "${params}"
}
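# Note the ordering: KUBELET_TEST_ARGS are placed first, so the fixed flags
# appended after them take precedence if both set the same singular flag
# (for most flag parsers, the last occurrence wins). Contrast this with
# compute-kube-apiserver-params below, which deliberately appends
# APISERVER_TEST_ARGS last so they can override the defaults.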

# Creates the systemd config file for kubelet.service.
function create-kubelet-conf() {
	local -r kubelet_bin="$1"
	local -r kubelet_env_file="/etc/default/kubelet"
	local -r flags=$(compute-kubelet-params)
	echo "KUBELET_OPTS=\"${flags}\"" > "${kubelet_env_file}"

	# Write the systemd service file for kubelet.
	cat <<EOF >/etc/systemd/system/kubelet.service
[Unit]
Description=Kubemark kubelet
Requires=network-online.target
After=network-online.target

[Service]
Restart=always
RestartSec=10
EnvironmentFile=${kubelet_env_file}
ExecStart=${kubelet_bin} \$KUBELET_OPTS

[Install]
WantedBy=multi-user.target
EOF
}

# This function assembles the kubelet systemd service file and starts it using
# systemctl, on the kubemark master.
function start-kubelet {
	# Create systemd config.
	local -r kubelet_bin="/usr/bin/kubelet"
	create-kubelet-conf "${kubelet_bin}"

	# Flush iptables nat table
	iptables -t nat -F || true

	# Start the kubelet service.
	systemctl start kubelet.service
}

# Create the log file and set its properties.
#
# $1 is the file to create.
function prepare-log-file {
	touch "$1"
	chmod 644 "$1"
	chown root:root "$1"
}

# A helper function for copying addon manifests and setting dir/file
# permissions.
#
# $1: addon category under /etc/kubernetes
# $2: manifest source dir
function setup-addon-manifests {
  local -r src_dir="${KUBE_ROOT}/$2"
  local -r dst_dir="/etc/kubernetes/$1/$2"

  if [[ ! -d "${dst_dir}" ]]; then
    mkdir -p "${dst_dir}"
  fi

  local files
  files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml")
  if [[ -n "${files}" ]]; then
    cp "${src_dir}/"*.yaml "${dst_dir}"
  fi
  chown -R root:root "${dst_dir}"
  chmod 755 "${dst_dir}"
  chmod 644 "${dst_dir}"/*
}
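# For example, the call in start-kubemaster-component below,
#   setup-addon-manifests "addons" "kubemark-rbac-bindings"
# copies ${KUBE_ROOT}/kubemark-rbac-bindings/*.yaml into
# /etc/kubernetes/addons/kubemark-rbac-bindings.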

# Write the config for the audit policy.
# Note: This duplicates the function in cluster/gce/gci/configure-helper.sh.
# TODO: Get rid of this function when #53321 is fixed.
function create-master-audit-policy {
  local -r path="${1}"
  local -r policy="${2:-}"

  if [[ -n "${policy}" ]]; then
    echo "${policy}" > "${path}"
    return
  fi

  # Known api groups
  local -r known_apis='
      - group: "" # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"
      - group: "apiregistration.k8s.io"
      - group: "apps"
      - group: "authentication.k8s.io"
      - group: "authorization.k8s.io"
      - group: "autoscaling"
      - group: "batch"
      - group: "certificates.k8s.io"
      - group: "extensions"
      - group: "metrics"
      - group: "networking.k8s.io"
      - group: "policy"
      - group: "rbac.authorization.k8s.io"
      - group: "storage.k8s.io"'

  cat <<EOF >"${path}"
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk,
  # so drop them.
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
      - group: "" # core
        resources: ["endpoints", "services", "services/status"]
  - level: None
    # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
    # TODO(#46983): Change this to the ingress controller service account.
    users: ["system:unsecured"]
    namespaces: ["kube-system"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["configmaps"]
  - level: None
    users: ["kubelet"] # legacy kubelet identity
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    userGroups: ["system:nodes"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    users:
      - system:kube-controller-manager
      - system:kube-scheduler
      - system:serviceaccount:kube-system:endpoint-controller
    verbs: ["get", "update"]
    namespaces: ["kube-system"]
    resources:
      - group: "" # core
        resources: ["endpoints"]
  - level: None
    users: ["system:apiserver"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
  # Don't log HPA fetching metrics.
  - level: None
    users:
      - system:kube-controller-manager
    verbs: ["get", "list"]
    resources:
      - group: "metrics"
  # Don't log these read-only URLs.
  - level: None
    nonResourceURLs:
      - /healthz*
      - /version
      - /swagger*
  # Don't log events requests.
  - level: None
    resources:
      - group: "" # core
        resources: ["events"]
  # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
  - level: Request
    users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
    verbs: ["update","patch"]
    resources:
      - group: "" # core
        resources: ["nodes/status", "pods/status"]
    omitStages:
      - "RequestReceived"
  - level: Request
    userGroups: ["system:nodes"]
    verbs: ["update","patch"]
    resources:
      - group: "" # core
        resources: ["nodes/status", "pods/status"]
    omitStages:
      - "RequestReceived"
  # deletecollection calls can be large, don't log responses for expected namespace deletions
  - level: Request
    users: ["system:serviceaccount:kube-system:namespace-controller"]
    verbs: ["deletecollection"]
    omitStages:
      - "RequestReceived"
  # Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,
  # so only log at the Metadata level.
  - level: Metadata
    resources:
      - group: "" # core
        resources: ["secrets", "configmaps", "serviceaccounts/token"]
      - group: authentication.k8s.io
        resources: ["tokenreviews"]
    omitStages:
      - "RequestReceived"
  # Get responses can be large; skip them.
  - level: Request
    verbs: ["get", "list", "watch"]
    resources: ${known_apis}
    omitStages:
      - "RequestReceived"
  # Default level for known APIs
  - level: RequestResponse
    resources: ${known_apis}
    omitStages:
      - "RequestReceived"
  # Default level for all other requests.
  - level: Metadata
    omitStages:
      - "RequestReceived"
EOF
}

# Computes command line arguments to be passed to etcd.
function compute-etcd-params {
	local params="${ETCD_TEST_ARGS:-}"
	params+=" --name=etcd-$(hostname -s)"
	params+=" --listen-peer-urls=http://127.0.0.1:2380"
	params+=" --advertise-client-urls=http://127.0.0.1:2379"
	params+=" --listen-client-urls=http://0.0.0.0:2379"

	# Enable apiserver->etcd auth.
	params+=" --client-cert-auth"
	params+=" --trusted-ca-file /etc/srv/kubernetes/etcd-apiserver-ca.crt"
	params+=" --cert-file /etc/srv/kubernetes/etcd-apiserver-server.crt"
	params+=" --key-file /etc/srv/kubernetes/etcd-apiserver-server.key"

	params+=" --data-dir=/var/etcd/data"
	params+=" ${ETCD_QUOTA_BYTES}"
	echo "${params}"
}

# Computes command line arguments to be passed to etcd-events.
function compute-etcd-events-params {
	local params="${ETCD_TEST_ARGS:-}"
	params+=" --name=etcd-$(hostname -s)"
	params+=" --listen-peer-urls=http://127.0.0.1:2381"
	params+=" --advertise-client-urls=http://127.0.0.1:4002"
	params+=" --listen-client-urls=http://0.0.0.0:4002"
	params+=" --data-dir=/var/etcd/data-events"
	params+=" ${ETCD_QUOTA_BYTES}"
	echo "${params}"
}
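# The two etcd instances are kept apart by port and data dir alone: the main
# instance serves clients on 2379 (peers on 2380) from /var/etcd/data, while
# the events instance serves clients on 4002 (peers on 2381) from
# /var/etcd/data-events. Splitting events into a separate instance keeps
# high-churn Event objects from bloating the main etcd database.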

# Computes command line arguments to be passed to apiserver.
function compute-kube-apiserver-params {
	local params="--insecure-bind-address=0.0.0.0"
	params+=" --etcd-servers=${ETCD_SERVERS:-http://127.0.0.1:2379}"
	if [[ -z "${ETCD_SERVERS:-}" ]]; then
		params+=" --etcd-servers-overrides=${ETCD_SERVERS_OVERRIDES:-/events#${EVENT_STORE_URL}}"
	elif [[ -n "${ETCD_SERVERS_OVERRIDES:-}" ]]; then
		params+=" --etcd-servers-overrides=${ETCD_SERVERS_OVERRIDES:-}"
	fi
	# Enable apiserver->etcd auth.
	params+=" --etcd-cafile=/etc/srv/kubernetes/etcd-apiserver-ca.crt"
	params+=" --etcd-certfile=/etc/srv/kubernetes/etcd-apiserver-client.crt"
	params+=" --etcd-keyfile=/etc/srv/kubernetes/etcd-apiserver-client.key"

	params+=" --tls-cert-file=/etc/srv/kubernetes/server.cert"
	params+=" --tls-private-key-file=/etc/srv/kubernetes/server.key"
	params+=" --requestheader-client-ca-file=/etc/srv/kubernetes/aggr_ca.crt"
	params+=" --requestheader-allowed-names=aggregator"
	params+=" --requestheader-extra-headers-prefix=X-Remote-Extra-"
	params+=" --requestheader-group-headers=X-Remote-Group"
	params+=" --requestheader-username-headers=X-Remote-User"
	params+=" --proxy-client-cert-file=/etc/srv/kubernetes/proxy_client.crt"
	params+=" --proxy-client-key-file=/etc/srv/kubernetes/proxy_client.key"
	params+=" --enable-aggregator-routing=true"
	params+=" --client-ca-file=/etc/srv/kubernetes/ca.crt"
	params+=" --token-auth-file=/etc/srv/kubernetes/known_tokens.csv"
	params+=" --secure-port=443"
	params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
	params+=" --admission-control=${CUSTOM_ADMISSION_PLUGINS}"
	params+=" --authorization-mode=Node,RBAC"
	params+=" --allow-privileged=true"
	if [[ -n "${STORAGE_BACKEND:-}" ]]; then
		params+=" --storage-backend=${STORAGE_BACKEND}"
	fi
	if [[ -n "${STORAGE_MEDIA_TYPE:-}" ]]; then
		params+=" --storage-media-type=${STORAGE_MEDIA_TYPE}"
	fi
	if [[ -n "${ETCD_COMPACTION_INTERVAL_SEC:-}" ]]; then
		params+=" --etcd-compaction-interval=${ETCD_COMPACTION_INTERVAL_SEC}s"
	fi
	if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]]; then
		params+=" --min-request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT}"
	fi
	if [[ "${NUM_NODES}" -ge 3000 ]]; then
		params+=" --max-requests-inflight=3000 --max-mutating-requests-inflight=1000"
	elif [[ "${NUM_NODES}" -ge 1000 ]]; then
		params+=" --max-requests-inflight=1500 --max-mutating-requests-inflight=500"
	fi
	if [[ -n "${RUNTIME_CONFIG:-}" ]]; then
		params+=" --runtime-config=${RUNTIME_CONFIG}"
	fi
	if [[ -n "${FEATURE_GATES:-}" ]]; then
		params+=" --feature-gates=${FEATURE_GATES}"
	fi
	if [[ "${ENABLE_APISERVER_ADVANCED_AUDIT:-}" == "true" ]]; then
		# Create the audit policy file, and mount it into the apiserver pod.
		create-master-audit-policy "${audit_policy_file}" "${ADVANCED_AUDIT_POLICY:-}"

		# The config below matches the one in cluster/gce/gci/configure-helper.sh.
		# TODO: Currently supporting just log backend. Support webhook if needed.
		params+=" --audit-policy-file=${audit_policy_file}"
		params+=" --audit-log-path=/var/log/kube-apiserver-audit.log"
		params+=" --audit-log-maxage=0"
		params+=" --audit-log-maxbackup=0"
		params+=" --audit-log-maxsize=2000000000"
	fi
	# Append APISERVER_TEST_ARGS to the end, which will allow for
	# the above defaults to be overridden.
	params+=" ${APISERVER_TEST_ARGS:-}"
	echo "${params}"
}

# Computes command line arguments to be passed to controller-manager.
function compute-kube-controller-manager-params {
	local params="${CONTROLLER_MANAGER_TEST_ARGS:-}"
	params+=" --use-service-account-credentials"
	params+=" --kubeconfig=/etc/srv/kubernetes/kube-controller-manager/kubeconfig"
	params+=" --service-account-private-key-file=/etc/srv/kubernetes/server.key"
	params+=" --root-ca-file=/etc/srv/kubernetes/ca.crt"
	params+=" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}"
	params+=" --cluster-cidr=${CLUSTER_IP_RANGE}"
	params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
	params+=" --terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}"
	echo "${params}"
}

# Computes command line arguments to be passed to scheduler.
function compute-kube-scheduler-params {
	local params="${SCHEDULER_TEST_ARGS:-}"
	params+=" --kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig"
	echo "${params}"
}

# Computes command line arguments to be passed to addon-manager.
function compute-kube-addon-manager-params {
	echo ""
}

# Start a kubernetes master component '$1' which can be any of the following:
# 1. etcd
# 2. etcd-events
# 3. kube-apiserver
# 4. kube-controller-manager
# 5. kube-scheduler
# 6. kube-addon-manager
#
# It prepares the log file, loads the docker tag, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars:
#   DOCKER_REGISTRY
function start-kubemaster-component() {
	echo "Starting master component $1"
	local -r component=$1
	prepare-log-file /var/log/"${component}".log
	local -r src_file="${KUBE_ROOT}/${component}.yaml"
	local -r params=$("compute-${component}-params")

	# Evaluate variables.
	sed -i -e "s@{{params}}@${params}@g" "${src_file}"
	sed -i -e "s@{{kube_docker_registry}}@${DOCKER_REGISTRY}@g" "${src_file}"
	sed -i -e "s@{{instance_prefix}}@${INSTANCE_PREFIX}@g" "${src_file}"
	if [ "${component:0:4}" == "etcd" ]; then
		sed -i -e "s@{{etcd_image}}@${ETCD_IMAGE}@g" "${src_file}"
	elif [ "${component}" == "kube-addon-manager" ]; then
		setup-addon-manifests "addons" "kubemark-rbac-bindings"
	else
		local -r component_docker_tag=$(cat "${KUBE_BINDIR}/${component}.docker_tag")
		sed -i -e "s@{{${component}_docker_tag}}@${component_docker_tag}@g" "${src_file}"
		if [ "${component}" == "kube-apiserver" ]; then
			local audit_policy_config_mount=""
			local audit_policy_config_volume=""
			if [[ "${ENABLE_APISERVER_ADVANCED_AUDIT:-}" == "true" ]]; then
				# '|| true' is needed because read returns non-zero on
				# reaching EOF, which would otherwise trip errexit.
				read -r -d '' audit_policy_config_mount << EOF || true
- name: auditpolicyconfigmount
  mountPath: ${audit_policy_file}
  readOnly: true
EOF
				read -r -d '' audit_policy_config_volume << EOF || true
- name: auditpolicyconfigmount
  hostPath:
    path: ${audit_policy_file}
    type: FileOrCreate
EOF
			fi
			sed -i -e "s@{{audit_policy_config_mount}}@${audit_policy_config_mount}@g" "${src_file}"
			sed -i -e "s@{{audit_policy_config_volume}}@${audit_policy_config_volume}@g" "${src_file}"
		fi
	fi
	cp "${src_file}" /etc/kubernetes/manifests
}
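# Illustration of the templating above: a manifest line such as (hypothetical)
#   command: ["/bin/sh", "-c", "kube-apiserver {{params}} ..."]
# has its {{params}} placeholder replaced in-place by the computed flag string
# via sed before the manifest is copied into /etc/kubernetes/manifests.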

############################### Main Function ########################################
echo "Starting to configure master instance for kubemark"

# Extract files from the server tar and setup master env variables.
cd "${KUBE_ROOT}"
if [[ ! -d "${KUBE_ROOT}/kubernetes" ]]; then
	tar xzf kubernetes-server-linux-amd64.tar.gz
fi
source "${KUBE_ROOT}/kubemark-master-env.sh"

# Setup IP firewall rules, required directory structure and etcd config.
config-ip-firewall
create-dirs
setup-kubelet-dir
delete-default-etcd-configs
compute-etcd-variables

# Setup authentication tokens and kubeconfigs for kube-controller-manager and kube-scheduler,
# only if their kubeconfigs don't already exist as this script could be running on reboot.
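# Each token below is generated by reading 128 random bytes, base64-encoding
# them, stripping the non-alphanumeric characters (=, +, /), and keeping the
# first 32 bytes of the result, yielding a 32-character alphanumeric token.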
if [[ ! -f "${KUBE_ROOT}/k8s_auth_data/kube-controller-manager/kubeconfig" ]]; then
	KUBE_CONTROLLER_MANAGER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
	echo "${KUBE_CONTROLLER_MANAGER_TOKEN},system:kube-controller-manager,uid:system:kube-controller-manager" >> "${KUBE_ROOT}/k8s_auth_data/known_tokens.csv"
	create-kubecontrollermanager-kubeconfig
fi
if [[ ! -f "${KUBE_ROOT}/k8s_auth_data/kube-scheduler/kubeconfig" ]]; then
	KUBE_SCHEDULER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
	echo "${KUBE_SCHEDULER_TOKEN},system:kube-scheduler,uid:system:kube-scheduler" >> "${KUBE_ROOT}/k8s_auth_data/known_tokens.csv"
	create-kubescheduler-kubeconfig
fi

ADDON_MANAGER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "${ADDON_MANAGER_TOKEN},system:addon-manager,admin,system:masters" >> "${KUBE_ROOT}/k8s_auth_data/known_tokens.csv"
create-addonmanager-kubeconfig

# Mount master PD for etcd and create symbolic links to it.
{
	main_etcd_mount_point="/mnt/disks/master-pd"
	mount-pd "google-master-pd" "${main_etcd_mount_point}"
	# Contains all the data stored in etcd.
	mkdir -p "${main_etcd_mount_point}/var/etcd"
	chmod 700 "${main_etcd_mount_point}/var/etcd"
	ln -s -f "${main_etcd_mount_point}/var/etcd" /var/etcd
	mkdir -p /etc/srv
	# Set up the dynamically generated apiserver auth certs and keys on the PD.
	mkdir -p "${main_etcd_mount_point}/srv/kubernetes"
	ln -s -f "${main_etcd_mount_point}/srv/kubernetes" /etc/srv/kubernetes
	# Copy the files to the PD only if they don't exist (so we do it only the first time).
	if [[ "$(ls -A ${main_etcd_mount_point}/srv/kubernetes/)" == "" ]]; then
		cp -r "${KUBE_ROOT}"/k8s_auth_data/* "${main_etcd_mount_point}/srv/kubernetes/"
	fi
	# Directory for kube-apiserver to store SSH key (if necessary).
	mkdir -p "${main_etcd_mount_point}/srv/sshproxy"
	ln -s -f "${main_etcd_mount_point}/srv/sshproxy" /etc/srv/sshproxy
}

# Mount master PD for event-etcd (if required) and create symbolic links to it.
{
	EVENT_STORE_IP="${EVENT_STORE_IP:-127.0.0.1}"
	EVENT_STORE_URL="${EVENT_STORE_URL:-http://${EVENT_STORE_IP}:4002}"
	if [ "${EVENT_PD:-}" == "true" ]; then
		event_etcd_mount_point="/mnt/disks/master-event-pd"
		mount-pd "google-master-event-pd" "${event_etcd_mount_point}"
		# Contains all the data stored in event etcd.
		mkdir -p "${event_etcd_mount_point}/var/etcd/events"
		chmod 700 "${event_etcd_mount_point}/var/etcd/events"
		ln -s -f "${event_etcd_mount_point}/var/etcd/events" /var/etcd/events
	fi
}

# Setup docker flags and load images of the master components.
assemble-docker-flags
DOCKER_REGISTRY="registry.k8s.io"
load-docker-images

readonly audit_policy_file="/etc/audit_policy.config"

# Start kubelet as a systemd service and master components as static pods.
start-kubelet
if [[ -z "${ETCD_SERVERS:-}" ]]; then
	start-kubemaster-component "etcd"
	if [ "${EVENT_STORE_IP:-}" == "127.0.0.1" ]; then
		start-kubemaster-component "etcd-events"
	fi
fi
start-kubemaster-component "kube-apiserver"
start-kubemaster-component "kube-controller-manager"
start-kubemaster-component "kube-scheduler"
start-kubemaster-component "kube-addon-manager"

# Wait until the apiserver is healthy, or time out after 300 seconds.
echo -n "Waiting for apiserver to be healthy"
start=$(date +%s)
until [ "$(curl 127.0.0.1:8080/healthz 2> /dev/null)" == "ok" ]; do
	echo -n "."
	sleep 1
	now=$(date +%s)
	if [ $((now - start)) -gt 300 ]; then
		echo "Timeout!"
		exit 1
	fi
done
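# The health check above hits the apiserver's insecure port (8080 is the
# historical default), which is reachable here because the apiserver is
# started with --insecure-bind-address=0.0.0.0.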

echo "Done configuring the kubemark master"