package kubernetes

// https://github.com/coreos/coreos-kubernetes/tree/master/multi-node/generic.
// The only change besides parameterizing the env vars was:
// s/COREOS_PUBLIC_IPV4/COREOS_PRIVATE_IPV4/ so this works on GCE.

// controllerInstallScript is the upstream CoreOS "generic" controller
// install script, embedded as one raw string. The {{.ETCD_ENDPOINTS}},
// {{.K8S_VER}}, {{.HYPERKUBE_IMAGE_REPO}} and {{.CONTAINER_RUNTIME}}
// placeholders are Go template fields substituted by the caller before the
// script runs on the controller node. NOTE(review): the script body is kept
// byte-identical to upstream on purpose — do not "fix" its shell quoting or
// comment typos here without diffing against the upstream repo first.
const controllerInstallScript = `#!/bin/bash
set -e

# List of etcd servers (http://ip:port), comma separated
export ETCD_ENDPOINTS={{.ETCD_ENDPOINTS}}

# Specify the version (vX.Y.Z) of Kubernetes assets to deploy
export K8S_VER={{.K8S_VER}}

# Hyperkube image repository to use.
export HYPERKUBE_IMAGE_REPO={{.HYPERKUBE_IMAGE_REPO}}

# The CIDR network to use for pod IPs.
# Each pod launched in the cluster will be assigned an IP out of this range.
# Each node will be configured such that these IPs will be routable using the flannel overlay network.
export POD_NETWORK=10.2.0.0/16

# The CIDR network to use for service cluster IPs.
# Each service will be assigned a cluster IP out of this range.
# This must not overlap with any IP ranges assigned to the POD_NETWORK, or other existing network infrastructure.
# Routing to these IPs is handled by a proxy service local to each node, and are not required to be routable between nodes.
export SERVICE_IP_RANGE=10.3.0.0/24

# The IP address of the Kubernetes API Service
# If the SERVICE_IP_RANGE is changed above, this must be set to the first IP in that range.
export K8S_SERVICE_IP=10.3.0.1

# The IP address of the cluster DNS service.
# This IP must be in the range of the SERVICE_IP_RANGE and cannot be the first IP in the range.
# This same IP must be configured on all worker nodes to enable DNS service discovery.
export DNS_SERVICE_IP=10.3.0.10

# Whether to use Calico for Kubernetes network policy.
export USE_CALICO=false

# Determines the container runtime for kubernetes to use. Accepts 'docker' or 'rkt'.
export CONTAINER_RUNTIME={{.CONTAINER_RUNTIME}}

# The above settings can optionally be overridden using an environment file:
ENV_FILE=/run/coreos-kubernetes/options.env

# -------------

function init_config {
    local REQUIRED=('ADVERTISE_IP' 'POD_NETWORK' 'ETCD_ENDPOINTS' 'SERVICE_IP_RANGE' 'K8S_SERVICE_IP' 'DNS_SERVICE_IP' 'K8S_VER' 'HYPERKUBE_IMAGE_REPO' 'USE_CALICO')

    if [ -f $ENV_FILE ]; then
        export $(cat $ENV_FILE | xargs)
    fi

    if [ -z $ADVERTISE_IP ]; then
        export ADVERTISE_IP=$(awk -F= '/COREOS_PRIVATE_IPV4/ {print $2}' /etc/environment)
    fi

    for REQ in "${REQUIRED[@]}"; do
        if [ -z "$(eval echo \$$REQ)" ]; then
            echo "Missing required config value: ${REQ}"
            exit 1
        fi
    done
}

function init_flannel {
    echo "Waiting for etcd..."
    while true
    do
        IFS=',' read -ra ES <<< "$ETCD_ENDPOINTS"
        for ETCD in "${ES[@]}"; do
            echo "Trying: $ETCD"
            if [ -n "$(curl --silent "$ETCD/v2/machines")" ]; then
                local ACTIVE_ETCD=$ETCD
                break
            fi
            sleep 1
        done
        if [ -n "$ACTIVE_ETCD" ]; then
            break
        fi
    done
    RES=$(curl --silent -X PUT -d "value={\"Network\":\"$POD_NETWORK\",\"Backend\":{\"Type\":\"vxlan\"}}" "$ACTIVE_ETCD/v2/keys/coreos.com/network/config?prevExist=false")
    if [ -z "$(echo $RES | grep '"action":"create"')" ] && [ -z "$(echo $RES | grep 'Key already exists')" ]; then
        echo "Unexpected error configuring flannel pod network: $RES"
    fi
}

function init_templates {
    local TEMPLATE=/etc/systemd/system/kubelet.service
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Service]
Environment=KUBELET_VERSION=${K8S_VER}
Environment=KUBELET_ACI=${HYPERKUBE_IMAGE_REPO}
Environment="RKT_OPTS=--volume dns,kind=host,source=/etc/resolv.conf \
  --mount volume=dns,target=/etc/resolv.conf \
  --volume=rkt,kind=host,source=/opt/bin/host-rkt \
  --mount volume=rkt,target=/usr/bin/rkt \
  --volume var-lib-rkt,kind=host,source=/var/lib/rkt \
  --mount volume=var-lib-rkt,target=/var/lib/rkt \
  --volume=stage,kind=host,source=/tmp \
  --mount volume=stage,target=/tmp"
ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
ExecStart=/usr/lib/coreos/kubelet-wrapper \
  --api-servers=http://127.0.0.1:8080 \
  --register-schedulable=false \
  --network-plugin-dir=/etc/kubernetes/cni/net.d \
  --network-plugin=cni \
  --container-runtime=${CONTAINER_RUNTIME} \
  --rkt-path=/usr/bin/rkt \
  --rkt-stage1-image=coreos.com/rkt/stage1-coreos \
  --allow-privileged=true \
  --config=/etc/kubernetes/manifests \
  --hostname-override=${ADVERTISE_IP} \
  --cluster_dns=${DNS_SERVICE_IP} \
  --cluster_domain=cluster.local
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF
    fi

    local TEMPLATE=/opt/bin/host-rkt
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
#!/bin/sh
# This is bind mounted into the kubelet rootfs and all rkt shell-outs go
# through this rkt wrapper. It essentially enters the host mount namespace
# (which it is already in) only for the purpose of breaking out of the chroot
# before calling rkt. It makes things like rkt gc work and avoids bind mounting
# in certain rkt filesystem dependancies into the kubelet rootfs. This can
# eventually be obviated when the write-api stuff gets upstream and rkt gc is
# through the api-server. Related issue:
# https://github.com/coreos/rkt/issues/2878
exec nsenter -m -u -i -n -p -t 1 -- /usr/bin/rkt "\$@"
EOF
    fi


    local TEMPLATE=/etc/systemd/system/load-rkt-stage1.service
    if [ ${CONTAINER_RUNTIME} = "rkt" ] && [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Unit]
Description=Load rkt stage1 images
Documentation=http://github.com/coreos/rkt
Requires=network-online.target
After=network-online.target
Before=rkt-api.service

[Service]
RemainAfterExit=yes
Type=oneshot
ExecStart=/usr/bin/rkt fetch /usr/lib/rkt/stage1-images/stage1-coreos.aci /usr/lib/rkt/stage1-images/stage1-fly.aci --insecure-options=image

[Install]
RequiredBy=rkt-api.service
EOF
    fi

    local TEMPLATE=/etc/systemd/system/rkt-api.service
    if [ ${CONTAINER_RUNTIME} = "rkt" ] && [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Unit]
Before=kubelet.service

[Service]
ExecStart=/usr/bin/rkt api-service
Restart=always
RestartSec=10

[Install]
RequiredBy=kubelet.service
EOF
    fi

    local TEMPLATE=/etc/systemd/system/calico-node.service
    if [ "${USE_CALICO}" = "true" ] && [ ! -f "${TEMPLATE}" ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Unit]
Description=Calico per-host agent
Requires=network-online.target
After=network-online.target

[Service]
Slice=machine.slice
Environment=CALICO_DISABLE_FILE_LOGGING=true
Environment=HOSTNAME=${ADVERTISE_IP}
Environment=IP=${ADVERTISE_IP}
Environment=FELIX_FELIXHOSTNAME=${ADVERTISE_IP}
Environment=CALICO_NETWORKING=false
Environment=NO_DEFAULT_POOLS=true
Environment=ETCD_ENDPOINTS=${ETCD_ENDPOINTS}
ExecStart=/usr/bin/rkt run --inherit-env --stage1-from-dir=stage1-fly.aci \
--volume=modules,kind=host,source=/lib/modules,readOnly=false \
--mount=volume=modules,target=/lib/modules \
--trust-keys-from-https quay.io/calico/node:v0.19.0
KillMode=mixed
Restart=always
TimeoutStartSec=0

[Install]
WantedBy=multi-user.target
EOF
    fi

    local TEMPLATE=/etc/kubernetes/manifests/kube-proxy.yaml
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
apiVersion: v1
kind: Pod
metadata:
  name: kube-proxy
  namespace: kube-system
  annotations:
    rkt.alpha.kubernetes.io/stage1-name-override: coreos.com/rkt/stage1-fly
spec:
  hostNetwork: true
  containers:
  - name: kube-proxy
    image: ${HYPERKUBE_IMAGE_REPO}:$K8S_VER
    command:
    - /hyperkube
    - proxy
    - --master=http://127.0.0.1:8080
    securityContext:
      privileged: true
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
    - mountPath: /var/run/dbus
      name: dbus
      readOnly: false
  volumes:
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host
  - hostPath:
      path: /var/run/dbus
    name: dbus
EOF
    fi

    local TEMPLATE=/etc/kubernetes/manifests/kube-apiserver.yaml
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-apiserver
    image: ${HYPERKUBE_IMAGE_REPO}:$K8S_VER
    command:
    - /hyperkube
    - apiserver
    - --bind-address=0.0.0.0
    - --etcd-servers=${ETCD_ENDPOINTS}
    - --allow-privileged=true
    - --service-cluster-ip-range=${SERVICE_IP_RANGE}
    - --secure-port=443
    - --advertise-address=${ADVERTISE_IP}
    - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota
    - --tls-cert-file=/etc/kubernetes/ssl/apiserver.pem
    - --tls-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
    - --client-ca-file=/etc/kubernetes/ssl/ca.pem
    - --service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem
    - --runtime-config=extensions/v1beta1/networkpolicies=true
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        port: 8080
        path: /healthz
      initialDelaySeconds: 15
      timeoutSeconds: 15
    ports:
    - containerPort: 443
      hostPort: 443
      name: https
    - containerPort: 8080
      hostPort: 8080
      name: local
    volumeMounts:
    - mountPath: /etc/kubernetes/ssl
      name: ssl-certs-kubernetes
      readOnly: true
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
  volumes:
  - hostPath:
      path: /etc/kubernetes/ssl
    name: ssl-certs-kubernetes
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host
EOF
    fi

    local TEMPLATE=/etc/kubernetes/manifests/kube-controller-manager.yaml
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
apiVersion: v1
kind: Pod
metadata:
  name: kube-controller-manager
  namespace: kube-system
spec:
  containers:
  - name: kube-controller-manager
    image: ${HYPERKUBE_IMAGE_REPO}:$K8S_VER
    command:
    - /hyperkube
    - controller-manager
    - --master=http://127.0.0.1:8080
    - --leader-elect=true
    - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
    - --root-ca-file=/etc/kubernetes/ssl/ca.pem
    resources:
      requests:
        cpu: 200m
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10252
      initialDelaySeconds: 15
      timeoutSeconds: 15
    volumeMounts:
    - mountPath: /etc/kubernetes/ssl
      name: ssl-certs-kubernetes
      readOnly: true
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
  hostNetwork: true
  volumes:
  - hostPath:
      path: /etc/kubernetes/ssl
    name: ssl-certs-kubernetes
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host
EOF
    fi

    local TEMPLATE=/etc/kubernetes/manifests/kube-scheduler.yaml
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
apiVersion: v1
kind: Pod
metadata:
  name: kube-scheduler
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-scheduler
    image: ${HYPERKUBE_IMAGE_REPO}:$K8S_VER
    command:
    - /hyperkube
    - scheduler
    - --master=http://127.0.0.1:8080
    - --leader-elect=true
    resources:
      requests:
        cpu: 100m
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10251
      initialDelaySeconds: 15
      timeoutSeconds: 15
EOF
    fi

    local TEMPLATE=/etc/kubernetes/manifests/calico-policy-controller.yaml
    if [ "${USE_CALICO}" = "true" ] && [ ! -f "${TEMPLATE}" ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
apiVersion: v1
kind: Pod
metadata:
  name: calico-policy-controller
  namespace: calico-system
spec:
  hostNetwork: true
  containers:
    # The Calico policy controller.
    - name: kube-policy-controller
      image: calico/kube-policy-controller:v0.2.0
      env:
        - name: ETCD_ENDPOINTS
          value: "${ETCD_ENDPOINTS}"
        - name: K8S_API
          value: "http://127.0.0.1:8080"
        - name: LEADER_ELECTION
          value: "true"
    # Leader election container used by the policy controller.
    - name: leader-elector
      image: quay.io/calico/leader-elector:v0.1.0
      imagePullPolicy: IfNotPresent
      args:
        - "--election=calico-policy-election"
        - "--election-namespace=calico-system"
        - "--http=127.0.0.1:4040"
EOF
    fi

    local TEMPLATE=/srv/kubernetes/manifests/calico-system.json
    if [ "${USE_CALICO}" = "true" ] && [ ! -f "${TEMPLATE}" ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
{
  "apiVersion": "v1",
  "kind": "Namespace",
  "metadata": {
    "name": "calico-system"
  }
}
EOF
    fi

    local TEMPLATE=/srv/kubernetes/manifests/kube-dns-rc.json
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
{
  "apiVersion": "v1",
  "kind": "ReplicationController",
  "metadata": {
    "labels": {
      "k8s-app": "kube-dns",
      "kubernetes.io/cluster-service": "true",
      "version": "v15"
    },
    "name": "kube-dns-v15",
    "namespace": "kube-system"
  },
  "spec": {
    "replicas": 1,
    "selector": {
      "k8s-app": "kube-dns",
      "version": "v15"
    },
    "template": {
      "metadata": {
        "labels": {
          "k8s-app": "kube-dns",
          "kubernetes.io/cluster-service": "true",
          "version": "v15"
        }
      },
      "spec": {
        "containers": [
          {
            "args": [
              "--domain=cluster.local.",
              "--dns-port=10053"
            ],
            "image": "gcr.io/google_containers/kubedns-amd64:1.3",
            "livenessProbe": {
              "failureThreshold": 5,
              "httpGet": {
                "path": "/healthz",
                "port": 8080,
                "scheme": "HTTP"
              },
              "initialDelaySeconds": 60,
              "successThreshold": 1,
              "timeoutSeconds": 5
            },
            "name": "kubedns",
            "ports": [
              {
                "containerPort": 10053,
                "name": "dns-local",
                "protocol": "UDP"
              },
              {
                "containerPort": 10053,
                "name": "dns-tcp-local",
                "protocol": "TCP"
              }
            ],
            "readinessProbe": {
              "httpGet": {
                "path": "/readiness",
                "port": 8081,
                "scheme": "HTTP"
              },
              "initialDelaySeconds": 30,
              "timeoutSeconds": 5
            },
            "resources": {
              "limits": {
                "cpu": "100m",
                "memory": "200Mi"
              },
              "requests": {
                "cpu": "100m",
                "memory": "50Mi"
              }
            }
          },
          {
            "args": [
              "--cache-size=1000",
              "--no-resolv",
              "--server=127.0.0.1#10053"
            ],
            "image": "gcr.io/google_containers/kube-dnsmasq-amd64:1.3",
            "name": "dnsmasq",
            "ports": [
              {
                "containerPort": 53,
                "name": "dns",
                "protocol": "UDP"
              },
              {
                "containerPort": 53,
                "name": "dns-tcp",
                "protocol": "TCP"
              }
            ]
          },
          {
            "args": [
              "-cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null",
              "-port=8080",
              "-quiet"
            ],
            "image": "gcr.io/google_containers/exechealthz-amd64:1.0",
            "name": "healthz",
            "ports": [
              {
                "containerPort": 8080,
                "protocol": "TCP"
              }
            ],
            "resources": {
              "limits": {
                "cpu": "10m",
                "memory": "20Mi"
              },
              "requests": {
                "cpu": "10m",
                "memory": "20Mi"
              }
            }
          }
        ],
        "dnsPolicy": "Default"
      }
    }
  }
}
EOF
    fi

    local TEMPLATE=/srv/kubernetes/manifests/kube-dns-svc.json
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
{
  "apiVersion": "v1",
  "kind": "Service",
  "metadata": {
    "labels": {
      "k8s-app": "kube-dns",
      "kubernetes.io/cluster-service": "true",
      "kubernetes.io/name": "KubeDNS"
    },
    "name": "kube-dns",
    "namespace": "kube-system"
  },
  "spec": {
    "clusterIP": "$DNS_SERVICE_IP",
    "ports": [
      {
        "name": "dns",
        "port": 53,
        "protocol": "UDP"
      },
      {
        "name": "dns-tcp",
        "port": 53,
        "protocol": "TCP"
      }
    ],
    "selector": {
      "k8s-app": "kube-dns"
    }
  }
}
EOF
    fi

    local TEMPLATE=/srv/kubernetes/manifests/heapster-de.json
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
{
  "apiVersion": "extensions/v1beta1",
  "kind": "Deployment",
  "metadata": {
    "labels": {
      "k8s-app": "heapster",
      "kubernetes.io/cluster-service": "true",
      "version": "v1.1.0"
    },
    "name": "heapster-v1.1.0",
    "namespace": "kube-system"
  },
  "spec": {
    "replicas": 1,
    "selector": {
      "matchLabels": {
        "k8s-app": "heapster",
        "version": "v1.1.0"
      }
    },
    "template": {
      "metadata": {
        "labels": {
          "k8s-app": "heapster",
          "version": "v1.1.0"
        }
      },
      "spec": {
        "containers": [
          {
            "command": [
              "/heapster",
              "--source=kubernetes.summary_api:''"
            ],
            "image": "gcr.io/google_containers/heapster:v1.1.0",
            "name": "heapster",
            "resources": {
              "limits": {
                "cpu": "100m",
                "memory": "200Mi"
              },
              "requests": {
                "cpu": "100m",
                "memory": "200Mi"
              }
            }
          },
          {
            "command": [
              "/pod_nanny",
              "--cpu=100m",
              "--extra-cpu=0.5m",
              "--memory=200Mi",
              "--extra-memory=4Mi",
              "--threshold=5",
              "--deployment=heapster-v1.1.0",
              "--container=heapster",
              "--poll-period=300000",
              "--estimator=exponential"
            ],
            "env": [
              {
                "name": "MY_POD_NAME",
                "valueFrom": {
                  "fieldRef": {
                    "fieldPath": "metadata.name"
                  }
                }
              },
              {
                "name": "MY_POD_NAMESPACE",
                "valueFrom": {
                  "fieldRef": {
                    "fieldPath": "metadata.namespace"
                  }
                }
              }
            ],
            "image": "gcr.io/google_containers/addon-resizer:1.3",
            "name": "heapster-nanny",
            "resources": {
              "limits": {
                "cpu": "50m",
                "memory": "100Mi"
              },
              "requests": {
                "cpu": "50m",
                "memory": "100Mi"
              }
            }
          }
        ]
      }
    }
  }
}
EOF
    fi

    local TEMPLATE=/srv/kubernetes/manifests/heapster-svc.json
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
{
  "apiVersion": "v1",
  "kind": "Service",
  "metadata": {
    "labels": {
      "kubernetes.io/cluster-service": "true",
      "kubernetes.io/name": "Heapster"
    },
    "name": "heapster",
    "namespace": "kube-system"
  },
  "spec": {
    "ports": [
      {
        "port": 80,
        "targetPort": 8082
      }
    ],
    "selector": {
      "k8s-app": "heapster"
    }
  }
}
EOF
    fi

    local TEMPLATE=/srv/kubernetes/manifests/kube-dashboard-rc.json
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
{
  "apiVersion": "v1",
  "kind": "ReplicationController",
  "metadata": {
    "labels": {
      "k8s-app": "kubernetes-dashboard",
      "kubernetes.io/cluster-service": "true",
      "version": "v1.1.0"
    },
    "name": "kubernetes-dashboard-v1.1.0",
    "namespace": "kube-system"
  },
  "spec": {
    "replicas": 1,
    "selector": {
      "k8s-app": "kubernetes-dashboard"
    },
    "template": {
      "metadata": {
        "labels": {
          "k8s-app": "kubernetes-dashboard",
          "kubernetes.io/cluster-service": "true",
          "version": "v1.1.0"
        }
      },
      "spec": {
        "containers": [
          {
            "image": "gcr.io/google_containers/kubernetes-dashboard-amd64:v1.1.0",
            "livenessProbe": {
              "httpGet": {
                "path": "/",
                "port": 9090
              },
              "initialDelaySeconds": 30,
              "timeoutSeconds": 30
            },
            "name": "kubernetes-dashboard",
            "ports": [
              {
                "containerPort": 9090
              }
            ],
            "resources": {
              "limits": {
                "cpu": "100m",
                "memory": "50Mi"
              },
              "requests": {
                "cpu": "100m",
                "memory": "50Mi"
              }
            }
          }
        ]
      }
    }
  }
}
EOF
    fi

    local TEMPLATE=/srv/kubernetes/manifests/kube-dashboard-svc.json
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
{
  "apiVersion": "v1",
  "kind": "Service",
  "metadata": {
    "labels": {
      "k8s-app": "kubernetes-dashboard",
      "kubernetes.io/cluster-service": "true"
    },
    "name": "kubernetes-dashboard",
    "namespace": "kube-system"
  },
  "spec": {
    "ports": [
      {
        "port": 80,
        "targetPort": 9090
      }
    ],
    "selector": {
      "k8s-app": "kubernetes-dashboard"
    }
  }
}
EOF
    fi

    local TEMPLATE=/etc/flannel/options.env
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
FLANNELD_IFACE=$ADVERTISE_IP
FLANNELD_ETCD_ENDPOINTS=$ETCD_ENDPOINTS
EOF
    fi

    local TEMPLATE=/etc/systemd/system/flanneld.service.d/40-ExecStartPre-symlink.conf.conf
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Service]
ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env
EOF
    fi

    local TEMPLATE=/etc/systemd/system/docker.service.d/40-flannel.conf
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
[Unit]
Requires=flanneld.service
After=flanneld.service
[Service]
EnvironmentFile=/etc/kubernetes/cni/docker_opts_cni.env
EOF
    fi

    local TEMPLATE=/etc/kubernetes/cni/docker_opts_cni.env
    if [ ! -f $TEMPLATE ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
DOCKER_OPT_BIP=""
DOCKER_OPT_IPMASQ=""
EOF
    fi

    local TEMPLATE=/etc/kubernetes/cni/net.d/10-calico.conf
    if [ "${USE_CALICO}" = "true" ] && [ ! -f "${TEMPLATE}" ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
{
  "name": "calico",
  "type": "flannel",
  "delegate": {
    "type": "calico",
    "etcd_endpoints": "$ETCD_ENDPOINTS",
    "log_level": "none",
    "log_level_stderr": "info",
    "hostname": "${ADVERTISE_IP}",
    "policy": {
      "type": "k8s",
      "k8s_api_root": "http://127.0.0.1:8080/api/v1/"
    }
  }
}
EOF
    fi

    local TEMPLATE=/etc/kubernetes/cni/net.d/10-flannel.conf
    if [ "${USE_CALICO}" = "false" ] && [ ! -f "${TEMPLATE}" ]; then
        echo "TEMPLATE: $TEMPLATE"
        mkdir -p $(dirname $TEMPLATE)
        cat << EOF > $TEMPLATE
{
  "name": "podnet",
  "type": "flannel",
  "delegate": {
    "isDefaultGateway": true
  }
}
EOF
    fi
}

function start_addons {
    echo "Waiting for Kubernetes API..."
    until curl --silent "http://127.0.0.1:8080/version"
    do
        sleep 5
    done
    echo
    echo "K8S: DNS addon"
    curl --silent -H "Content-Type: application/json" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-rc.json)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/replicationcontrollers" > /dev/null
    curl --silent -H "Content-Type: application/json" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-svc.json)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services" > /dev/null
    echo "K8S: Heapster addon"
    curl --silent -H "Content-Type: application/json" -XPOST -d"$(cat /srv/kubernetes/manifests/heapster-de.json)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments" > /dev/null
    curl --silent -H "Content-Type: application/json" -XPOST -d"$(cat /srv/kubernetes/manifests/heapster-svc.json)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services" > /dev/null
    echo "K8S: Dashboard addon"
    curl --silent -H "Content-Type: application/json" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dashboard-rc.json)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/replicationcontrollers" > /dev/null
    curl --silent -H "Content-Type: application/json" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dashboard-svc.json)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services" > /dev/null
}

function enable_calico_policy {
    echo "Waiting for Kubernetes API..."
    until curl --silent "http://127.0.0.1:8080/version"
    do
        sleep 5
    done
    echo
    echo "K8S: Calico Policy"
    curl --silent -H "Content-Type: application/json" -XPOST -d"$(cat /srv/kubernetes/manifests/calico-system.json)" "http://127.0.0.1:8080/api/v1/namespaces/" > /dev/null
}

init_config
init_templates

chmod +x /opt/bin/host-rkt

init_flannel

systemctl stop update-engine; systemctl mask update-engine

systemctl daemon-reload

if [ $CONTAINER_RUNTIME = "rkt" ]; then
    systemctl enable load-rkt-stage1
    systemctl enable rkt-api
fi

systemctl enable flanneld; systemctl start flanneld
systemctl enable kubelet; systemctl start kubelet

if [ $USE_CALICO = "true" ]; then
    systemctl enable calico-node; systemctl start calico-node
    enable_calico_policy
fi

start_addons
echo "DONE"`