# github.com/munnerz/test-infra@v0.0.0-20190108210205-ce3d181dc989/config/jobs/kubernetes/sig-network/ci-e2e-gce-netd.yaml
presets:
- labels:
    preset-kubernetes-e2e-enable-netd: "true"
  env:
  - name: KUBE_NON_MASTER_NODE_LABELS
    value: cloud.google.com/gke-netd-ready=true
  # remove this after https://github.com/kubernetes/kubernetes/pull/69051
  - name: KUBE_CUSTOM_NETD_YAML
    # we want to keep extra spaces for the yaml file.
    value: |2+
        kind: ClusterRole
        apiVersion: rbac.authorization.k8s.io/v1
        metadata:
          name: netd
          namespace: kube-system
          labels:
            kubernetes.io/cluster-service: "true"
            addonmanager.kubernetes.io/mode: Reconcile
        rules:
        - apiGroups: [""]
          resources: ["nodes"]
          verbs: ["get"]
        ---
        kind: ServiceAccount
        apiVersion: v1
        metadata:
          name: netd
          namespace: kube-system
          labels:
            kubernetes.io/cluster-service: "true"
            addonmanager.kubernetes.io/mode: Reconcile
        ---
        kind: ClusterRoleBinding
        apiVersion: rbac.authorization.k8s.io/v1
        metadata:
          name: netd
          labels:
            kubernetes.io/cluster-service: "true"
            addonmanager.kubernetes.io/mode: Reconcile
        roleRef:
          apiGroup: rbac.authorization.k8s.io
          kind: ClusterRole
          name: netd
        subjects:
        - kind: ServiceAccount
          name: netd
          namespace: kube-system
        ---
        kind: ConfigMap
        apiVersion: v1
        metadata:
          name: netd-config
          namespace: kube-system
          labels:
            addonmanager.kubernetes.io/mode: Reconcile
        data:
          cni_spec_template: |-
            {
              "name": "gke-pod-network",
              "cniVersion": "0.3.1",
              "plugins": [
                {
                  "type": "ptp",
                  "mtu": 1460,
                  "ipam": {
                    "type": "host-local",
                    "ranges": [
                      @ipv4Subnet@ipv6SubnetOptional
                    ],
                    "routes": [
                      {"dst": "0.0.0.0/0"}@ipv6RouteOptional
                    ]
                  }
                },
                {
                  "type": "portmap",
                  "capabilities": {
                    "portMappings": true
                  },
                  "noSnat": true
                }
              ]
            }
          cni_spec_name: "10-gke-ptp.conflist"
          enable_policy_routing: "true"
          enable_masquerade: "true"
          enable_calico_network_policy: "false"
          enable_private_ipv6_access: "false"
          reconcile_interval_seconds: "60s"
        ---
        kind: DaemonSet
        apiVersion: extensions/v1beta1
        metadata:
          name: netd
          namespace: kube-system
          labels:
            k8s-app: netd
            addonmanager.kubernetes.io/mode: Reconcile
        spec:
          selector:
            matchLabels:
              k8s-app: netd
          updateStrategy:
            type: RollingUpdate
            rollingUpdate:
              maxUnavailable: 10%
          template:
            metadata:
              labels:
                k8s-app: netd
            spec:
              priorityClassName: system-node-critical
              serviceAccountName: netd
              terminationGracePeriodSeconds: 0
              nodeSelector:
                cloud.google.com/gke-netd-ready: "true"
              tolerations:
              - operator: "Exists"
                effect: "NoExecute"
              - operator: "Exists"
                effect: "NoSchedule"
              hostNetwork: true
              initContainers:
              - image: gcr.io/google-containers/netd-amd64:latest
                name: install-cni
                command: ["sh", "/install-cni.sh"]
                env:
                - name: CNI_SPEC_TEMPLATE
                  valueFrom:
                    configMapKeyRef:
                      name: netd-config
                      key: cni_spec_template
                - name: CNI_SPEC_NAME
                  valueFrom:
                    configMapKeyRef:
                      name: netd-config
                      key: cni_spec_name
                - name: ENABLE_CALICO_NETWORK_POLICY
                  valueFrom:
                    configMapKeyRef:
                      name: netd-config
                      key: enable_calico_network_policy
                - name: ENABLE_PRIVATE_IPV6_ACCESS
                  valueFrom:
                    configMapKeyRef:
                      name: netd-config
                      key: enable_private_ipv6_access
                volumeMounts:
                - mountPath: /host/etc/cni/net.d
                  name: cni-net-dir
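              # The netd container defined below reads its flags (policy routing,
              # masquerade, reconcile interval) from the netd-config ConfigMap via
              # the env vars declared on the container.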
              containers:
              - image: gcr.io/google-containers/netd-amd64:latest
                name: netd
                imagePullPolicy: Always
                securityContext:
                  privileged: true
                  capabilities:
                    add: ["NET_ADMIN"]
                args:
                - --enable-policy-routing=$(ENABLE_POLICY_ROUTING)
                - --enable-masquerade=$(ENABLE_MASQUERADE)
                - --logtostderr
                - --reconcile-interval-seconds=$(RECONCILE_INTERVAL_SECONDS)
                env:
                - name: ENABLE_POLICY_ROUTING
                  valueFrom:
                    configMapKeyRef:
                      name: netd-config
                      key: enable_policy_routing
                - name: ENABLE_MASQUERADE
                  valueFrom:
                    configMapKeyRef:
                      name: netd-config
                      key: enable_masquerade
                - name: RECONCILE_INTERVAL_SECONDS
                  valueFrom:
                    configMapKeyRef:
                      name: netd-config
                      key: reconcile_interval_seconds
              volumes:
              - name: cni-net-dir
                hostPath:
                  path: /etc/cni/net.d
  - name: KUBE_UP_AUTOMATIC_CLEANUP
    value: "true"
  - name: KUBE_GCE_ENABLE_IP_ALIASES
    value: "true"
  - name: KUBE_ENABLE_NETD
    value: "true"

- labels:
    preset-kubernetes-e2e-enable-netd-calico: "true"
  env:
  - name: KUBE_NON_MASTER_NODE_LABELS
    value: cloud.google.com/gke-netd-ready=true
  # remove this after https://github.com/kubernetes/kubernetes/pull/69051
  - name: KUBE_CUSTOM_NETD_YAML
    # we want to keep extra spaces for the yaml file.
    value: |2+
        kind: ClusterRole
        apiVersion: rbac.authorization.k8s.io/v1
        metadata:
          name: netd
          namespace: kube-system
          labels:
            kubernetes.io/cluster-service: "true"
            addonmanager.kubernetes.io/mode: Reconcile
        rules:
        - apiGroups: [""]
          resources: ["nodes"]
          verbs: ["get"]
        ---
        kind: ServiceAccount
        apiVersion: v1
        metadata:
          name: netd
          namespace: kube-system
          labels:
            kubernetes.io/cluster-service: "true"
            addonmanager.kubernetes.io/mode: Reconcile
        ---
        kind: ClusterRoleBinding
        apiVersion: rbac.authorization.k8s.io/v1
        metadata:
          name: netd
          labels:
            kubernetes.io/cluster-service: "true"
            addonmanager.kubernetes.io/mode: Reconcile
        roleRef:
          apiGroup: rbac.authorization.k8s.io
          kind: ClusterRole
          name: netd
        subjects:
        - kind: ServiceAccount
          name: netd
          namespace: kube-system
        ---
        kind: ConfigMap
        apiVersion: v1
        metadata:
          name: netd-config
          namespace: kube-system
          labels:
            addonmanager.kubernetes.io/mode: Reconcile
        data:
          cni_spec_template: |-
            {
              "name": "gke-pod-network",
              "cniVersion": "0.3.1",
              "plugins": [
                {
                  "type": "ptp",
                  "mtu": 1460,
                  "ipam": {
                    "type": "host-local",
                    "ranges": [
                      @ipv4Subnet@ipv6SubnetOptional
                    ],
                    "routes": [
                      {"dst": "0.0.0.0/0"}@ipv6RouteOptional
                    ]
                  }
                },
                {
                  "type": "portmap",
                  "capabilities": {
                    "portMappings": true
                  },
                  "noSnat": true
                }
              ]
            }
          cni_spec_name: "10-gke-ptp.conflist"
          enable_policy_routing: "true"
          enable_masquerade: "true"
          enable_calico_network_policy: "false"
          enable_private_ipv6_access: "false"
          reconcile_interval_seconds: "60s"
        ---
        kind: DaemonSet
        apiVersion: extensions/v1beta1
        metadata:
          name: netd
          namespace: kube-system
          labels:
            k8s-app: netd
            addonmanager.kubernetes.io/mode: Reconcile
        spec:
          selector:
            matchLabels:
              k8s-app: netd
          updateStrategy:
            type: RollingUpdate
            rollingUpdate:
              maxUnavailable: 10%
          template:
            metadata:
              labels:
                k8s-app: netd
            spec:
              priorityClassName: system-node-critical
              serviceAccountName: netd
              terminationGracePeriodSeconds: 0
              nodeSelector:
                cloud.google.com/gke-netd-ready: "true"
              tolerations:
              - operator: "Exists"
                effect: "NoExecute"
              - operator: "Exists"
                effect: "NoSchedule"
              hostNetwork: true
              initContainers:
              - image: gcr.io/google-containers/netd-amd64:latest
                name: install-cni
                command: ["sh", "/install-cni.sh"]
                env:
                - name: CNI_SPEC_TEMPLATE
                  valueFrom:
                    configMapKeyRef:
                      name: netd-config
                      key: cni_spec_template
                - name: CNI_SPEC_NAME
                  valueFrom:
                    configMapKeyRef:
                      name: netd-config
                      key: cni_spec_name
                - name: ENABLE_CALICO_NETWORK_POLICY
                  valueFrom:
                    configMapKeyRef:
                      name: netd-config
                      key: enable_calico_network_policy
                - name: ENABLE_PRIVATE_IPV6_ACCESS
                  valueFrom:
                    configMapKeyRef:
                      name: netd-config
                      key: enable_private_ipv6_access
                volumeMounts:
                - mountPath: /host/etc/cni/net.d
                  name: cni-net-dir
              containers:
              - image: gcr.io/google-containers/netd-amd64:latest
                name: netd
                imagePullPolicy: Always
                securityContext:
                  privileged: true
                  capabilities:
                    add: ["NET_ADMIN"]
                args:
                - --enable-policy-routing=$(ENABLE_POLICY_ROUTING)
                - --enable-masquerade=$(ENABLE_MASQUERADE)
                - --logtostderr
                - --reconcile-interval-seconds=$(RECONCILE_INTERVAL_SECONDS)
                env:
                - name: ENABLE_POLICY_ROUTING
                  valueFrom:
                    configMapKeyRef:
                      name: netd-config
                      key: enable_policy_routing
                - name: ENABLE_MASQUERADE
                  valueFrom:
                    configMapKeyRef:
                      name: netd-config
                      key: enable_masquerade
                - name: RECONCILE_INTERVAL_SECONDS
                  valueFrom:
                    configMapKeyRef:
                      name: netd-config
                      key: reconcile_interval_seconds
              volumes:
              - name: cni-net-dir
                hostPath:
                  path: /etc/cni/net.d
  - name: KUBE_UP_AUTOMATIC_CLEANUP
    value: "true"
  - name: KUBE_GCE_ENABLE_IP_ALIASES
    value: "true"
  - name: KUBE_ENABLE_NETD
    value: "true"
  - name: KUBE_CUSTOM_CALICO_NODE_DAEMONSET_YAML
    value: |2+
        kind: DaemonSet
        apiVersion: extensions/v1beta1
        metadata:
          name: calico-node
          namespace: kube-system
          labels:
            kubernetes.io/cluster-service: "true"
            addonmanager.kubernetes.io/mode: Reconcile
            k8s-app: calico-node
        spec:
          selector:
            matchLabels:
              k8s-app: calico-node
          updateStrategy:
            type: RollingUpdate
          template:
            metadata:
              labels:
                k8s-app: calico-node
              annotations:
                scheduler.alpha.kubernetes.io/critical-pod: ''
            spec:
              priorityClassName: system-node-critical
              nodeSelector:
                projectcalico.org/ds-ready: "true"
              hostNetwork: true
              serviceAccountName: calico
              # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
              # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
              terminationGracePeriodSeconds: 0
              containers:
              # Runs calico/node container on each Kubernetes node. This
              # container programs network policy and routes on each
              # host.
              - name: calico-node
                image: gcr.io/projectcalico-org/node:v3.1.3
                env:
                - name: CALICO_DISABLE_FILE_LOGGING
                  value: "true"
                - name: CALICO_NETWORKING_BACKEND
                  value: "none"
                - name: DATASTORE_TYPE
                  value: "kubernetes"
                - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
                  value: "ACCEPT"
                - name: FELIX_HEALTHENABLED
                  value: "true"
                - name: FELIX_IPTABLESMANGLEALLOWACTION
                  value: "RETURN"
                - name: FELIX_IPV6SUPPORT
                  value: "false"
                - name: FELIX_LOGSEVERITYSYS
                  value: "none"
                - name: FELIX_LOGSEVERITYSCREEN
                  value: "info"
                - name: FELIX_IGNORELOOSERPF
                  value: "true"
                - name: FELIX_PROMETHEUSMETRICSENABLED
                  value: "true"
                - name: FELIX_REPORTINGINTERVALSECS
                  value: "0"
                - name: FELIX_TYPHAK8SSERVICENAME
                  value: "calico-typha"
                - name: IP
                  value: ""
                - name: NO_DEFAULT_POOLS
                  value: "true"
                - name: NODENAME
                  valueFrom:
                    fieldRef:
                      fieldPath: spec.nodeName
                - name: WAIT_FOR_DATASTORE
                  value: "true"
                securityContext:
                  privileged: true
                livenessProbe:
                  httpGet:
                    path: /liveness
                    port: 9099
                  periodSeconds: 10
                  initialDelaySeconds: 10
                  failureThreshold: 6
                readinessProbe:
                  httpGet:
                    path: /readiness
                    port: 9099
                  periodSeconds: 10
                volumeMounts:
                - mountPath: /lib/modules
                  name: lib-modules
                  readOnly: true
                - mountPath: /etc/calico
                  name: etc-calico
                  readOnly: true
                - mountPath: /var/run/calico
                  name: var-run-calico
                  readOnly: false
                - mountPath: /var/lib/calico
                  name: var-lib-calico
                  readOnly: false
              # This container installs the Calico CNI binaries
              # and CNI network config file on each node.
              - name: install-cni
                image: gcr.io/projectcalico-org/cni:v3.1.3
                command: ["/install-cni.sh"]
                env:
                - name: CNI_CONF_NAME
                  value: "10-calico.conflist"
                - name: CNI_NETWORK_CONFIG
                  value: |-
                    {
                      "name": "k8s-pod-network",
                      "cniVersion": "0.3.0",
                      "plugins": [
                        {
                          "type": "calico",
                          "mtu": 1460,
                          "log_level": "debug",
                          "datastore_type": "kubernetes",
                          "nodename": "__KUBERNETES_NODE_NAME__",
                          "ipam": {
                            "type": "host-local",
                            "subnet": "usePodCidr"
                          },
                          "policy": {
                            "type": "k8s",
                            "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
                          },
                          "kubernetes": {
                            "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
                            "kubeconfig": "__KUBECONFIG_FILEPATH__"
                          }
                        },
                        {
                          "type": "portmap",
                          "capabilities": {"portMappings": true},
                          "snat": true
                        }
                      ]
                    }
                - name: KUBERNETES_NODE_NAME
                  valueFrom:
                    fieldRef:
                      fieldPath: spec.nodeName
                volumeMounts:
                - mountPath: /host/opt/cni/bin
                  name: cni-bin-dir
                - mountPath: /host/etc/cni/net.d
                  name: cni-net-dir
              volumes:
              # Used to ensure proper kmods are installed.
              - name: lib-modules
                hostPath:
                  path: /lib/modules
              # Mount in the Felix config file from the host.
              - name: etc-calico
                hostPath:
                  path: /etc/calico
              # Used to install CNI binaries.
              - name: cni-bin-dir
                hostPath:
                  path: __CALICO_CNI_DIR__
              # Used to install CNI network config.
              - name: cni-net-dir
                hostPath:
                  path: /etc/cni/net.d
              - name: var-run-calico
                hostPath:
                  path: /var/run/calico
              - name: var-lib-calico
                hostPath:
                  path: /var/lib/calico
              tolerations:
              # Make sure calico/node gets scheduled on all nodes.
              - effect: NoSchedule
                operator: Exists
              - effect: NoExecute
                operator: Exists
              - key: CriticalAddonsOnly
                operator: Exists
  - name: KUBE_CUSTOM_TYPHA_DEPLOYMENT_YAML
    value: |2+
        apiVersion: extensions/v1beta1
        kind: Deployment
        metadata:
          name: calico-typha
          namespace: kube-system
          labels:
            kubernetes.io/cluster-service: "true"
            addonmanager.kubernetes.io/mode: Reconcile
            k8s-app: calico-typha
        spec:
          revisionHistoryLimit: 2
          template:
            metadata:
              labels:
                k8s-app: calico-typha
              annotations:
                scheduler.alpha.kubernetes.io/critical-pod: ''
            spec:
              priorityClassName: system-cluster-critical
              tolerations:
              - key: CriticalAddonsOnly
                operator: Exists
              hostNetwork: true
              serviceAccountName: calico
              containers:
              - image: gcr.io/projectcalico-org/typha:v0.7.4
                name: calico-typha
                ports:
                - containerPort: 5473
                  name: calico-typha
                  protocol: TCP
                env:
                - name: TYPHA_LOGFILEPATH
                  value: "none"
                - name: TYPHA_LOGSEVERITYSYS
                  value: "none"
                - name: TYPHA_LOGSEVERITYSCREEN
                  value: "info"
                - name: TYPHA_PROMETHEUSMETRICSENABLED
                  value: "true"
                - name: TYPHA_CONNECTIONREBALANCINGMODE
                  value: "kubernetes"
                - name: TYPHA_REPORTINGINTERVALSECS
                  value: "0"
                - name: TYPHA_PROMETHEUSMETRICSPORT
                  value: "9093"
                - name: TYPHA_DATASTORETYPE
                  value: "kubernetes"
                - name: TYPHA_MAXCONNECTIONSLOWERLIMIT
                  value: "1"
                - name: TYPHA_HEALTHENABLED
                  value: "true"
                volumeMounts:
                - mountPath: /etc/calico
                  name: etc-calico
                  readOnly: true
                livenessProbe:
                  httpGet:
                    path: /liveness
                    port: 9098
                  periodSeconds: 30
                  initialDelaySeconds: 30
                readinessProbe:
                  httpGet:
                    path: /readiness
                    port: 9098
                  periodSeconds: 10
              volumes:
              - name: etc-calico
                hostPath:
                  path: /etc/calico

- labels:
    preset-kubernetes-e2e-containerd: "true"
  env:
  - name: LOG_DUMP_SYSTEMD_SERVICES
    value: containerd containerd-installation
  - name: KUBE_MASTER_EXTRA_METADATA
    value: user-data=/go/src/github.com/containerd/cri/test/e2e/master.yaml,containerd-configure-sh=/go/src/github.com/containerd/cri/cluster/gce/configure.sh,containerd-env=/workspace/test-infra/jobs/e2e_node/containerd/cri-master/env
  - name: KUBE_NODE_EXTRA_METADATA
    value: user-data=/go/src/github.com/containerd/cri/test/e2e/node.yaml,containerd-configure-sh=/go/src/github.com/containerd/cri/cluster/gce/configure.sh,containerd-env=/workspace/test-infra/jobs/e2e_node/containerd/cri-master/env
  - name: KUBE_CONTAINER_RUNTIME
    value: remote
  - name: KUBE_CONTAINER_RUNTIME_ENDPOINT
    value: unix:///run/containerd/containerd.sock
  - name: KUBE_CONTAINER_RUNTIME_NAME
    value: containerd
  - name: KUBE_LOAD_IMAGE_COMMAND
    value: /home/containerd/usr/local/bin/ctr cri load
  - name: KUBELET_TEST_ARGS
    value: --runtime-cgroups=/system.slice/containerd.service

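# Calico without netd: jobs labelled preset-kubernetes-e2e-enable-calico
# (e.g. ci-kubernetes-e2e-containerd-gce-calico below) pick up this preset, which
# also sets NETWORK_POLICY_PROVIDER so those jobs do not set it themselves.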
- labels:
    preset-kubernetes-e2e-enable-calico: "true"
  env:
  - name: KUBE_UP_AUTOMATIC_CLEANUP
    value: "true"
  - name: KUBE_CUSTOM_CALICO_NODE_DAEMONSET_YAML
    value: |2+
        kind: DaemonSet
        apiVersion: extensions/v1beta1
        metadata:
          name: calico-node
          namespace: kube-system
          labels:
            kubernetes.io/cluster-service: "true"
            addonmanager.kubernetes.io/mode: Reconcile
            k8s-app: calico-node
        spec:
          selector:
            matchLabels:
              k8s-app: calico-node
          updateStrategy:
            type: RollingUpdate
          template:
            metadata:
              labels:
                k8s-app: calico-node
              annotations:
                scheduler.alpha.kubernetes.io/critical-pod: ''
            spec:
              priorityClassName: system-node-critical
              nodeSelector:
                projectcalico.org/ds-ready: "true"
              hostNetwork: true
              serviceAccountName: calico
              # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
              # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
              terminationGracePeriodSeconds: 0
              containers:
              # Runs calico/node container on each Kubernetes node. This
              # container programs network policy and routes on each
              # host.
              - name: calico-node
                image: gcr.io/projectcalico-org/node:v3.1.3
                env:
                - name: CALICO_DISABLE_FILE_LOGGING
                  value: "true"
                - name: CALICO_NETWORKING_BACKEND
                  value: "none"
                - name: DATASTORE_TYPE
                  value: "kubernetes"
                - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
                  value: "ACCEPT"
                - name: FELIX_HEALTHENABLED
                  value: "true"
                - name: FELIX_IPTABLESMANGLEALLOWACTION
                  value: "RETURN"
                - name: FELIX_IPV6SUPPORT
                  value: "false"
                - name: FELIX_LOGSEVERITYSYS
                  value: "none"
                - name: FELIX_LOGSEVERITYSCREEN
                  value: "info"
                - name: FELIX_IGNORELOOSERPF
                  value: "true"
                - name: FELIX_PROMETHEUSMETRICSENABLED
                  value: "true"
                - name: FELIX_REPORTINGINTERVALSECS
                  value: "0"
                - name: FELIX_TYPHAK8SSERVICENAME
                  value: "calico-typha"
                - name: IP
                  value: ""
                - name: NO_DEFAULT_POOLS
                  value: "true"
                - name: NODENAME
                  valueFrom:
                    fieldRef:
                      fieldPath: spec.nodeName
                - name: WAIT_FOR_DATASTORE
                  value: "true"
                securityContext:
                  privileged: true
                livenessProbe:
                  httpGet:
                    path: /liveness
                    port: 9099
                  periodSeconds: 10
                  initialDelaySeconds: 10
                  failureThreshold: 6
                readinessProbe:
                  httpGet:
                    path: /readiness
                    port: 9099
                  periodSeconds: 10
                volumeMounts:
                - mountPath: /lib/modules
                  name: lib-modules
                  readOnly: true
                - mountPath: /etc/calico
                  name: etc-calico
                  readOnly: true
                - mountPath: /var/run/calico
                  name: var-run-calico
                  readOnly: false
                - mountPath: /var/lib/calico
                  name: var-lib-calico
                  readOnly: false
              # This container installs the Calico CNI binaries
              # and CNI network config file on each node.
              - name: install-cni
                image: gcr.io/projectcalico-org/cni:v3.1.3
                command: ["/install-cni.sh"]
                env:
                - name: CNI_CONF_NAME
                  value: "10-calico.conflist"
                - name: CNI_NETWORK_CONFIG
                  value: |-
                    {
                      "name": "k8s-pod-network",
                      "cniVersion": "0.3.0",
                      "plugins": [
                        {
                          "type": "calico",
                          "mtu": 1460,
                          "log_level": "debug",
                          "datastore_type": "kubernetes",
                          "nodename": "__KUBERNETES_NODE_NAME__",
                          "ipam": {
                            "type": "host-local",
                            "subnet": "usePodCidr"
                          },
                          "policy": {
                            "type": "k8s",
                            "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
                          },
                          "kubernetes": {
                            "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
                            "kubeconfig": "__KUBECONFIG_FILEPATH__"
                          }
                        },
                        {
                          "type": "portmap",
                          "capabilities": {"portMappings": true},
                          "snat": true
                        }
                      ]
                    }
                - name: KUBERNETES_NODE_NAME
                  valueFrom:
                    fieldRef:
                      fieldPath: spec.nodeName
                volumeMounts:
                - mountPath: /host/opt/cni/bin
                  name: cni-bin-dir
                - mountPath: /host/etc/cni/net.d
                  name: cni-net-dir
              volumes:
              # Used to ensure proper kmods are installed.
              - name: lib-modules
                hostPath:
                  path: /lib/modules
              # Mount in the Felix config file from the host.
              - name: etc-calico
                hostPath:
                  path: /etc/calico
              # Used to install CNI binaries.
              - name: cni-bin-dir
                hostPath:
                  path: __CALICO_CNI_DIR__
              # Used to install CNI network config.
              - name: cni-net-dir
                hostPath:
                  path: /etc/cni/net.d
              - name: var-run-calico
                hostPath:
                  path: /var/run/calico
              - name: var-lib-calico
                hostPath:
                  path: /var/lib/calico
              tolerations:
              # Make sure calico/node gets scheduled on all nodes.
              - effect: NoSchedule
                operator: Exists
              - effect: NoExecute
                operator: Exists
              - key: CriticalAddonsOnly
                operator: Exists
  - name: KUBE_CUSTOM_TYPHA_DEPLOYMENT_YAML
    value: |2+
        apiVersion: extensions/v1beta1
        kind: Deployment
        metadata:
          name: calico-typha
          namespace: kube-system
          labels:
            kubernetes.io/cluster-service: "true"
            addonmanager.kubernetes.io/mode: Reconcile
            k8s-app: calico-typha
        spec:
          revisionHistoryLimit: 2
          template:
            metadata:
              labels:
                k8s-app: calico-typha
              annotations:
                scheduler.alpha.kubernetes.io/critical-pod: ''
            spec:
              priorityClassName: system-cluster-critical
              tolerations:
              - key: CriticalAddonsOnly
                operator: Exists
              hostNetwork: true
              serviceAccountName: calico
              containers:
              - image: gcr.io/projectcalico-org/typha:v0.7.4
                name: calico-typha
                ports:
                - containerPort: 5473
                  name: calico-typha
                  protocol: TCP
                env:
                - name: TYPHA_LOGFILEPATH
                  value: "none"
                - name: TYPHA_LOGSEVERITYSYS
                  value: "none"
                - name: TYPHA_LOGSEVERITYSCREEN
                  value: "info"
                - name: TYPHA_PROMETHEUSMETRICSENABLED
                  value: "true"
                - name: TYPHA_CONNECTIONREBALANCINGMODE
                  value: "kubernetes"
                - name: TYPHA_REPORTINGINTERVALSECS
                  value: "0"
                - name: TYPHA_PROMETHEUSMETRICSPORT
                  value: "9093"
                - name: TYPHA_DATASTORETYPE
                  value: "kubernetes"
                - name: TYPHA_MAXCONNECTIONSLOWERLIMIT
                  value: "1"
                - name: TYPHA_HEALTHENABLED
                  value: "true"
                volumeMounts:
                - mountPath: /etc/calico
                  name: etc-calico
                  readOnly: true
                livenessProbe:
                  httpGet:
                    path: /liveness
                    port: 9098
                  periodSeconds: 30
                  initialDelaySeconds: 30
                readinessProbe:
                  httpGet:
                    path: /readiness
                    port: 9098
                  periodSeconds: 10
              volumes:
              - name: etc-calico
                hostPath:
                  path: /etc/calico
  - name: NETWORK_POLICY_PROVIDER
    value: calico

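# Prow applies a preset to every job whose labels match the preset's labels,
# merging the preset's env vars into the job's containers. Illustrative sketch
# only (example-netd-job is not a job defined in this file):
#
#   - interval: 2h
#     name: example-netd-job
#     labels:
#       preset-service-account: "true"
#       preset-kubernetes-e2e-enable-netd: "true"  # pulls in the netd env block above
#     spec:
#       containers:
#       - image: gcr.io/k8s-testimages/kubekins-e2e:v20181218-134e718ec-master  # plus the job's args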
periodics:
- interval: 2h
  name: ci-kubernetes-e2e-gci-gce-netd
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-kubernetes-e2e-enable-netd: "true"
  spec:
    containers:
    - args:
      - "--timeout=180"
      - --root=/go/src
      - "--repo=k8s.io/kubernetes=release-1.11"
      - "--repo=k8s.io/examples=master"
      - --scenario=kubernetes_e2e
      - --
      - --extract=gs://koonwah-repo-pub/devel/v1.11.2-beta.0.20+21777efd99bd59
      - --check-leaked-resources
      - --gcp-node-image=gci
      - --gcp-zone=asia-southeast1-a
      - --provider=gce
      - --test_args=--ginkgo.focus=\[sig-network\] --ginkgo.skip=\[Slow\]|\[Flaky\]|\[Feature:NetworkPolicy\]|\[Feature:Networking-IPv6\]|\[Disruptive\]|\[Feature:ServiceLoadBalancer\]|\[Feature:PerformanceDNS\] --minStartupPods=8
      - --timeout=120m
      image: gcr.io/k8s-testimages/kubekins-e2e:v20181218-134e718ec-master

- interval: 2h
  name: ci-kubernetes-e2e-gci-gce-netd-calico
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-kubernetes-e2e-enable-netd-calico: "true"
  spec:
    containers:
    - args:
      - "--timeout=180"
      - --root=/go/src
      - "--repo=k8s.io/examples=master"
      - --scenario=kubernetes_e2e
      - --
      - --extract=gs://koonwah-repo-pub/devel/v1.11.2-beta.0.20+21777efd99bd59
      - --check-leaked-resources
      - --gcp-node-image=gci
      - --gcp-zone=asia-southeast1-a
      - --provider=gce
      - --test_args=--ginkgo.focus=\[sig-network\] --ginkgo.skip=\[Slow\]|\[Flaky\]|\[Feature:Networking-IPv6\]|\[Disruptive\]|\[Feature:ServiceLoadBalancer\]|\[Feature:PerformanceDNS\] --minStartupPods=8
      - --timeout=120m
      image: gcr.io/k8s-testimages/kubekins-e2e:v20181218-134e718ec-master
      env:
      - name: NETWORK_POLICY_PROVIDER
        value: calico

- interval: 2h
  name: ci-kubernetes-e2e-ubuntu-gce-netd
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-kubernetes-e2e-enable-netd: "true"
  spec:
    containers:
    - args:
      - "--timeout=180"
      - --root=/go/src
      - "--repo=k8s.io/kubernetes=release-1.11"
      - "--repo=k8s.io/examples=master"
      - --scenario=kubernetes_e2e
      - --
      - --extract=gs://koonwah-repo-pub/devel/v1.11.2-beta.0.20+21777efd99bd59
      - --check-leaked-resources
      - --image-family=ubuntu-gke-1604-lts
      - --image-project=ubuntu-os-gke-cloud
      - --gcp-zone=asia-southeast1-a
      - --provider=gce
      - --test_args=--ginkgo.focus=\[sig-network\] --ginkgo.skip=\[Slow\]|\[Flaky\]|\[Feature:NetworkPolicy\]|\[Feature:Networking-IPv6\]|\[Disruptive\]|\[Feature:ServiceLoadBalancer\]|\[Feature:PerformanceDNS\] --minStartupPods=8
      - --timeout=120m
      image: gcr.io/k8s-testimages/kubekins-e2e:v20181218-134e718ec-master

- interval: 2h
  name: ci-kubernetes-e2e-ubuntu-gce-netd-calico
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-kubernetes-e2e-enable-netd-calico: "true"
  spec:
    containers:
    - args:
      - "--timeout=180"
      - --root=/go/src
      - --scenario=kubernetes_e2e
      - "--repo=k8s.io/examples=master"
      - --
      - --extract=gs://koonwah-repo-pub/devel/v1.11.2-beta.0.20+21777efd99bd59
      - --check-leaked-resources
      - --image-family=ubuntu-gke-1604-lts
      - --image-project=ubuntu-os-gke-cloud
      - --gcp-zone=asia-southeast1-a
      - --provider=gce
      - --test_args=--ginkgo.focus=\[sig-network\] --ginkgo.skip=\[Slow\]|\[Flaky\]|\[Feature:Networking-IPv6\]|\[Disruptive\]|\[Feature:ServiceLoadBalancer\]|\[Feature:PerformanceDNS\] --minStartupPods=8
      - --timeout=120m
      image: gcr.io/k8s-testimages/kubekins-e2e:v20181218-134e718ec-master
      env:
      - name: NETWORK_POLICY_PROVIDER
        value: calico

- interval: 2h
  name: ci-kubernetes-e2e-containerd-gce-netd
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-kubernetes-e2e-enable-netd: "true"
    preset-kubernetes-e2e-containerd: "true"
  spec:
    containers:
    - args:
      - "--timeout=180"
      - --root=/go/src
      - --repo=github.com/containerd/cri=master
      - "--repo=k8s.io/examples=master"
      - --scenario=kubernetes_e2e
      - --
      - --check-leaked-resources
      - --env=KUBE_MASTER_EXTRA_METADATA=user-data=/go/src/github.com/containerd/cri/test/e2e/master.yaml,containerd-configure-sh=/go/src/github.com/containerd/cri/cluster/gce/configure.sh,containerd-env=/workspace/test-infra/jobs/e2e_node/containerd/containerd-master/env
      - --env=KUBE_NODE_EXTRA_METADATA=user-data=/go/src/github.com/containerd/cri/test/e2e/node.yaml,containerd-configure-sh=/go/src/github.com/containerd/cri/cluster/gce/configure.sh,containerd-env=/workspace/test-infra/jobs/e2e_node/containerd/containerd-master/env
      - --extract=gs://koonwah-repo-pub/devel/v1.11.2-beta.0.20+21777efd99bd59
      - --gcp-node-image=gci
      - --gcp-nodes=4
      - --gcp-zone=asia-southeast1-a
      - --ginkgo-parallel=30
      - --provider=gce
      - --test_args=--ginkgo.focus=\[sig-network\] --ginkgo.skip=\[Slow\]|\[Flaky\]|\[Feature:NetworkPolicy\]|\[Feature:Networking-IPv6\]|\[Disruptive\]|\[Feature:ServiceLoadBalancer\]|\[Feature:PerformanceDNS\] --minStartupPods=8
      - --timeout=120m
      image: gcr.io/k8s-testimages/kubekins-e2e:v20181218-134e718ec-master

- interval: 2h
  name: ci-kubernetes-e2e-containerd-gce-netd-calico
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-kubernetes-e2e-containerd: "true"
    preset-kubernetes-e2e-enable-netd-calico: "true"
  spec:
    containers:
    - args:
      - "--timeout=180"
      - --root=/go/src
      - --repo=github.com/containerd/cri=master
      - "--repo=k8s.io/examples=master"
      - --scenario=kubernetes_e2e
      - --
      - --check-leaked-resources
      - --env=KUBE_MASTER_EXTRA_METADATA=user-data=/go/src/github.com/containerd/cri/test/e2e/master.yaml,containerd-configure-sh=/go/src/github.com/containerd/cri/cluster/gce/configure.sh,containerd-env=/workspace/test-infra/jobs/e2e_node/containerd/containerd-master/env
      - --env=KUBE_NODE_EXTRA_METADATA=user-data=/go/src/github.com/containerd/cri/test/e2e/node.yaml,containerd-configure-sh=/go/src/github.com/containerd/cri/cluster/gce/configure.sh,containerd-env=/workspace/test-infra/jobs/e2e_node/containerd/containerd-master/env
      - --extract=gs://koonwah-repo-pub/devel/v1.11.2-beta.0.20+21777efd99bd59
      - --gcp-node-image=gci
      - --gcp-nodes=4
      - --gcp-zone=asia-southeast1-a
      - --ginkgo-parallel=30
      - --provider=gce
      - --test_args=--ginkgo.focus=\[sig-network\] --ginkgo.skip=\[Slow\]|\[Flaky\]|\[Feature:Networking-IPv6\]|\[Disruptive\]|\[Feature:ServiceLoadBalancer\]|\[Feature:PerformanceDNS\] --minStartupPods=8
      - --timeout=120m
      image: gcr.io/k8s-testimages/kubekins-e2e:v20181218-134e718ec-master
      env:
      - name: NETWORK_POLICY_PROVIDER
        value: calico

- interval: 2h
  name: ci-kubernetes-e2e-containerd-gce-calico
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-kubernetes-e2e-containerd: "true"
    preset-kubernetes-e2e-enable-calico: "true"
  spec:
    containers:
    - args:
      - "--timeout=180"
      - --root=/go/src
      - --repo=github.com/containerd/cri=master
      - "--repo=k8s.io/examples=master"
      - --scenario=kubernetes_e2e
      - --
      - --check-leaked-resources
      - --env=KUBE_MASTER_EXTRA_METADATA=user-data=/go/src/github.com/containerd/cri/test/e2e/master.yaml,containerd-configure-sh=/go/src/github.com/containerd/cri/cluster/gce/configure.sh,containerd-env=/workspace/test-infra/jobs/e2e_node/containerd/containerd-master/env
      - --env=KUBE_NODE_EXTRA_METADATA=user-data=/go/src/github.com/containerd/cri/test/e2e/node.yaml,containerd-configure-sh=/go/src/github.com/containerd/cri/cluster/gce/configure.sh,containerd-env=/workspace/test-infra/jobs/e2e_node/containerd/containerd-master/env
      - --extract=gs://koonwah-repo-pub/devel/v1.11.2-beta.0.20+21777efd99bd59
      - --gcp-node-image=gci
      - --gcp-nodes=4
      - --gcp-zone=asia-southeast1-a
      - --ginkgo-parallel=30
      - --provider=gce
      - --test_args=--ginkgo.focus=\[sig-network\] --ginkgo.skip=\[Slow\]|\[Flaky\]|\[Feature:Networking-IPv6\]|\[Disruptive\]|\[Feature:ServiceLoadBalancer\]|\[Feature:PerformanceDNS\] --minStartupPods=8
      - --timeout=120m
      image: gcr.io/k8s-testimages/kubekins-e2e:v20181218-134e718ec-master