# Prow configuration for sig-network netd e2e jobs on GCE.
#
# Two presets inject custom manifests into kube-up via KUBE_CUSTOM_*_YAML env
# vars: one for plain netd, one for netd + Calico network policy. Four periodic
# jobs (gci/ubuntu x netd/netd-calico) consume them via the preset labels.
presets:
- labels:
    preset-kubernetes-e2e-enable-netd: "true"
  env:
  - name: KUBE_CUSTOM_NETD_YAML
    # we want to keep extra spaces for the yaml file.
    value: |2+
      kind: ClusterRole
      apiVersion: rbac.authorization.k8s.io/v1
      metadata:
        name: netd
        namespace: kube-system
        labels:
          kubernetes.io/cluster-service: "true"
          addonmanager.kubernetes.io/mode: EnsureExists
      rules:
      - apiGroups: [""]
        resources: ["nodes"]
        verbs: ["get"]
      ---
      kind: ServiceAccount
      apiVersion: v1
      metadata:
        name: netd
        namespace: kube-system
        labels:
          kubernetes.io/cluster-service: "true"
          addonmanager.kubernetes.io/mode: EnsureExists
      ---
      kind: ClusterRoleBinding
      apiVersion: rbac.authorization.k8s.io/v1
      metadata:
        name: netd
        labels:
          kubernetes.io/cluster-service: "true"
          addonmanager.kubernetes.io/mode: EnsureExists
      roleRef:
        apiGroup: rbac.authorization.k8s.io
        kind: ClusterRole
        name: netd
      subjects:
      - kind: ServiceAccount
        name: netd
        namespace: kube-system
      ---
      kind: ConfigMap
      apiVersion: v1
      metadata:
        name: netd-config
        namespace: kube-system
        labels:
          addonmanager.kubernetes.io/mode: EnsureExists
      data:
        cni_spec_template: |-
          {
            "name": "k8s-pod-network",
            "cniVersion": "0.3.1",
            "plugins": [
              {
                "type": "ptp",
                "mtu": 1460,
                "ipam": {
                  "type": "host-local",
                  "ranges": [
                    [
                      {"subnet": podCidr}
                    ]
                  ],
                  "routes": [
                    {"dst": "0.0.0.0/0"}
                  ]
                }
              },
              {
                "type": "portmap",
                "capabilities": {
                  "portMappings": true
                },
                "noSnat": true
              }
            ]
          }
        cni_spec_name: "10-k8s-ptp.conflist"
        enable_policy_routing: "true"
        enable_masquerade: "true"
        enable_calico_network_policy: "false"
      ---
      kind: DaemonSet
      apiVersion: extensions/v1beta1
      metadata:
        name: netd
        namespace: kube-system
        labels:
          k8s-app: netd
          addonmanager.kubernetes.io/mode: EnsureExists
      spec:
        selector:
          matchLabels:
            k8s-app: netd
        template:
          metadata:
            labels:
              k8s-app: netd
          spec:
            priorityClassName: system-node-critical
            serviceAccountName: netd
            terminationGracePeriodSeconds: 0
            nodeSelector:
              beta.kubernetes.io/kube-netd-ready: "true"
            hostNetwork: true
            initContainers:
            - image: gcr.io/google-containers/netd-amd64:0.1.2
              name: install-cni
              command: ["sh", "/install-cni.sh"]
              env:
              - name: CNI_SPEC_TEMPLATE
                valueFrom:
                  configMapKeyRef:
                    name: netd-config
                    key: cni_spec_template
              - name: CNI_SPEC_NAME
                valueFrom:
                  configMapKeyRef:
                    name: netd-config
                    key: cni_spec_name
              - name: ENABLE_CALICO_NETWORK_POLICY
                valueFrom:
                  configMapKeyRef:
                    name: netd-config
                    key: enable_calico_network_policy
              volumeMounts:
              - mountPath: /host/etc/cni/net.d
                name: cni-net-dir
            containers:
            - image: gcr.io/google-containers/netd-amd64:0.1.2
              name: netd
              imagePullPolicy: Always
              securityContext:
                privileged: true
                capabilities:
                  add: ["NET_ADMIN"]
              args:
              - --enable-policy-routing=$(ENABLE_POLICY_ROUTING)
              - --enable-masquerade=$(ENABLE_MASQUERADE)
              - --logtostderr
              env:
              - name: ENABLE_POLICY_ROUTING
                valueFrom:
                  configMapKeyRef:
                    name: netd-config
                    key: enable_policy_routing
              - name: ENABLE_MASQUERADE
                valueFrom:
                  configMapKeyRef:
                    name: netd-config
                    key: enable_masquerade
            volumes:
            - name: cni-net-dir
              hostPath:
                path: /etc/cni/net.d
  # NOTE: EnvVar values must be strings; an unquoted `true` is a YAML boolean
  # and fails strict decoding of the job config, so these are quoted.
  - name: KUBE_UP_AUTOMATIC_CLEANUP
    value: "true"
  - name: KUBE_GCE_ENABLE_IP_ALIASES
    value: "true"
  - name: KUBE_ENABLE_NETD
    value: "true"

- labels:
    preset-kubernetes-e2e-enable-netd-calico: "true"
  env:
  - name: KUBE_CUSTOM_NETD_YAML
    # we want to keep extra spaces for the yaml file.
    # Identical to the plain netd manifest above except
    # enable_calico_network_policy is "true".
    value: |2+
      kind: ClusterRole
      apiVersion: rbac.authorization.k8s.io/v1
      metadata:
        name: netd
        namespace: kube-system
        labels:
          kubernetes.io/cluster-service: "true"
          addonmanager.kubernetes.io/mode: EnsureExists
      rules:
      - apiGroups: [""]
        resources: ["nodes"]
        verbs: ["get"]
      ---
      kind: ServiceAccount
      apiVersion: v1
      metadata:
        name: netd
        namespace: kube-system
        labels:
          kubernetes.io/cluster-service: "true"
          addonmanager.kubernetes.io/mode: EnsureExists
      ---
      kind: ClusterRoleBinding
      apiVersion: rbac.authorization.k8s.io/v1
      metadata:
        name: netd
        labels:
          kubernetes.io/cluster-service: "true"
          addonmanager.kubernetes.io/mode: EnsureExists
      roleRef:
        apiGroup: rbac.authorization.k8s.io
        kind: ClusterRole
        name: netd
      subjects:
      - kind: ServiceAccount
        name: netd
        namespace: kube-system
      ---
      kind: ConfigMap
      apiVersion: v1
      metadata:
        name: netd-config
        namespace: kube-system
        labels:
          addonmanager.kubernetes.io/mode: EnsureExists
      data:
        cni_spec_template: |-
          {
            "name": "k8s-pod-network",
            "cniVersion": "0.3.1",
            "plugins": [
              {
                "type": "ptp",
                "mtu": 1460,
                "ipam": {
                  "type": "host-local",
                  "ranges": [
                    [
                      {"subnet": podCidr}
                    ]
                  ],
                  "routes": [
                    {"dst": "0.0.0.0/0"}
                  ]
                }
              },
              {
                "type": "portmap",
                "capabilities": {
                  "portMappings": true
                },
                "noSnat": true
              }
            ]
          }
        cni_spec_name: "10-k8s-ptp.conflist"
        enable_policy_routing: "true"
        enable_masquerade: "true"
        enable_calico_network_policy: "true"
      ---
      kind: DaemonSet
      apiVersion: extensions/v1beta1
      metadata:
        name: netd
        namespace: kube-system
        labels:
          k8s-app: netd
          addonmanager.kubernetes.io/mode: EnsureExists
      spec:
        selector:
          matchLabels:
            k8s-app: netd
        template:
          metadata:
            labels:
              k8s-app: netd
          spec:
            priorityClassName: system-node-critical
            serviceAccountName: netd
            terminationGracePeriodSeconds: 0
            nodeSelector:
              beta.kubernetes.io/kube-netd-ready: "true"
            hostNetwork: true
            initContainers:
            - image: gcr.io/google-containers/netd-amd64:0.1.2
              name: install-cni
              command: ["sh", "/install-cni.sh"]
              env:
              - name: CNI_SPEC_TEMPLATE
                valueFrom:
                  configMapKeyRef:
                    name: netd-config
                    key: cni_spec_template
              - name: CNI_SPEC_NAME
                valueFrom:
                  configMapKeyRef:
                    name: netd-config
                    key: cni_spec_name
              - name: ENABLE_CALICO_NETWORK_POLICY
                valueFrom:
                  configMapKeyRef:
                    name: netd-config
                    key: enable_calico_network_policy
              volumeMounts:
              - mountPath: /host/etc/cni/net.d
                name: cni-net-dir
            containers:
            - image: gcr.io/google-containers/netd-amd64:0.1.2
              name: netd
              imagePullPolicy: Always
              securityContext:
                privileged: true
                capabilities:
                  add: ["NET_ADMIN"]
              args:
              - --enable-policy-routing=$(ENABLE_POLICY_ROUTING)
              - --enable-masquerade=$(ENABLE_MASQUERADE)
              - --logtostderr
              env:
              - name: ENABLE_POLICY_ROUTING
                valueFrom:
                  configMapKeyRef:
                    name: netd-config
                    key: enable_policy_routing
              - name: ENABLE_MASQUERADE
                valueFrom:
                  configMapKeyRef:
                    name: netd-config
                    key: enable_masquerade
            volumes:
            - name: cni-net-dir
              hostPath:
                path: /etc/cni/net.d
  # NOTE: EnvVar values must be strings; an unquoted `true` is a YAML boolean
  # and fails strict decoding of the job config, so these are quoted.
  - name: KUBE_UP_AUTOMATIC_CLEANUP
    value: "true"
  - name: KUBE_GCE_ENABLE_IP_ALIASES
    value: "true"
  - name: KUBE_ENABLE_NETD
    value: "true"
  - name: KUBE_CUSTOM_CALICO_NODE_DAEMONSET_YAML
    value: |2+
      kind: DaemonSet
      apiVersion: extensions/v1beta1
      metadata:
        name: calico-node
        namespace: kube-system
        labels:
          kubernetes.io/cluster-service: "true"
          addonmanager.kubernetes.io/mode: Reconcile
          k8s-app: calico-node
      spec:
        selector:
          matchLabels:
            k8s-app: calico-node
        updateStrategy:
          type: RollingUpdate
        template:
          metadata:
            labels:
              k8s-app: calico-node
            annotations:
              scheduler.alpha.kubernetes.io/critical-pod: ''
          spec:
            priorityClassName: system-node-critical
            nodeSelector:
              projectcalico.org/ds-ready: "true"
            hostNetwork: true
            serviceAccountName: calico
            # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
            # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
            terminationGracePeriodSeconds: 0
            containers:
            # Runs calico/node container on each Kubernetes node. This
            # container programs network policy and routes on each
            # host.
            - name: calico-node
              image: gcr.io/projectcalico-org/node:v3.1.3
              env:
              - name: CALICO_DISABLE_FILE_LOGGING
                value: "true"
              - name: CALICO_NETWORKING_BACKEND
                value: "none"
              - name: DATASTORE_TYPE
                value: "kubernetes"
              - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
                value: "ACCEPT"
              - name: FELIX_HEALTHENABLED
                value: "true"
              - name: FELIX_IPTABLESMANGLEALLOWACTION
                value: "RETURN"
              - name: FELIX_IPV6SUPPORT
                value: "false"
              - name: FELIX_LOGSEVERITYSYS
                value: "none"
              - name: FELIX_LOGSEVERITYSCREEN
                value: "info"
              - name: FELIX_IGNORELOOSERPF
                value: "true"
              - name: FELIX_PROMETHEUSMETRICSENABLED
                value: "true"
              - name: FELIX_REPORTINGINTERVALSECS
                value: "0"
              - name: FELIX_TYPHAK8SSERVICENAME
                value: "calico-typha"
              - name: IP
                value: ""
              - name: NO_DEFAULT_POOLS
                value: "true"
              - name: NODENAME
                valueFrom:
                  fieldRef:
                    fieldPath: spec.nodeName
              - name: WAIT_FOR_DATASTORE
                value: "true"
              securityContext:
                privileged: true
              livenessProbe:
                httpGet:
                  path: /liveness
                  port: 9099
                periodSeconds: 10
                initialDelaySeconds: 10
                failureThreshold: 6
              readinessProbe:
                httpGet:
                  path: /readiness
                  port: 9099
                periodSeconds: 10
              volumeMounts:
              - mountPath: /lib/modules
                name: lib-modules
                readOnly: true
              - mountPath: /etc/calico
                name: etc-calico
                readOnly: true
              - mountPath: /var/run/calico
                name: var-run-calico
                readOnly: false
              - mountPath: /var/lib/calico
                name: var-lib-calico
                readOnly: false
            # This container installs the Calico CNI binaries
            # and CNI network config file on each node.
            - name: install-cni
              image: gcr.io/projectcalico-org/cni:v3.1.3
              command: ["/install-cni.sh"]
              env:
              - name: CNI_CONF_NAME
                value: "10-calico.conflist"
              - name: CNI_NETWORK_CONFIG
                value: |-
                  {
                    "name": "k8s-pod-network",
                    "cniVersion": "0.3.0",
                    "plugins": [
                      {
                        "type": "calico",
                        "mtu": 1460,
                        "log_level": "debug",
                        "datastore_type": "kubernetes",
                        "nodename": "__KUBERNETES_NODE_NAME__",
                        "ipam": {
                          "type": "host-local",
                          "subnet": "usePodCidr"
                        },
                        "policy": {
                          "type": "k8s",
                          "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
                        },
                        "kubernetes": {
                          "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
                          "kubeconfig": "__KUBECONFIG_FILEPATH__"
                        }
                      },
                      {
                        "type": "portmap",
                        "capabilities": {"portMappings": true},
                        "snat": true
                      }
                    ]
                  }
              - name: KUBERNETES_NODE_NAME
                valueFrom:
                  fieldRef:
                    fieldPath: spec.nodeName
              volumeMounts:
              - mountPath: /host/opt/cni/bin
                name: cni-bin-dir
              - mountPath: /host/etc/cni/net.d
                name: cni-net-dir
            volumes:
            # Used to ensure proper kmods are installed.
            - name: lib-modules
              hostPath:
                path: /lib/modules
            # Mount in the Felix config file from the host.
            - name: etc-calico
              hostPath:
                path: /etc/calico
            # Used to install CNI binaries.
            - name: cni-bin-dir
              hostPath:
                path: __CALICO_CNI_DIR__
            # Used to install CNI network config.
            - name: cni-net-dir
              hostPath:
                path: /etc/cni/net.d
            - name: var-run-calico
              hostPath:
                path: /var/run/calico
            - name: var-lib-calico
              hostPath:
                path: /var/lib/calico
            tolerations:
            # Make sure calico/node gets scheduled on all nodes.
            - effect: NoSchedule
              operator: Exists
            - effect: NoExecute
              operator: Exists
            - key: CriticalAddonsOnly
              operator: Exists
  - name: KUBE_CUSTOM_TYPHA_DEPLOYMENT_YAML
    value: |2+
      apiVersion: extensions/v1beta1
      kind: Deployment
      metadata:
        name: calico-typha
        namespace: kube-system
        labels:
          kubernetes.io/cluster-service: "true"
          addonmanager.kubernetes.io/mode: Reconcile
          k8s-app: calico-typha
      spec:
        revisionHistoryLimit: 2
        template:
          metadata:
            labels:
              k8s-app: calico-typha
            annotations:
              scheduler.alpha.kubernetes.io/critical-pod: ''
          spec:
            priorityClassName: system-cluster-critical
            tolerations:
            - key: CriticalAddonsOnly
              operator: Exists
            hostNetwork: true
            serviceAccountName: calico
            containers:
            - image: gcr.io/projectcalico-org/typha:v0.7.4
              name: calico-typha
              ports:
              - containerPort: 5473
                name: calico-typha
                protocol: TCP
              env:
              - name: TYPHA_LOGFILEPATH
                value: "none"
              - name: TYPHA_LOGSEVERITYSYS
                value: "none"
              - name: TYPHA_LOGSEVERITYSCREEN
                value: "info"
              - name: TYPHA_PROMETHEUSMETRICSENABLED
                value: "true"
              - name: TYPHA_CONNECTIONREBALANCINGMODE
                value: "kubernetes"
              - name: TYPHA_REPORTINGINTERVALSECS
                value: "0"
              - name: TYPHA_PROMETHEUSMETRICSPORT
                value: "9093"
              - name: TYPHA_DATASTORETYPE
                value: "kubernetes"
              - name: TYPHA_MAXCONNECTIONSLOWERLIMIT
                value: "1"
              - name: TYPHA_HEALTHENABLED
                value: "true"
              volumeMounts:
              - mountPath: /etc/calico
                name: etc-calico
                readOnly: true
              livenessProbe:
                httpGet:
                  path: /liveness
                  port: 9098
                periodSeconds: 30
                initialDelaySeconds: 30
              readinessProbe:
                httpGet:
                  path: /readiness
                  port: 9098
                periodSeconds: 10
            volumes:
            - name: etc-calico
              hostPath:
                path: /etc/calico

periodics:
- interval: 2h
  agent: kubernetes
  name: ci-kubernetes-e2e-gci-gce-netd
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-kubernetes-e2e-enable-netd: "true"
  spec:
    containers:
    - args:
      - "--timeout=340"
      - "--repo=k8s.io/kubernetes=release-1.11"
      - "--repo=k8s.io/examples=master"
      - --scenario=kubernetes_e2e
      - --
      - --extract=ci/latest
      - --check-leaked-resources
      - --gcp-node-image=gci
      - --gcp-zone=asia-southeast1-a
      - --provider=gce
      - --test_args=--ginkgo.focus=\[sig-network\] --ginkgo.skip=\[Slow\]|\[Flaky\]|\[Feature:NetworkPolicy\]|\[Feature:Networking-IPv6\] --minStartupPods=8
      - --timeout=120m
      image: gcr.io/k8s-testimages/kubekins-e2e:v20180725-795cceb4c-master

- interval: 2h
  agent: kubernetes
  name: ci-kubernetes-e2e-gci-gce-netd-calico
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-kubernetes-e2e-enable-netd-calico: "true"
  spec:
    containers:
    - args:
      - "--timeout=340"
      - "--repo=k8s.io/examples=master"
      - --scenario=kubernetes_e2e
      - --
      - --extract=gs://koonwah-repo-pub/devel/v1.11.2-beta.0.20+39d0b71432c167
      - --check-leaked-resources
      - --gcp-node-image=gci
      - --gcp-zone=asia-southeast1-a
      - --provider=gce
      - --test_args=--ginkgo.focus=\[sig-network\] --ginkgo.skip=\[Slow\]|\[Flaky\]|\[Feature:Networking-IPv6\] --minStartupPods=8
      - --timeout=120m
      image: gcr.io/k8s-testimages/kubekins-e2e:v20180725-795cceb4c-master
      env:
      - name: NETWORK_POLICY_PROVIDER
        value: calico

- interval: 2h
  agent: kubernetes
  name: ci-kubernetes-e2e-ubuntu-gce-netd
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-kubernetes-e2e-enable-netd: "true"
  spec:
    containers:
    - args:
      - "--timeout=340"
      - "--repo=k8s.io/kubernetes=release-1.11"
      - "--repo=k8s.io/examples=master"
      - --scenario=kubernetes_e2e
      - --
      - --extract=ci/latest
      - --check-leaked-resources
      - --image-family=ubuntu-gke-1604-lts
      - --image-project=ubuntu-os-gke-cloud
      - --gcp-zone=asia-southeast1-a
      - --provider=gce
      - --test_args=--ginkgo.focus=\[sig-network\] --ginkgo.skip=\[Slow\]|\[Flaky\]|\[Feature:NetworkPolicy\]|\[Feature:Networking-IPv6\] --minStartupPods=8
      - --timeout=120m
      image: gcr.io/k8s-testimages/kubekins-e2e:v20180725-795cceb4c-master

- interval: 2h
  agent: kubernetes
  name: ci-kubernetes-e2e-ubuntu-gce-netd-calico
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-kubernetes-e2e-enable-netd-calico: "true"
  spec:
    containers:
    - args:
      - "--timeout=340"
      - --scenario=kubernetes_e2e
      - "--repo=k8s.io/examples=master"
      - --
      - --extract=gs://koonwah-repo-pub/devel/v1.11.2-beta.0.20+39d0b71432c167
      - --check-leaked-resources
      - --image-family=ubuntu-gke-1604-lts
      - --image-project=ubuntu-os-gke-cloud
      - --gcp-zone=asia-southeast1-a
      - --provider=gce
      - --test_args=--ginkgo.focus=\[sig-network\] --ginkgo.skip=\[Slow\]|\[Flaky\]|\[Feature:Networking-IPv6\] --minStartupPods=8
      - --timeout=120m
      image: gcr.io/k8s-testimages/kubekins-e2e:v20180725-795cceb4c-master
      env:
      - name: NETWORK_POLICY_PROVIDER
        value: calico