# This file contains a PoC presubmit for providing community members ability
# to run ad-hoc 100 node scale tests.

# Please contact mm4tt@ if you're interested in using it.
# TODO(mm4tt): Create a mailing group with owners of this framework.

# Current reservation holder: pohly@

presubmits:
  kubernetes/perf-tests:
  - name: pull-perf-tests-100-adhoc
    cluster: k8s-infra-prow-build
    always_run: false # This test needs to be triggered manually via `/test pull-perf-tests-100-adhoc`
    max_concurrency: 1 # Keep at 1 until we figure out a proper reservation system.
    skip_report: false # Report the status on github.
    optional: true
    branches:
    - master
    decorate: true
    path_alias: k8s.io/perf-tests
    decoration_config:
      timeout: 120m
    extra_refs:
    - org: kubernetes
      repo: release
      base_ref: master
      path_alias: k8s.io/release
    labels:
      preset-service-account: "true"
      preset-k8s-ssh: "true"
      preset-bazel-scratch-dir: "true"
      preset-e2e-scalability-common: "true"
    annotations:
      testgrid-dashboards: presubmits-kubernetes-scalability
      testgrid-tab-name: pull-perf-tests-100-adhoc
    spec:
      containers:
      - image: gcr.io/k8s-staging-test-infra/kubekins-e2e:v20240515-17c6d50e24-master
        command:
        - runner.sh
        - /workspace/scenarios/kubernetes_e2e.py
        args:
        - --cluster=
        - --extract=ci/latest
        - --gcp-nodes=100
        - --gcp-project-type=scalability-project
        - --gcp-zone=us-east1-b
        - --provider=gce
        - --tear-down-previous
        # TODO(pohly@): Custom overrides, clean up after finishing the tests.
        # NOTE(review): the original had this comment attached to a bare `- #`
        # sequence entry, which YAML parses as a null element in the args list;
        # the comment now sits on its own line so every list item is a string.
        - --env=CONTROLLER_MANAGER_TEST_ARGS=--profiling --contention-profiling --kube-api-qps=1000 --kube-api-burst=1000
        - --test=false
        - --test-cmd=$GOPATH/src/k8s.io/perf-tests/adhoc/run-e2e-test.sh
        - --timeout=100m
        - --use-logexporter
        - --logexporter-gcs-path=gs://sig-scalability-logs/$(JOB_NAME)/$(BUILD_ID)
        # The resources are set to support a 100 node CL2 test.
        resources:
          limits:
            cpu: 2
            memory: "6Gi"
          requests:
            cpu: 2
            memory: "6Gi"