# k8s.io/test-infra@v0.0.0-20240520184403-27c6b4c223d8/config/jobs/kubernetes/sig-scalability/sig-scalability-periodic-jobs.yaml
periodics:
- name: ci-kubernetes-e2e-gce-node-throughput
  cluster: k8s-infra-prow-build
  tags:
  - "perfDashPrefix: docker-node-throughput"
  - "perfDashJobType: throughput"
  interval: 1h
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-e2e-scalability-node: "true"
    preset-e2e-scalability-periodics: "true"
    preset-e2e-scalability-periodics-master: "true"
  decorate: true
  decoration_config:
    timeout: 60m
  extra_refs:
  - org: kubernetes
    repo: kubernetes
    base_ref: master
    path_alias: k8s.io/kubernetes
  - org: kubernetes
    repo: perf-tests
    base_ref: master
    path_alias: k8s.io/perf-tests
  annotations:
    testgrid-dashboards: sig-scalability-node
    testgrid-tab-name: node-throughput
  spec:
    containers:
    - image: gcr.io/k8s-staging-test-infra/kubekins-e2e:v20240515-17c6d50e24-master
      command:
      - runner.sh
      - /workspace/scenarios/kubernetes_e2e.py
      args:
      - --check-leaked-resources
      - --cluster=
      - --env=CONTAINER_IMAGE=registry-sandbox.k8s.io/pause:3.1 #TODO(ameukam): revert when registry.k8s.io is ready
      - --extract=ci/latest
      - --gcp-node-image=gci
      - --gcp-nodes=1
      - --provider=gce
      - --test=false
      - --test-cmd=$GOPATH/src/k8s.io/perf-tests/run-e2e.sh
      - --test-cmd-args=cluster-loader2
      - --test-cmd-args=--nodes=1
      - --test-cmd-args=--provider=gce
      - --test-cmd-args=--report-dir=$(ARTIFACTS)
      - --test-cmd-args=--testconfig=testing/node-throughput/config.yaml
      - --test-cmd-args=--testoverrides=./testing/overrides/node_docker.yaml
      - --test-cmd-name=ClusterLoaderV2
      - --timeout=40m
      - --use-logexporter
      - --logexporter-gcs-path=gs://sig-scalability-logs/$(JOB_NAME)/$(BUILD_ID)
      resources:
        requests:
          cpu: 2
          memory: "6Gi"
        limits:
          cpu: 2
          memory: "6Gi"

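# Containerd variant of the node-throughput job above: same setup, but it applies the
# node_containerd.yaml overrides and reports under its own perf-dash prefix and testgrid tab.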
- name: ci-kubernetes-e2e-gce-node-containerd-throughput
  cluster: k8s-infra-prow-build
  tags:
  - "perfDashPrefix: containerd-node-throughput"
  - "perfDashJobType: throughput"
  interval: 1h
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-e2e-scalability-node: "true"
    preset-e2e-scalability-periodics: "true"
    preset-e2e-scalability-periodics-master: "true"
  decorate: true
  decoration_config:
    timeout: 60m
  extra_refs:
  - org: kubernetes
    repo: kubernetes
    base_ref: master
    path_alias: k8s.io/kubernetes
  - org: kubernetes
    repo: perf-tests
    base_ref: master
    path_alias: k8s.io/perf-tests
  annotations:
    testgrid-dashboards: sig-scalability-node
    testgrid-tab-name: node-containerd-throughput
  spec:
    containers:
    - image: gcr.io/k8s-staging-test-infra/kubekins-e2e:v20240515-17c6d50e24-master
      command:
      - runner.sh
      - /workspace/scenarios/kubernetes_e2e.py
      args:
      - --check-leaked-resources
      - --cluster=
      - --env=CONTAINER_IMAGE=registry-sandbox.k8s.io/pause:3.1 #TODO(ameukam): revert when registry.k8s.io is ready
      - --extract=ci/latest
      - --gcp-node-image=gci
      - --gcp-nodes=1
      - --provider=gce
      - --test=false
      - --test-cmd=$GOPATH/src/k8s.io/perf-tests/run-e2e.sh
      - --test-cmd-args=cluster-loader2
      - --test-cmd-args=--nodes=1
      - --test-cmd-args=--provider=gce
      - --test-cmd-args=--report-dir=$(ARTIFACTS)
      - --test-cmd-args=--testconfig=testing/node-throughput/config.yaml
      - --test-cmd-args=--testoverrides=./testing/overrides/node_containerd.yaml
      - --test-cmd-name=ClusterLoaderV2
      - --timeout=40m
      - --use-logexporter
      - --logexporter-gcs-path=gs://sig-scalability-logs/$(JOB_NAME)/$(BUILD_ID)
      resources:
        requests:
          cpu: 2
          memory: "6Gi"
        limits:
          cpu: 2
          memory: "6Gi"

#kubemark
- name: ci-kubernetes-kubemark-100-gce
  tags:
  - "perfDashPrefix: kubemark-100Nodes"
  - "perfDashJobType: performance"
  interval: 3h
  cluster: k8s-infra-prow-build
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-dind-enabled: "true"
    preset-e2e-kubemark-common: "true"
    preset-e2e-scalability-periodics: "true"
    preset-e2e-scalability-periodics-master: "true"
  decorate: true
  decoration_config:
    timeout: 260m
  extra_refs:
  - org: kubernetes
    repo: kubernetes
    base_ref: master
    path_alias: k8s.io/kubernetes
  - org: kubernetes
    repo: perf-tests
    base_ref: master
    path_alias: k8s.io/perf-tests
  annotations:
    testgrid-alert-email: kubernetes-sig-scale@googlegroups.com, kubernetes-scalability-tickets@google.com
    testgrid-dashboards: sig-scalability-kubemark
    testgrid-tab-name: kubemark-100
    testgrid-num-failures-to-alert: '2'
  spec:
    containers:
    - image: gcr.io/k8s-staging-test-infra/kubekins-e2e:v20240515-17c6d50e24-master
      command:
      - runner.sh
      - /workspace/scenarios/kubernetes_e2e.py
      args:
      - --cluster=kubemark-100
      - --extract=ci/latest
      - --gcp-master-size=n2-standard-2
      - --gcp-node-image=gci
      - --gcp-node-size=e2-standard-4
      - --gcp-nodes=4
      - --gcp-project-type=scalability-project
      - --gcp-zone=us-east1-b
      - --kubemark
      - --kubemark-nodes=100
      - --kubemark-master-size=n2-standard-8
      - --provider=gce
      - --metadata-sources=cl2-metadata.json
      - --env=KUBEMARK_APISERVER_TEST_ARGS=--max-requests-inflight=80 --max-mutating-requests-inflight=0 --profiling --contention-profiling
      - --test=false
      - --test_args=--ginkgo.focus=xxxx
      - --test-cmd=$GOPATH/src/k8s.io/perf-tests/run-e2e.sh
      - --test-cmd-args=cluster-loader2
      - --test-cmd-args=--nodes=100
      - --test-cmd-args=--prometheus-scrape-node-exporter=true
      - --test-cmd-args=--provider=kubemark
      - --test-cmd-args=--report-dir=$(ARTIFACTS)
      - --test-cmd-args=--testconfig=testing/load/config.yaml
      - --test-cmd-args=--testconfig=testing/huge-service/config.yaml
      - --test-cmd-args=--testconfig=testing/access-tokens/config.yaml
      - --test-cmd-args=--testoverrides=./testing/experiments/enable_restart_count_check.yaml
      - --test-cmd-args=--testoverrides=./testing/experiments/use_simple_latency_query.yaml
      - --test-cmd-args=--testoverrides=./testing/overrides/kubemark_load_throughput.yaml
      - --test-cmd-name=ClusterLoaderV2
      - --timeout=240m
      - --use-logexporter
      - --logexporter-gcs-path=gs://sig-scalability-logs/$(JOB_NAME)/$(BUILD_ID)
      # docker-in-docker needs privileged mode
      securityContext:
        privileged: true
      resources:
        requests:
          cpu: 2
          memory: "6Gi"
        limits:
          cpu: 2
          memory: "6Gi"

- name: ci-kubernetes-kubemark-100-gce-scheduler
  cluster: k8s-infra-prow-build
  tags:
  - "perfDashPrefix: kubemark-100Nodes-scheduler"
  - "perfDashJobType: performance"
  interval: 24h
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-dind-enabled: "true"
    preset-e2e-kubemark-common: "true"
    preset-e2e-scalability-periodics: "true"
    preset-e2e-scalability-periodics-master: "true"
"true" 218 decorate: true 219 decoration_config: 220 timeout: 170m 221 extra_refs: 222 - org: kubernetes 223 repo: kubernetes 224 base_ref: master 225 path_alias: k8s.io/kubernetes 226 - org: kubernetes 227 repo: perf-tests 228 base_ref: master 229 path_alias: k8s.io/perf-tests 230 annotations: 231 testgrid-dashboards: sig-scalability-kubemark 232 testgrid-tab-name: kubemark-100-scheduler 233 testgrid-num-failures-to-alert: '1' 234 spec: 235 containers: 236 - image: gcr.io/k8s-staging-test-infra/kubekins-e2e:v20240515-17c6d50e24-master 237 command: 238 - runner.sh 239 - /workspace/scenarios/kubernetes_e2e.py 240 args: 241 - --cluster=kubemark-100 242 - --extract=ci/latest 243 - --gcp-master-size=n2-standard-2 244 - --gcp-node-image=gci 245 - --gcp-node-size=e2-standard-4 246 - --gcp-nodes=4 247 - --gcp-project-type=scalability-project 248 - --gcp-zone=us-east1-b 249 - --kubemark 250 - --kubemark-nodes=100 251 - --kubemark-master-size=n2-standard-8 252 - --provider=gce 253 - --metadata-sources=cl2-metadata.json 254 - --test=false 255 - --test_args=--ginkgo.focus=xxxx 256 - --test-cmd=$GOPATH/src/k8s.io/perf-tests/run-e2e.sh 257 - --test-cmd-args=cluster-loader2 258 - --test-cmd-args=--nodes=100 259 - --test-cmd-args=--prometheus-scrape-node-exporter=true 260 - --test-cmd-args=--provider=kubemark 261 - --env=CL2_ENABLE_DNS_PROGRAMMING=true 262 - --env=KUBEMARK_APISERVER_TEST_ARGS=--max-requests-inflight=80 --max-mutating-requests-inflight=0 --profiling --contention-profiling 263 - --test-cmd-args=--report-dir=$(ARTIFACTS) 264 - --test-cmd-args=--testsuite=testing/density/scheduler-suite.yaml 265 - --test-cmd-args=--testoverrides=./testing/experiments/enable_restart_count_check.yaml 266 - --test-cmd-args=--testoverrides=./testing/experiments/use_simple_latency_query.yaml 267 - --test-cmd-name=ClusterLoaderV2 268 - --timeout=150m 269 - --use-logexporter 270 - --logexporter-gcs-path=gs://k8s-infra-scalability-tests-logs/$(JOB_NAME)/$(BUILD_ID) 271 # docker-in-docker needs privileged mode 272 securityContext: 273 privileged: true 274 resources: 275 requests: 276 cpu: 2 277 memory: "6Gi" 278 limits: 279 cpu: 2 280 memory: "6Gi" 281 282 - name: ci-kubernetes-kubemark-100-gce-scheduler-highqps 283 cluster: k8s-infra-prow-build 284 tags: 285 - "perfDashPrefix: kubemark-100Nodes-scheduler-highqps" 286 - "perfDashJobType: performance" 287 interval: 24h 288 labels: 289 preset-service-account: "true" 290 preset-k8s-ssh: "true" 291 preset-dind-enabled: "true" 292 preset-e2e-kubemark-common: "true" 293 preset-e2e-scalability-periodics: "true" 294 preset-e2e-scalability-periodics-master: "true" 295 decorate: true 296 decoration_config: 297 timeout: 170m 298 extra_refs: 299 - org: kubernetes 300 repo: kubernetes 301 base_ref: master 302 path_alias: k8s.io/kubernetes 303 - org: kubernetes 304 repo: perf-tests 305 base_ref: master 306 path_alias: k8s.io/perf-tests 307 annotations: 308 testgrid-dashboards: sig-scalability-kubemark 309 testgrid-tab-name: kubemark-100-scheduler-highqps 310 testgrid-num-failures-to-alert: '1' 311 spec: 312 containers: 313 - image: gcr.io/k8s-staging-test-infra/kubekins-e2e:v20240515-17c6d50e24-master 314 command: 315 - runner.sh 316 - /workspace/scenarios/kubernetes_e2e.py 317 args: 318 - --cluster=kubemark-100-scheduler-highqps 319 - --extract=ci/latest 320 - --gcp-master-size=n2-standard-2 321 - --gcp-node-image=gci 322 - --gcp-node-size=e2-standard-4 323 - --gcp-nodes=4 324 - --gcp-project-type=scalability-project 325 - --gcp-zone=us-east1-b 326 - --kubemark 327 - 
      - --kubemark-nodes=100
      - --kubemark-master-size=n2-standard-8
      - --provider=gce
      - --metadata-sources=cl2-metadata.json
      - --env=KUBEMARK_APISERVER_TEST_ARGS=--max-requests-inflight=80 --max-mutating-requests-inflight=0 --profiling --contention-profiling
      - --env=CONTROLLER_MANAGER_TEST_ARGS=--profiling --contention-profiling --kube-api-qps=300 --kube-api-burst=300
      - --env=SCHEDULER_TEST_ARGS=--profiling --contention-profiling --kube-api-qps=300 --kube-api-burst=300
      - --env=CL2_ENABLE_CLUSTER_OOMS_TRACKER=true
      - --test=false
      - --test_args=--ginkgo.focus=xxxx
      - --test-cmd=$GOPATH/src/k8s.io/perf-tests/run-e2e.sh
      - --test-cmd-args=cluster-loader2
      - --test-cmd-args=--nodes=100
      - --test-cmd-args=--prometheus-scrape-node-exporter=true
      - --test-cmd-args=--provider=kubemark
      - --test-cmd-args=--report-dir=$(ARTIFACTS)
      - --test-cmd-args=--testsuite=testing/density/scheduler-suite.yaml
      - --test-cmd-args=--testoverrides=./testing/experiments/enable_restart_count_check.yaml
      - --test-cmd-args=--testoverrides=./testing/experiments/use_simple_latency_query.yaml
      - --test-cmd-name=ClusterLoaderV2
      - --timeout=150m
      # docker-in-docker needs privileged mode
      securityContext:
        privileged: true
      resources:
        requests:
          cpu: 2
          memory: "6Gi"
        limits:
          cpu: 2
          memory: "6Gi"

- name: ci-kubernetes-kubemark-500-gce
  cluster: k8s-infra-prow-build
  tags:
  - "perfDashPrefix: kubemark-500Nodes"
  - "perfDashJobType: performance"
  interval: 1h
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-dind-enabled: "true"
    preset-e2e-kubemark-common: "true"
    preset-e2e-scalability-periodics: "true"
    preset-e2e-scalability-periodics-master: "true"
  decorate: true
  decoration_config:
    timeout: 120m
  extra_refs:
  - org: kubernetes
    repo: kubernetes
    base_ref: master
    path_alias: k8s.io/kubernetes
  - org: kubernetes
    repo: perf-tests
    base_ref: master
    path_alias: k8s.io/perf-tests
  annotations:
    fork-per-release: "true"
    fork-per-release-cron: 0 3 * * *, 0 7 * * *, 0 13 * * *, 0 17 * * *, 0 21 * * *
    fork-per-release-deletions: "preset-e2e-scalability-periodics-master"
    fork-per-release-replacements: "kubemark-500Nodes -> kubemark-500Nodes-{{.Version}}, extract=ci/latest -> extract=ci/latest-{{.Version}}, gcp-project=k8s-jenkins-blocking-kubemark -> gcp-project-type=scalability-project, us-central1-f -> us-east1-b"
    testgrid-alert-email: kubernetes-sig-scale@googlegroups.com, kubernetes-scalability-tickets@google.com
    testgrid-dashboards: sig-scalability-kubemark
    testgrid-tab-name: kubemark-master-500
    testgrid-num-failures-to-alert: '2'
  spec:
    containers:
    - image: gcr.io/k8s-staging-test-infra/kubekins-e2e:v20240515-17c6d50e24-master
      command:
      - runner.sh
      - /workspace/scenarios/kubernetes_e2e.py
      args:
      - --cluster=kubemark-500
      - --extract=ci/latest
      - --gcp-master-size=n2-standard-4
      - --gcp-node-image=gci
      - --gcp-node-size=e2-standard-8
      - --gcp-nodes=8
      - --gcp-project-type=scalability-project
      - --gcp-zone=us-central1-f
      - --kubemark
      - --kubemark-nodes=500
      - --kubemark-master-size=n2-standard-16
      - --metadata-sources=cl2-metadata.json
      - --env=KUBEMARK_APISERVER_TEST_ARGS=--max-requests-inflight=160 --max-mutating-requests-inflight=0 --profiling --contention-profiling
      - --provider=gce
      - --test=false
      - --test_args=--ginkgo.focus=xxxx
      - --test-cmd=$GOPATH/src/k8s.io/perf-tests/run-e2e.sh
      - --test-cmd-args=cluster-loader2
      - --test-cmd-args=--nodes=500
      - --test-cmd-args=--provider=kubemark
      - --test-cmd-args=--report-dir=$(ARTIFACTS)
      - --test-cmd-args=--testconfig=testing/load/config.yaml
      - --test-cmd-args=--testconfig=testing/huge-service/config.yaml
      - --test-cmd-args=--testconfig=testing/access-tokens/config.yaml
      - --test-cmd-args=--testoverrides=./testing/experiments/use_simple_latency_query.yaml
      - --test-cmd-args=--testoverrides=./testing/overrides/kubemark_500_nodes.yaml
      - --test-cmd-name=ClusterLoaderV2
      - --timeout=100m
      - --use-logexporter
      - --logexporter-gcs-path=gs://k8s-infra-scalability-tests-logs/$(JOB_NAME)/$(BUILD_ID)
      # docker-in-docker needs privileged mode
      securityContext:
        privileged: true
      resources:
        requests:
          cpu: 2
          memory: "8Gi"
        limits:
          cpu: 2
          memory: "8Gi"

- name: ci-kubernetes-kubemark-gce-scale
  cluster: k8s-infra-prow-build
  tags:
  - "perfDashPrefix: kubemark-5000Nodes"
  - "perfDashJobType: performance"
  # For cost-efficiency reasons, we're switching this job off,
  # by setting it to run on February 31st.
  cron: '0 0 31 2 *'
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-dind-enabled: "true"
    preset-e2e-kubemark-common: "true"
    preset-e2e-kubemark-gce-scale: "true"
    preset-e2e-scalability-periodics: "true"
    preset-e2e-scalability-periodics-master: "true"
  decorate: true
  decoration_config:
    timeout: 1100m
  extra_refs:
  - org: kubernetes
    repo: kubernetes
    base_ref: master
    path_alias: k8s.io/kubernetes
  - org: kubernetes
    repo: perf-tests
    base_ref: master
    path_alias: k8s.io/perf-tests
  annotations:
    testgrid-alert-email: kubernetes-sig-scale@googlegroups.com, kubernetes-scalability-tickets@google.com
    testgrid-dashboards: sig-scalability-kubemark
    testgrid-tab-name: kubemark-5000
    testgrid-num-failures-to-alert: '2'
    testgrid-num-columns-recent: '3'
  spec:
    containers:
    - image: gcr.io/k8s-staging-test-infra/kubekins-e2e:v20240515-17c6d50e24-master
      command:
      - runner.sh
      - /workspace/scenarios/kubernetes_e2e.py
      args:
      - --cluster=kubemark-5000
      - --extract=ci/latest
      - --gcp-node-image=gci
      - --gcp-node-size=e2-standard-8
      - --gcp-master-size=n2-standard-64
      - --gcp-nodes=84
      - --gcp-project-type=scalability-project
      - --gcp-zone=us-east1-b
      - --kubemark
      - --kubemark-nodes=5000
      - --kubemark-master-size=n2-standard-64
      - --provider=gce
      - --metadata-sources=cl2-metadata.json
      # With APF only sum of --max-requests-inflight and --max-mutating-requests-inflight matters, so set --max-mutating-requests-inflight to 0.
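      # Here the total inflight budget is 640; the 100-node kubemark jobs above use 80 and the 500-node job uses 160.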
      - --env=KUBEMARK_APISERVER_TEST_ARGS=--max-requests-inflight=640 --max-mutating-requests-inflight=0 --profiling --contention-profiling
      - --test=false
      - --test_args=--ginkgo.focus=xxxx
      - --test-cmd=$GOPATH/src/k8s.io/perf-tests/run-e2e.sh
      - --test-cmd-args=cluster-loader2
      - --test-cmd-args=--experimental-gcp-snapshot-prometheus-disk=true
      - --test-cmd-args=--experimental-prometheus-disk-snapshot-name=$(JOB_NAME)-$(BUILD_ID)
      - --test-cmd-args=--experimental-prometheus-snapshot-to-report-dir=true
      - --test-cmd-args=--nodes=5000
      - --test-cmd-args=--provider=kubemark
      - --test-cmd-args=--report-dir=$(ARTIFACTS)
      - --test-cmd-args=--testconfig=testing/load/config.yaml
      - --test-cmd-args=--testconfig=testing/huge-service/config.yaml
      - --test-cmd-args=--testconfig=testing/access-tokens/config.yaml
      - --test-cmd-args=--testoverrides=./testing/experiments/enable_restart_count_check.yaml
      - --test-cmd-args=--testoverrides=./testing/experiments/ignore_known_kubemark_container_restarts.yaml
      - --test-cmd-name=ClusterLoaderV2
      - --timeout=1080m
      - --use-logexporter
      - --logexporter-gcs-path=gs://k8s-infra-scalability-tests-logs/$(JOB_NAME)/$(BUILD_ID)
      # docker-in-docker needs privileged mode
      securityContext:
        privileged: true
      resources:
        requests:
          cpu: 6
          memory: "16Gi"
        limits:
          cpu: 6
          memory: "16Gi"

- name: ci-kubernetes-kubemark-gce-scale-scheduler
  cluster: k8s-infra-prow-build
  tags:
  - "perfDashPrefix: kubemark-5000Nodes-scheduler"
  - "perfDashJobType: performance"
  # Run at 10:01 UTC on the even days of each month. There will be ample time
  # between the kubemark-5000Nodes job (expected to start at 16:01 UTC the
  # previous day and finish in around ~12-14 hours) and this job. This job is
  # expected to take ~6-8 hours, which should allow it to finish well before
  # the next kubemark-5000Nodes job (at 00:01 UTC).
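  # cron fields are minute hour day-of-month month day-of-week, so this fires at 10:01 UTC on every even day of the month.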
  cron: '1 10 2-31/2 * *'
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-dind-enabled: "true"
    preset-e2e-kubemark-common: "true"
    preset-e2e-kubemark-gce-scale: "true"
    preset-e2e-scalability-periodics: "true"
    preset-e2e-scalability-periodics-master: "true"
  decorate: true
  decoration_config:
    timeout: 1100m
  extra_refs:
  - org: kubernetes
    repo: kubernetes
    base_ref: master
    path_alias: k8s.io/kubernetes
  - org: kubernetes
    repo: perf-tests
    base_ref: master
    path_alias: k8s.io/perf-tests
  annotations:
    testgrid-dashboards: sig-scalability-kubemark
    testgrid-tab-name: kubemark-5000-scheduler
    testgrid-num-failures-to-alert: '1'
    testgrid-num-columns-recent: '3'
  spec:
    containers:
    - image: gcr.io/k8s-staging-test-infra/kubekins-e2e:v20240515-17c6d50e24-master
      command:
      - runner.sh
      - /workspace/scenarios/kubernetes_e2e.py
      args:
      - --cluster=kubemark-5000
      - --extract=ci/latest
      - --gcp-node-image=gci
      - --gcp-node-size=e2-standard-8
      - --gcp-master-size=n2-standard-2
      - --gcp-nodes=84
      - --gcp-project-type=scalability-project
      - --gcp-zone=us-east1-b
      - --kubemark
      - --kubemark-nodes=5000
      - --kubemark-master-size=n2-standard-64
      - --provider=gce
      - --metadata-sources=cl2-metadata.json
      - --test=false
      - --test_args=--ginkgo.focus=xxxx
      - --test-cmd=$GOPATH/src/k8s.io/perf-tests/run-e2e.sh
      - --test-cmd-args=cluster-loader2
      - --test-cmd-args=--experimental-gcp-snapshot-prometheus-disk=true
      - --test-cmd-args=--experimental-prometheus-disk-snapshot-name=$(JOB_NAME)-$(BUILD_ID)
      - --test-cmd-args=--experimental-prometheus-snapshot-to-report-dir=true
      - --test-cmd-args=--nodes=5000
      - --test-cmd-args=--provider=kubemark
      - --test-cmd-args=--report-dir=$(ARTIFACTS)
      - --test-cmd-args=--testsuite=testing/density/scheduler-suite.yaml
      - --test-cmd-args=--testoverrides=./testing/experiments/enable_restart_count_check.yaml
      - --test-cmd-args=--testoverrides=./testing/experiments/ignore_known_kubemark_container_restarts.yaml
      - --test-cmd-name=ClusterLoaderV2
      - --timeout=1080m
      - --use-logexporter
      - --logexporter-gcs-path=gs://k8s-infra-scalability-tests-logs/$(JOB_NAME)/$(BUILD_ID)
      # docker-in-docker needs privileged mode
      securityContext:
        privileged: true
      resources:
        limits:
          cpu: 6
          memory: "16Gi"
        requests:
          cpu: 6
          memory: "16Gi"

- name: ci-kubernetes-kubemark-high-density-100-gce
  cluster: k8s-infra-prow-build
  tags:
  - "perfDashPrefix: kubemark-100Nodes-highDensity"
  - "perfDashJobType: performance"
  interval: 24h
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-dind-enabled: "true"
    preset-e2e-kubemark-common: "true"
    preset-e2e-scalability-periodics: "true"
    preset-e2e-scalability-periodics-master: "true"
  decorate: true
  decoration_config:
    timeout: 300m
  extra_refs:
  - org: kubernetes
    repo: kubernetes
    base_ref: master
    path_alias: k8s.io/kubernetes
  - org: kubernetes
    repo: perf-tests
    base_ref: master
    path_alias: k8s.io/perf-tests
  annotations:
    testgrid-dashboards: sig-scalability-kubemark
    testgrid-tab-name: kubemark-100-high-density
    testgrid-num-failures-to-alert: '1'
  spec:
    containers:
    - image: gcr.io/k8s-staging-test-infra/kubekins-e2e:v20240515-17c6d50e24-master
      command:
      - runner.sh
      - /workspace/scenarios/kubernetes_e2e.py
      args:
      - --cluster=kubemark-100pods
      - --extract=ci/latest
      - --gcp-node-image=gci
      - --gcp-node-size=e2-standard-8
      - --gcp-master-size=n2-standard-2
      - --gcp-nodes=9
      - --gcp-project-type=scalability-project
      - --gcp-zone=us-east1-b
      - --kubemark
      - --kubemark-master-size=n2-standard-32
      - --kubemark-nodes=600
      - --provider=gce
      - --metadata-sources=cl2-metadata.json
      - --env=KUBEMARK_APISERVER_TEST_ARGS=--max-requests-inflight=80 --max-mutating-requests-inflight=0 --profiling --contention-profiling
      - --test=false
      - --test_args=--ginkgo.focus=xxxx
      - --test-cmd=$GOPATH/src/k8s.io/perf-tests/run-e2e.sh
      - --test-cmd-args=cluster-loader2
      - --test-cmd-args=--nodes=600
      - --test-cmd-args=--provider=kubemark
      - --test-cmd-args=--report-dir=$(ARTIFACTS)
      # TODO(https://github.com/kubernetes/perf-tests/issues/1007): load test should be used to test high-density.
      - --test-cmd-args=--testconfig=testing/density/high-density-config.yaml
      - --test-cmd-args=--testoverrides=./testing/experiments/use_simple_latency_query.yaml
      - --test-cmd-args=--testoverrides=./testing/overrides/600_nodes_high_density.yaml
      - --test-cmd-name=ClusterLoaderV2
      - --timeout=280m
      - --use-logexporter
      - --logexporter-gcs-path=gs://k8s-infra-scalability-tests-logs/$(JOB_NAME)/$(BUILD_ID)
      # docker-in-docker needs privileged mode
      securityContext:
        privileged: true
      resources:
        requests:
          cpu: 2
          memory: "8Gi"
        limits:
          cpu: 2
          memory: "8Gi"

- name: ci-perf-tests-kubemark-100-benchmark
  cluster: k8s-infra-prow-build
  interval: 2h
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-e2e-scalability-periodics: "true"
    preset-e2e-scalability-periodics-master: "true"
  decorate: true
  decoration_config:
    timeout: 10m
  extra_refs:
  - org: kubernetes
    repo: perf-tests
    base_ref: master
    path_alias: k8s.io/perf-tests
  annotations:
    testgrid-dashboards: sig-scalability-perf-tests
    testgrid-tab-name: kubemark-100-benchmark
  spec:
    containers:
    - image: gcr.io/k8s-staging-test-infra/kubekins-e2e:v20240515-17c6d50e24-master
      command:
      - runner.sh
      args:
      - ./benchmark/runner.sh
      resources:
        requests:
          cpu: 1
          memory: "2Gi"
        limits:
          cpu: 1
          memory: "2Gi"

- name: ci-benchmark-scheduler-perf-master
  cluster: k8s-infra-prow-build
  tags:
  - "perfDashPrefix: scheduler-perf-benchmark"
  - "perfDashJobType: benchmark"
  interval: 2h30m
  annotations:
    testgrid-dashboards: sig-scalability-benchmarks
    testgrid-tab-name: scheduler-perf
  decorate: true
  extra_refs:
  - org: kubernetes
    repo: kubernetes
    base_ref: master
    path_alias: k8s.io/kubernetes
  decoration_config:
    timeout: 2h25m
  spec:
    containers:
    - image: gcr.io/k8s-staging-test-infra/kubekins-e2e:v20240515-17c6d50e24-master
      command:
      - ./hack/jenkins/benchmark-dockerized.sh
      args:
      - ./test/integration/scheduler_perf
      env:
      - name: KUBE_TIMEOUT
        value: --timeout=2h25m
      - name: TEST_PREFIX
        value: BenchmarkPerfScheduling
      # Set the benchtime to a very low value so every test is run at most once
      # even on very powerful machines
      - name: BENCHTIME
        value: 1ns
      # We need to constrain compute resources so all the tests
      # finish approximately at the same time. More compute power
      # can increase scheduling throughput and make the results
      # incomparable.
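      # Note: requests equal to limits (Guaranteed QoS) pins every run to the same
      # 6 CPU / 24Gi budget, regardless of how large the build node is.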
      resources:
        requests:
          cpu: 6
          memory: "24Gi"
        limits:
          cpu: 6
          memory: "24Gi"

- name: ci-benchmark-kube-dns-master
  cluster: k8s-infra-prow-build
  interval: 2h
  tags:
  - "perfDashPrefix: kube-dns benchmark"
  - "perfDashJobType: dnsBenchmark"
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-e2e-scalability-periodics: "true"
    preset-e2e-scalability-periodics-master: "true"
  decorate: true
  decoration_config:
    timeout: 140m
  extra_refs:
  - org: kubernetes
    repo: kubernetes
    base_ref: master
    path_alias: k8s.io/kubernetes
  - org: kubernetes
    repo: perf-tests
    base_ref: master
    path_alias: k8s.io/perf-tests
  annotations:
    testgrid-dashboards: sig-scalability-benchmarks
    testgrid-tab-name: kube-dns
  spec:
    containers:
    - image: gcr.io/k8s-staging-test-infra/kubekins-e2e:v20240515-17c6d50e24-master
      command:
      - runner.sh
      - /workspace/scenarios/kubernetes_e2e.py
      args:
      - --check-leaked-resources
      - --cluster=kube-dns-benchmark
      - --extract=ci/latest
      - --gcp-node-size=e2-standard-2
      - --gcp-nodes=3
      - --gcp-zone=us-east1-b
      - --provider=gce
      - --test=false
      - --test-cmd=$GOPATH/src/k8s.io/perf-tests/run-e2e.sh
      - --test-cmd-args=kube-dns
      - --test-cmd-args=$(ARTIFACTS)/out
      - --test-cmd-args=$(ARTIFACTS)
      - --test-cmd-name=KubeDnsBenchmark
      - --timeout=120m
      resources:
        requests:
          cpu: 2
          memory: "6Gi"
        limits:
          cpu: 2
          memory: "6Gi"

- name: ci-benchmark-nodelocal-dns-master
  cluster: k8s-infra-prow-build
  interval: 2h
  tags:
  - "perfDashPrefix: node-local-dns benchmark"
  - "perfDashJobType: dnsBenchmark"
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-e2e-scalability-periodics: "true"
    preset-e2e-scalability-periodics-master: "true"
  decorate: true
  decoration_config:
    timeout: 140m
  extra_refs:
  - org: kubernetes
    repo: kubernetes
    base_ref: master
    path_alias: k8s.io/kubernetes
  - org: kubernetes
    repo: perf-tests
    base_ref: master
    path_alias: k8s.io/perf-tests
  annotations:
    testgrid-dashboards: sig-scalability-benchmarks
    testgrid-tab-name: node-local-dns
  spec:
    containers:
    - image: gcr.io/k8s-staging-test-infra/kubekins-e2e:v20240515-17c6d50e24-master
      command:
      - runner.sh
      - /workspace/scenarios/kubernetes_e2e.py
      args:
      - --check-leaked-resources
      - --cluster=node-local-dns-benchmark
      - --env=KUBE_ENABLE_NODELOCAL_DNS=true
      - --extract=ci/latest
      - --gcp-node-size=e2-standard-2
      - --gcp-nodes=3
      - --gcp-zone=us-east1-b
      - --provider=gce
      - --test=false
      - --test-cmd=$GOPATH/src/k8s.io/perf-tests/run-e2e.sh
      - --test-cmd-args=node-local-dns
      - --test-cmd-args=$(ARTIFACTS)/out
      - --test-cmd-args=$(ARTIFACTS)
      - --test-cmd-args=169.254.20.10
      - --test-cmd-name=KubeDnsBenchmark
      - --timeout=120m
      resources:
        requests:
          cpu: 2
          memory: "4Gi"
        limits:
          cpu: 2
          memory: "4Gi"

- name: ci-kubernetes-e2e-gce-network-metric-measurement
  cluster: k8s-infra-prow-build
  tags:
  - "perfDashPrefix: network-performance"
  - "perfDashJobType: performance"
  interval: 24h
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-e2e-scalability-common: "true"
    preset-e2e-scalability-periodics: "true"
    preset-e2e-scalability-periodics-master: "true"
  annotations:
    testgrid-dashboards: sig-scalability-network
    testgrid-tab-name: metric-measurement
  decorate: true
  decoration_config:
    timeout: 60m
  extra_refs:
  - org: kubernetes
    repo: kubernetes
    base_ref: master
    path_alias: k8s.io/kubernetes
  - org: kubernetes
    repo: perf-tests
    base_ref: master
    path_alias: k8s.io/perf-tests
  spec:
    containers:
    - image: gcr.io/k8s-staging-test-infra/kubekins-e2e:v20240515-17c6d50e24-master
      command:
      - runner.sh
      - /workspace/scenarios/kubernetes_e2e.py
      args:
      - --check-leaked-resources
      - --cluster=
      - --extract=ci/latest
      - --gcp-node-image=gci
      - --gcp-nodes=102
      - --provider=gce
      - --test=false
      - --test-cmd=$GOPATH/src/k8s.io/perf-tests/run-e2e.sh
      - --test-cmd-args=cluster-loader2
      - --test-cmd-args=--nodes=100
      - --test-cmd-args=--provider=gce
      - --test-cmd-args=--report-dir=/workspace/_artifacts
      - --test-cmd-args=--testsuite=testing/network/suite.yaml
      - --test-cmd-name=ClusterLoaderV2
      - --timeout=40m
      - --use-logexporter
      resources:
        requests:
          cpu: 1
          memory: "2Gi"
        limits:
          cpu: 1
          memory: "2Gi"

- interval: 12h
  cluster: k8s-infra-prow-build
  name: ci-kubernetes-e2e-gci-gce-benchmark-requests-1
  labels:
    preset-service-account: "true"
    preset-k8s-ssh: "true"
    preset-e2e-scalability-common: "true"
    preset-e2e-scalability-periodics: "true"
    preset-e2e-scalability-periodics-master: "true"
  decorate: true
  decoration_config:
    timeout: 60m
  extra_refs:
  - org: kubernetes
    repo: kubernetes
    base_ref: master
    path_alias: k8s.io/kubernetes
  - org: kubernetes
    repo: perf-tests
    base_ref: master
    path_alias: k8s.io/perf-tests
  annotations:
    testgrid-dashboards: sig-scalability-benchmarks
    testgrid-tab-name: gce-benchmark-requests-1
  spec:
    containers:
    - image: gcr.io/k8s-staging-test-infra/kubekins-e2e:v20240515-17c6d50e24-master
      command:
      - runner.sh
      - /workspace/scenarios/kubernetes_e2e.py
      args:
      - --check-leaked-resources
      - --cluster=benchmark-small
      # Eliminate test flakiness
      - --env=ALLOWED_NOTREADY_NODES=1
      - --env=APISERVER_TEST_ARGS=--max-requests-inflight=1000 --max-mutating-requests-inflight=0 --profiling --contention-profiling
      - --env=CL2_BENCHMARK_INFLIGHT=1
      - --env=CL2_BENCHMARK_URI=/api/v1/namespaces/%namespace%/configmaps/benchmark-config-map-0?resourceVersion=0
      - --extract=ci/latest
      - --gcp-master-size=n2-standard-96
      - --gcp-node-image=gci
      - --gcp-node-size=e2-standard-8
      - --gcp-nodes=1
      - --gcp-project-type=scalability-project
      - --gcp-zone=us-east1-b
      - --provider=gce
      - --metadata-sources=cl2-metadata.json
      - --test=false
      - --test-cmd=$GOPATH/src/k8s.io/perf-tests/run-e2e.sh
      - --test-cmd-args=cluster-loader2
      - --test-cmd-args=--experimental-gcp-snapshot-prometheus-disk=true
      - --test-cmd-args=--experimental-prometheus-disk-snapshot-name=$(JOB_NAME)-$(BUILD_ID)
      - --test-cmd-args=--experimental-prometheus-snapshot-to-report-dir=true
      - --test-cmd-args=--nodes=1
      - --test-cmd-args=--prometheus-scrape-kubelets=true
      - --test-cmd-args=--prometheus-scrape-node-exporter
      - --test-cmd-args=--provider=gce
      - --test-cmd-args=--report-dir=$(ARTIFACTS)
      - --test-cmd-args=--testconfig=testing/request-benchmark/config.yaml
      - --test-cmd-name=ClusterLoaderV2
      - --timeout=45m
      - --use-logexporter
      - --logexporter-gcs-path=gs://sig-scalability-logs/$(JOB_NAME)/$(BUILD_ID)
      resources:
        requests:
          cpu: 2
          memory: 6Gi
        limits:
          cpu: 2
          memory: 6Gi