github.com/yrj2011/jx-test-infra@v0.0.0-20190529031832-7a2065ee98eb/config/tests/jobs/jobs_test.go (about) 1 /* 2 Copyright 2018 The Kubernetes Authors. 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 package tests 18 19 // This file validates kubernetes's jobs configs. 20 // See also prow/config/jobstests for generic job tests that 21 // all deployments should consider using. 22 23 import ( 24 "bytes" 25 "encoding/json" 26 "errors" 27 "flag" 28 "fmt" 29 "io/ioutil" 30 "os" 31 "regexp" 32 "strings" 33 "testing" 34 35 "k8s.io/api/core/v1" 36 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 37 38 cfg "k8s.io/test-infra/prow/config" 39 "k8s.io/test-infra/prow/kube" 40 ) 41 42 // config.json is the worst but contains useful information :-( 43 type configJSON map[string]map[string]interface{} 44 45 var configPath = flag.String("config", "../../../prow/config.yaml", "Path to prow config") 46 var jobConfigPath = flag.String("job-config", "../../jobs", "Path to prow job config") 47 var configJSONPath = flag.String("config-json", "../../../jobs/config.json", "Path to prow job config") 48 var gubernatorPath = flag.String("gubernator-path", "https://k8s-gubernator.appspot.com", "Path to linked gubernator") 49 var bucket = flag.String("bucket", "kubernetes-jenkins", "Gcs bucket for log upload") 50 51 func (c configJSON) ScenarioForJob(jobName string) string { 52 if scenario, ok := c[jobName]["scenario"]; ok { 53 return scenario.(string) 54 } 55 return "" 56 } 57 58 func 
readConfigJSON(path string) (config configJSON, err error) { 59 raw, err := ioutil.ReadFile(path) 60 if err != nil { 61 return nil, err 62 } 63 config = configJSON{} 64 err = json.Unmarshal(raw, &config) 65 if err != nil { 66 return nil, err 67 } 68 return config, nil 69 } 70 71 // Loaded at TestMain. 72 var c *cfg.Config 73 var cj configJSON 74 75 func TestMain(m *testing.M) { 76 flag.Parse() 77 if *configPath == "" { 78 fmt.Println("--config must set") 79 os.Exit(1) 80 } 81 82 conf, err := cfg.Load(*configPath, *jobConfigPath) 83 if err != nil { 84 fmt.Printf("Could not load config: %v", err) 85 os.Exit(1) 86 } 87 c = conf 88 89 if *configJSONPath != "" { 90 cj, err = readConfigJSON(*configJSONPath) 91 if err != nil { 92 fmt.Printf("Could not load jobs config: %v", err) 93 os.Exit(1) 94 } 95 } 96 97 os.Exit(m.Run()) 98 } 99 100 func TestReportTemplate(t *testing.T) { 101 var testcases = []struct { 102 org string 103 repo string 104 number int 105 suffix string 106 }{ 107 { 108 org: "o", 109 repo: "r", 110 number: 4, 111 suffix: "o_r/4", 112 }, 113 { 114 org: "kubernetes", 115 repo: "test-infra", 116 number: 123, 117 suffix: "test-infra/123", 118 }, 119 { 120 org: "kubernetes", 121 repo: "kubernetes", 122 number: 123, 123 suffix: "123", 124 }, 125 { 126 org: "o", 127 repo: "kubernetes", 128 number: 456, 129 suffix: "o_kubernetes/456", 130 }, 131 } 132 for _, tc := range testcases { 133 var b bytes.Buffer 134 if err := c.Plank.ReportTemplate.Execute(&b, &kube.ProwJob{ 135 Spec: kube.ProwJobSpec{ 136 Refs: &kube.Refs{ 137 Org: tc.org, 138 Repo: tc.repo, 139 Pulls: []kube.Pull{ 140 { 141 Number: tc.number, 142 }, 143 }, 144 }, 145 }, 146 }); err != nil { 147 t.Errorf("Error executing template: %v", err) 148 continue 149 } 150 expectedPath := *gubernatorPath + "/pr/" + tc.suffix 151 if !strings.Contains(b.String(), expectedPath) { 152 t.Errorf("Expected template to contain %s, but it didn't: %s", expectedPath, b.String()) 153 } 154 } 155 } 156 157 func 
TestURLTemplate(t *testing.T) {
    testcases := []struct {
        name    string
        jobType kube.ProwJobType
        org     string
        repo    string
        job     string
        build   string
        expect  string
    }{
        {
            name:    "k8s presubmit",
            jobType: kube.PresubmitJob,
            org:     "kubernetes",
            repo:    "kubernetes",
            job:     "k8s-pre-1",
            build:   "1",
            expect:  *gubernatorPath + "/build/" + *bucket + "/pr-logs/pull/0/k8s-pre-1/1/",
        },
        {
            name:    "k8s/test-infra presubmit",
            jobType: kube.PresubmitJob,
            org:     "kubernetes",
            repo:    "test-infra",
            job:     "ti-pre-1",
            build:   "1",
            expect:  *gubernatorPath + "/build/" + *bucket + "/pr-logs/pull/test-infra/0/ti-pre-1/1/",
        },
        {
            name:    "foo/k8s presubmit",
            jobType: kube.PresubmitJob,
            org:     "foo",
            repo:    "kubernetes",
            job:     "k8s-pre-1",
            build:   "1",
            expect:  *gubernatorPath + "/build/" + *bucket + "/pr-logs/pull/foo_kubernetes/0/k8s-pre-1/1/",
        },
        {
            name:    "foo-bar presubmit",
            jobType: kube.PresubmitJob,
            org:     "foo",
            repo:    "bar",
            job:     "foo-pre-1",
            build:   "1",
            expect:  *gubernatorPath + "/build/" + *bucket + "/pr-logs/pull/foo_bar/0/foo-pre-1/1/",
        },
        {
            name:    "k8s postsubmit",
            jobType: kube.PostsubmitJob,
            org:     "kubernetes",
            repo:    "kubernetes",
            job:     "k8s-post-1",
            build:   "1",
            expect:  *gubernatorPath + "/build/" + *bucket + "/logs/k8s-post-1/1/",
        },
        {
            name:    "k8s periodic",
            jobType: kube.PeriodicJob,
            job:     "k8s-peri-1",
            build:   "1",
            expect:  *gubernatorPath + "/build/" + *bucket + "/logs/k8s-peri-1/1/",
        },
        {
            name:    "empty periodic",
            jobType: kube.PeriodicJob,
            job:     "nan-peri-1",
            build:   "1",
            expect:  *gubernatorPath + "/build/" + *bucket + "/logs/nan-peri-1/1/",
        },
        {
            name:    "k8s batch",
            jobType: kube.BatchJob,
            org:     "kubernetes",
            repo:    "kubernetes",
            job:     "k8s-batch-1",
            build:   "1",
            expect:  *gubernatorPath + "/build/" + *bucket + "/pr-logs/pull/batch/k8s-batch-1/1/",
        },
    }

    for _, tc := range testcases {
        var pj = kube.ProwJob{
            ObjectMeta: metav1.ObjectMeta{Name: tc.name},
            Spec: kube.ProwJobSpec{
                Type: tc.jobType,
                Job:  tc.job,
            },
            Status: kube.ProwJobStatus{
                BuildID: tc.build,
            },
        }
        // Periodics have no refs; everything else gets a single empty pull.
        if tc.jobType != kube.PeriodicJob {
            pj.Spec.Refs = &kube.Refs{
                Pulls: []kube.Pull{{}},
                Org:   tc.org,
                Repo:  tc.repo,
            }
        }

        var b bytes.Buffer
        if err := c.Plank.JobURLTemplate.Execute(&b, &pj); err != nil {
            t.Fatalf("Error executing template: %v", err)
        }
        res := b.String()
        if res != tc.expect {
            t.Errorf("tc: %s, Expect URL: %s, got %s", tc.name, tc.expect, res)
        }
    }
}

// checkContext ensures a reported presubmit's context matches its job name,
// recursing into RunAfterSuccess children.
func checkContext(t *testing.T, repo string, p cfg.Presubmit) {
    if !p.SkipReport && p.Name != p.Context {
        t.Errorf("Context does not match job name: %s in %s", p.Name, repo)
    }
    for _, c := range p.RunAfterSuccess {
        checkContext(t, repo, c)
    }
}

func TestContextMatches(t *testing.T) {
    for repo, presubmits := range c.Presubmits {
        for _, p := range presubmits {
            checkContext(t, repo, p)
        }
    }
}

// checkRetest ensures each presubmit's rerun_command is "/test <job name>",
// recursing into RunAfterSuccess children.
func checkRetest(t *testing.T, repo string, presubmits []cfg.Presubmit) {
    for _, p := range presubmits {
        expected := fmt.Sprintf("/test %s", p.Name)
        if p.RerunCommand != expected {
            t.Errorf("%s in %s rerun_command: %s != expected: %s", repo, p.Name, p.RerunCommand, expected)
        }
        checkRetest(t, repo, p.RunAfterSuccess)
    }
}

func TestRetestMatchJobsName(t *testing.T) {
    for repo, presubmits := range c.Presubmits {
        checkRetest(t, repo, presubmits)
    }
}

type SubmitQueueConfig struct {
    // this is the only field we need for the tests below
    RequiredRetestContexts string `json:"required-retest-contexts"`
}

// findRequired collects the contexts of all always-run, reporting presubmits,
// including RunAfterSuccess children of always-run jobs.
func findRequired(t *testing.T, presubmits []cfg.Presubmit) []string {
    var required []string
    for _, p := range presubmits {
        if !p.AlwaysRun {
            continue
        }
        // idiom: append the recursive result in one call instead of an
        // element-by-element copy loop.
        required = append(required, findRequired(t, p.RunAfterSuccess)...)
        if p.SkipReport {
            continue
        }
        required = append(required, p.Context)
    }
    return required
}

func TestConfigSecurityJobsMatch(t *testing.T) {
    kp := c.Presubmits["kubernetes/kubernetes"]
    sp := c.Presubmits["kubernetes-security/kubernetes"]
    if len(kp) != len(sp) {
        t.Fatalf("length of kubernetes/kubernetes presubmits %d does not equal length of kubernetes-security/kubernetes presubmits %d", len(kp), len(sp))
    }
}

// checkSecurityCluster enforces the cluster policy for a single job:
// non-jenkins jobs in kubernetes-security/* must use the "security" cluster,
// and jobs anywhere else must not.
func checkSecurityCluster(t *testing.T, securityRepo bool, agent, cluster, name string) {
    if securityRepo {
        if agent != "jenkins" && cluster != "security" {
            t.Fatalf("Jobs in kubernetes-security/* should use the security cluster! %s", name)
        }
    } else if cluster == "security" {
        t.Fatalf("Jobs not in kubernetes-security/* should not use the security cluster! %s", name)
    }
}

// Unit test jobs outside kubernetes-security do not use the security cluster
// and that jobs inside kubernetes-security DO
func TestConfigSecurityClusterRestricted(t *testing.T) {
    for repo, jobs := range c.Presubmits {
        securityRepo := strings.HasPrefix(repo, "kubernetes-security/")
        for _, job := range jobs {
            checkSecurityCluster(t, securityRepo, job.Agent, job.Cluster, job.Name)
        }
    }
    for repo, jobs := range c.Postsubmits {
        securityRepo := strings.HasPrefix(repo, "kubernetes-security/")
        for _, job := range jobs {
            checkSecurityCluster(t, securityRepo, job.Agent, job.Cluster, job.Name)
        }
    }
    // TODO(bentheelder): this will need to be more complex if we ever add k-s periodic
    for _, job := range c.AllPeriodics() {
        checkSecurityCluster(t, false, job.Agent, job.Cluster, job.Name)
    }
}

// checkDockerSocketVolumes returns an error if any volume uses a hostpath
// to the docker socket. we do not want to allow this
func checkDockerSocketVolumes(volumes []v1.Volume) error {
    for _, volume := range volumes {
        if volume.HostPath != nil && volume.HostPath.Path == "/var/run/docker.sock" {
            return errors.New("job uses HostPath with docker socket")
        }
    }
    return nil
}

// Make sure jobs are not using the docker socket as a host path
func TestJobDoesNotHaveDockerSocket(t *testing.T) {
    for _, presubmit := range c.AllPresubmits(nil) {
        if presubmit.Spec != nil {
            if err := checkDockerSocketVolumes(presubmit.Spec.Volumes); err != nil {
                t.Errorf("Error in presubmit: %v", err)
            }
        }
    }

    for _, postsubmit := range c.AllPostsubmits(nil) {
        if postsubmit.Spec != nil {
            if err := checkDockerSocketVolumes(postsubmit.Spec.Volumes); err != nil {
                t.Errorf("Error in postsubmit: %v", err)
            }
        }
    }

    // consistency: use AllPeriodics() like the sibling tests do, so nested
    // RunAfterSuccess periodics are covered too (was ranging c.Periodics).
    for _, periodic := range c.AllPeriodics() {
        if periodic.Spec != nil {
            if err := checkDockerSocketVolumes(periodic.Spec.Volumes); err != nil {
                t.Errorf("Error in periodic: %v", err)
            }
        }
    }
}

// Validate any containers using a bazelbuild image, returning which bazelbuild tags are used.
410 // In particular ensure that: 411 // * Presubmit, postsubmit jobs specify at least one --repo flag, the first of which uses PULL_REFS and REPO_NAME vars 412 // * Prow injected vars like REPO_NAME, PULL_REFS, etc are only used on non-periodic jobs 413 // * Deprecated --branch, --pull flags are not used 414 // * Required --service-account, --upload, --job, --clean flags are present 415 func checkBazelbuildSpec(t *testing.T, name string, spec *v1.PodSpec, periodic bool) map[string]int { 416 img := "gcr.io/k8s-testimages/bazelbuild" 417 tags := map[string]int{} 418 if spec == nil { 419 return tags 420 } 421 // Tags look something like vDATE-SHA or vDATE-SHA-BAZELVERSION. 422 // We want to match only on the date + sha 423 tagRE := regexp.MustCompile(`^([^-]+-[^-]+)(-[^-]+)?$`) 424 for _, c := range spec.Containers { 425 parts := strings.SplitN(c.Image, ":", 2) 426 var i, tag string // image:tag 427 i = parts[0] 428 if i != img { 429 continue 430 } 431 if len(parts) == 1 { 432 tag = "latest" 433 } else { 434 submatches := tagRE.FindStringSubmatch(parts[1]) 435 if submatches != nil { 436 tag = submatches[1] 437 } else { 438 t.Errorf("bazelbuild tag '%s' doesn't match expected format", parts[1]) 439 } 440 } 441 tags[tag]++ 442 443 found := map[string][]string{} 444 for _, a := range c.Args { 445 parts := strings.SplitN(a, "=", 2) 446 k := parts[0] 447 v := "true" 448 if len(parts) == 2 { 449 v = parts[1] 450 } 451 found[k] = append(found[k], v) 452 453 // Require --flag=FOO for easier processing 454 if k == "--repo" && len(parts) == 1 { 455 t.Errorf("%s: use --repo=FOO not --repo foo", name) 456 } 457 } 458 459 if _, ok := found["--pull"]; ok { 460 t.Errorf("%s: uses deprecated --pull arg, use --repo=org/repo=$(PULL_REFS) instead", name) 461 } 462 if _, ok := found["--branch"]; ok { 463 t.Errorf("%s: uses deprecated --branch arg, use --repo=org/repo=$(PULL_REFS) instead", name) 464 } 465 466 for _, f := range []string{ 467 "--service-account", 468 "--upload", 469 
"--job", 470 } { 471 if _, ok := found[f]; !ok { 472 t.Errorf("%s: missing %s flag", name, f) 473 } 474 } 475 476 if v, ok := found["--repo"]; !ok { 477 t.Errorf("%s: missing %s flag", name, "--repo") 478 } else { 479 firstRepo := true 480 hasRefs := false 481 hasName := false 482 for _, r := range v { 483 hasRefs = hasRefs || strings.Contains(r, "$(PULL_REFS)") 484 hasName = hasName || strings.Contains(r, "$(REPO_NAME)") 485 if !firstRepo { 486 t.Errorf("%s: has too many --repo. REMOVE THIS CHECK BEFORE MERGE", name) 487 } 488 for _, d := range []string{ 489 "$(REPO_NAME)", 490 "$(REPO_OWNER)", 491 "$(PULL_BASE_REF)", 492 "$(PULL_BASE_SHA)", 493 "$(PULL_REFS)", 494 "$(PULL_NUMBER)", 495 "$(PULL_PULL_SHA)", 496 } { 497 has := strings.Contains(r, d) 498 if periodic && has { 499 t.Errorf("%s: %s are not available to periodic jobs, please use a static --repo=org/repo=branch", name, d) 500 } else if !firstRepo && has { 501 t.Errorf("%s: %s are only relevant to the first --repo flag, remove from --repo=%s", name, d, r) 502 } 503 } 504 firstRepo = false 505 } 506 if !periodic && !hasRefs { 507 t.Errorf("%s: non-periodic jobs need a --repo=org/branch=$(PULL_REFS) somewhere", name) 508 } 509 if !periodic && !hasName { 510 t.Errorf("%s: non-periodic jobs need a --repo=org/$(REPO_NAME) somewhere", name) 511 } 512 } 513 514 if c.Resources.Requests == nil { 515 t.Errorf("%s: bazel jobs need to place a resource request", name) 516 } 517 } 518 return tags 519 } 520 521 // Unit test jobs that use a bazelbuild image do so correctly. 
522 func TestBazelbuildArgs(t *testing.T) { 523 tags := map[string][]string{} // tag -> jobs map 524 for _, p := range c.AllPresubmits(nil) { 525 for t := range checkBazelbuildSpec(t, p.Name, p.Spec, false) { 526 tags[t] = append(tags[t], p.Name) 527 } 528 } 529 for _, p := range c.AllPostsubmits(nil) { 530 for t := range checkBazelbuildSpec(t, p.Name, p.Spec, false) { 531 tags[t] = append(tags[t], p.Name) 532 } 533 } 534 for _, p := range c.AllPeriodics() { 535 for t := range checkBazelbuildSpec(t, p.Name, p.Spec, true) { 536 tags[t] = append(tags[t], p.Name) 537 } 538 } 539 pinnedJobs := map[string]string{ 540 //job: reason for pinning 541 // these frequently need to be pinned... 542 //"pull-test-infra-bazel": "test-infra adopts bazel upgrades first", 543 //"ci-test-infra-bazel": "test-infra adopts bazel upgrades first", 544 "pull-test-infra-bazel-canary": "canary testing the latest bazel", 545 "pull-kubernetes-bazel-build-canary": "canary testing the latest bazel", 546 "pull-kubernetes-bazel-test-canary": "canary testing the latest bazel", 547 } 548 // auto insert pull-security-kubernetes-* 549 for job, reason := range pinnedJobs { 550 if strings.HasPrefix(job, "pull-kubernetes") { 551 pinnedJobs[strings.Replace(job, "pull-kubernetes", "pull-security-kubernetes", 1)] = reason 552 } 553 } 554 maxTag := "" 555 maxN := 0 556 for t, js := range tags { 557 n := len(js) 558 if n > maxN { 559 maxTag = t 560 maxN = n 561 } 562 } 563 for tag, js := range tags { 564 current := tag == maxTag 565 for _, j := range js { 566 if v, pinned := pinnedJobs[j]; !pinned && !current { 567 t.Errorf("%s: please add to the pinnedJobs list or else update tag to %s", j, maxTag) 568 } else if current && pinned { 569 t.Errorf("%s: please remove from the pinnedJobs list", j) 570 } else if !current && v == "" { 571 t.Errorf("%s: pinning to a non-default version requires a non-empty reason for doing so", j) 572 } 573 } 574 } 575 } 576 577 // checkLatestUsesImagePullPolicy returns an error if 
an image is a `latest-.*` tag, 578 // but doesn't have imagePullPolicy: Always 579 func checkLatestUsesImagePullPolicy(spec *v1.PodSpec) error { 580 for _, container := range spec.Containers { 581 if strings.Contains(container.Image, ":latest-") { 582 // If the job doesn't specify imagePullPolicy: Always, 583 // we aren't guaranteed to check for the latest version of the image. 584 if container.ImagePullPolicy != "Always" { 585 return errors.New("job uses latest- tag, but does not specify imagePullPolicy: Always") 586 } 587 } 588 if strings.HasSuffix(container.Image, ":latest") { 589 // The k8s default for `:latest` images is `imagePullPolicy: Always` 590 // Check the job didn't override 591 if container.ImagePullPolicy != "" && container.ImagePullPolicy != "Always" { 592 return errors.New("job uses latest tag, but does not specify imagePullPolicy: Always") 593 } 594 } 595 596 } 597 return nil 598 } 599 600 // Make sure jobs that use `latest-*` tags specify `imagePullPolicy: Always` 601 func TestLatestUsesImagePullPolicy(t *testing.T) { 602 for _, presubmit := range c.AllPresubmits(nil) { 603 if presubmit.Spec != nil { 604 if err := checkLatestUsesImagePullPolicy(presubmit.Spec); err != nil { 605 t.Errorf("Error in presubmit %q: %v", presubmit.Name, err) 606 } 607 } 608 } 609 610 for _, postsubmit := range c.AllPostsubmits(nil) { 611 if postsubmit.Spec != nil { 612 if err := checkLatestUsesImagePullPolicy(postsubmit.Spec); err != nil { 613 t.Errorf("Error in postsubmit %q: %v", postsubmit.Name, err) 614 } 615 } 616 } 617 618 for _, periodic := range c.AllPeriodics() { 619 if periodic.Spec != nil { 620 if err := checkLatestUsesImagePullPolicy(periodic.Spec); err != nil { 621 t.Errorf("Error in periodic %q: %v", periodic.Name, err) 622 } 623 } 624 } 625 } 626 627 // checkKubekinsPresets returns an error if a spec references to kubekins-e2e|bootstrap image, 628 // but doesn't use service preset or ssh preset 629 func checkKubekinsPresets(jobName string, spec 
*v1.PodSpec, labels, validLabels map[string]string) error { 630 service := true 631 ssh := true 632 633 for _, container := range spec.Containers { 634 if strings.Contains(container.Image, "kubekins-e2e") || strings.Contains(container.Image, "bootstrap") { 635 service = false 636 for key, val := range labels { 637 if (key == "preset-gke-alpha-service" || key == "preset-service-account" || key == "preset-istio-service") && val == "true" { 638 service = true 639 } 640 } 641 } 642 643 configJSONJobName := strings.Replace(jobName, "pull-kubernetes", "pull-security-kubernetes", -1) 644 if cj.ScenarioForJob(configJSONJobName) == "kubenetes_e2e" { 645 ssh = false 646 for key, val := range labels { 647 if (key == "preset-k8s-ssh" || key == "preset-aws-ssh") && val == "true" { 648 ssh = true 649 } 650 } 651 } 652 } 653 654 if !service { 655 return fmt.Errorf("cannot find service account preset") 656 } 657 658 if !ssh { 659 return fmt.Errorf("cannot find ssh preset") 660 } 661 662 for key, val := range labels { 663 if validVal, ok := validLabels[key]; !ok { 664 return fmt.Errorf("label %s is not a valid preset label", key) 665 } else if validVal != val { 666 return fmt.Errorf("label %s does not have valid value, have %s, expect %s", key, val, validVal) 667 } 668 } 669 670 return nil 671 } 672 673 // TestValidPresets makes sure all presets name starts with 'preset-', all job presets are valid, 674 // and jobs that uses kubekins-e2e image has the right service account preset 675 func TestValidPresets(t *testing.T) { 676 validLabels := map[string]string{} 677 for _, preset := range c.Presets { 678 for label, val := range preset.Labels { 679 if !strings.HasPrefix(label, "preset-") { 680 t.Errorf("Preset label %s - label name should start with 'preset-'", label) 681 } else if val != "true" { 682 t.Errorf("Preset label %s - label value should be true", label) 683 } 684 if _, ok := validLabels[label]; ok { 685 t.Errorf("Duplicated preset label : %s", label) 686 } else { 687 
validLabels[label] = val 688 } 689 } 690 } 691 692 for _, presubmit := range c.AllPresubmits(nil) { 693 if presubmit.Spec != nil && !presubmit.Decorate { 694 if err := checkKubekinsPresets(presubmit.Name, presubmit.Spec, presubmit.Labels, validLabels); err != nil { 695 t.Errorf("Error in presubmit %q: %v", presubmit.Name, err) 696 } 697 } 698 } 699 700 for _, postsubmit := range c.AllPostsubmits(nil) { 701 if postsubmit.Spec != nil && !postsubmit.Decorate { 702 if err := checkKubekinsPresets(postsubmit.Name, postsubmit.Spec, postsubmit.Labels, validLabels); err != nil { 703 t.Errorf("Error in postsubmit %q: %v", postsubmit.Name, err) 704 } 705 } 706 } 707 708 for _, periodic := range c.AllPeriodics() { 709 if periodic.Spec != nil && !periodic.Decorate { 710 if err := checkKubekinsPresets(periodic.Name, periodic.Spec, periodic.Labels, validLabels); err != nil { 711 t.Errorf("Error in periodic %q: %v", periodic.Name, err) 712 } 713 } 714 } 715 } 716 717 func hasArg(wanted string, args []string) bool { 718 for _, arg := range args { 719 if strings.HasPrefix(arg, wanted) { 720 return true 721 } 722 } 723 724 return false 725 } 726 727 func checkScenarioArgs(jobName, imageName string, args []string) error { 728 // env files/scenarios validation 729 scenarioArgs := false 730 scenario := "" 731 for _, arg := range args { 732 if strings.HasPrefix(arg, "--env-file=") { 733 env := strings.TrimPrefix(arg, "--env-file=") 734 if _, err := os.Stat("../../../" + env); err != nil { 735 return fmt.Errorf("job %s: cannot stat env file %s", jobName, env) 736 } 737 } 738 739 if arg == "--" { 740 scenarioArgs = true 741 } 742 743 if strings.HasPrefix(arg, "--scenario=") { 744 scenario = strings.TrimPrefix(arg, "--scenario=") 745 } 746 } 747 748 if scenario == "" { 749 entry := jobName 750 if strings.HasPrefix(jobName, "pull-security-kubernetes") { 751 entry = strings.Replace(entry, "pull-security-kubernetes", "pull-kubernetes", -1) 752 } 753 754 if _, ok := cj[entry]; ok { 755 // the 
unit test is handled in jobs/config_test.py 756 return nil 757 } 758 759 if !scenarioArgs { 760 if strings.Contains(imageName, "kubekins-e2e") || 761 strings.Contains(imageName, "bootstrap") || 762 strings.Contains(imageName, "gcloud-in-go") { 763 return fmt.Errorf("job %s: image %s uses bootstrap.py and need scenario args", jobName, imageName) 764 } 765 return nil 766 } 767 768 } else { 769 if _, err := os.Stat(fmt.Sprintf("../../../scenarios/%s.py", scenario)); err != nil { 770 return fmt.Errorf("job %s: scenario %s does not exist: %s", jobName, scenario, err) 771 } 772 773 if !scenarioArgs { 774 return fmt.Errorf("job %s: set --scenario and will need scenario args", jobName) 775 } 776 } 777 778 // shared build args 779 use_shared_build_in_args := hasArg("--use-shared-build", args) 780 extract_in_args := hasArg("--extract", args) 781 build_in_args := hasArg("--build", args) 782 783 if use_shared_build_in_args && extract_in_args { 784 return fmt.Errorf("job %s: --use-shared-build and --extract cannot be combined", jobName) 785 } 786 787 if use_shared_build_in_args && build_in_args { 788 return fmt.Errorf("job %s: --use-shared-build and --build cannot be combined", jobName) 789 } 790 791 if scenario != "kubernetes_e2e" { 792 return nil 793 } 794 795 if hasArg("--provider=gke", args) { 796 if !hasArg("--deployment=gke", args) { 797 return fmt.Errorf("with --provider=gke, job %s must use --deployment=gke", jobName) 798 } 799 if hasArg("--gcp-master-image", args) { 800 return fmt.Errorf("with --provider=gke, job %s cannot use --gcp-master-image", jobName) 801 } 802 if hasArg("--gcp-nodes", args) { 803 return fmt.Errorf("with --provider=gke, job %s cannot use --gcp-nodes", jobName) 804 } 805 } 806 807 if hasArg("--deployment=gke", args) && !hasArg("--gcp-node-image", args) { 808 return fmt.Errorf("with --deployment=gke, job %s must use --gcp-node-image", jobName) 809 } 810 811 if hasArg("--env-file=jobs/pull-kubernetes-e2e.env", args) && 
hasArg("--check-leaked-resources", args) { 812 return fmt.Errorf("presubmit job %s should not check for resource leaks", jobName) 813 } 814 815 extracts := hasArg("--extract=", args) 816 sharedBuilds := hasArg("--use-shared-build", args) 817 nodeE2e := hasArg("--deployment=node", args) 818 localE2e := hasArg("--deployment=local", args) 819 builds := hasArg("--build", args) 820 821 if sharedBuilds && extracts { 822 return fmt.Errorf("e2e jobs %s cannot have --use-shared-build and --extract", jobName) 823 } 824 825 if !sharedBuilds && !extracts && !nodeE2e && !builds { 826 return fmt.Errorf("e2e jobs %s should get k8s build from one of --extract, --use-shared-build, --build or use --deployment=node", jobName) 827 } 828 829 expectedExtract := 1 830 if sharedBuilds || nodeE2e || builds { 831 expectedExtract = 0 832 } else if strings.Contains(jobName, "upgrade") || 833 strings.Contains(jobName, "skew") || 834 strings.Contains(jobName, "rollback") || 835 strings.Contains(jobName, "downgrade") || 836 jobName == "ci-kubernetes-e2e-gce-canary" { 837 expectedExtract = 2 838 } 839 840 numExtract := 0 841 for _, arg := range args { 842 if strings.HasPrefix(arg, "--extract=") { 843 numExtract++ 844 } 845 } 846 if numExtract != expectedExtract { 847 return fmt.Errorf("e2e jobs %s should have %d --extract flags, got %d", jobName, expectedExtract, numExtract) 848 } 849 850 if hasArg("--image-family", args) != hasArg("--image-project", args) { 851 return fmt.Errorf("e2e jobs %s should have both --image-family and --image-project, or none of them", jobName) 852 } 853 854 if strings.HasPrefix(jobName, "pull-kubernetes-") && 855 !nodeE2e && 856 !localE2e && 857 !strings.Contains(jobName, "kubeadm") { 858 stage := "gs://kubernetes-release-pull/ci/" + jobName 859 if strings.Contains(jobName, "gke") { 860 stage = "gs://kubernetes-release-dev/ci" 861 if !hasArg("--stage-suffix="+jobName, args) { 862 return fmt.Errorf("presubmit gke jobs %s - need to have --stage-suffix=%s", jobName, 
jobName) 863 } 864 } 865 866 if !sharedBuilds { 867 if !hasArg("--stage="+stage, args) { 868 return fmt.Errorf("presubmit jobs %s - need to stage to %s", jobName, stage) 869 } 870 } 871 } 872 873 return nil 874 } 875 876 // TestValidScenarioArgs makes sure all scenario args in job configs are valid 877 func TestValidScenarioArgs(t *testing.T) { 878 for _, job := range c.AllPresubmits(nil) { 879 if job.Spec != nil && !job.Decorate { 880 if err := checkScenarioArgs(job.Name, job.Spec.Containers[0].Image, job.Spec.Containers[0].Args); err != nil { 881 t.Errorf("Invalid Scenario Args : %s", err) 882 } 883 } 884 } 885 886 for _, job := range c.AllPostsubmits(nil) { 887 if job.Spec != nil && !job.Decorate { 888 if err := checkScenarioArgs(job.Name, job.Spec.Containers[0].Image, job.Spec.Containers[0].Args); err != nil { 889 t.Errorf("Invalid Scenario Args : %s", err) 890 } 891 } 892 } 893 894 for _, job := range c.AllPeriodics() { 895 if job.Spec != nil && !job.Decorate { 896 if err := checkScenarioArgs(job.Name, job.Spec.Containers[0].Image, job.Spec.Containers[0].Args); err != nil { 897 t.Errorf("Invalid Scenario Args : %s", err) 898 } 899 } 900 } 901 }