github.com/munnerz/test-infra@v0.0.0-20190108210205-ce3d181dc989/config/tests/jobs/jobs_test.go (about)

     1  /*
     2  Copyright 2018 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package tests
    18  
    19  // This file validates kubernetes's jobs configs.
    20  // See also prow/config/jobstests for generic job tests that
    21  // all deployments should consider using.
    22  
    23  import (
    24  	"bytes"
    25  	"errors"
    26  	"flag"
    27  	"fmt"
    28  	"os"
    29  	"path"
    30  	"strings"
    31  	"testing"
    32  	"time"
    33  
    34  	"k8s.io/api/core/v1"
    35  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    36  
    37  	cfg "k8s.io/test-infra/prow/config"
    38  	"k8s.io/test-infra/prow/kube"
    39  )
    40  
    41  var configPath = flag.String("config", "../../../prow/config.yaml", "Path to prow config")
    42  var jobConfigPath = flag.String("job-config", "../../jobs", "Path to prow job config")
    43  var gubernatorPath = flag.String("gubernator-path", "https://gubernator.k8s.io", "Path to linked gubernator")
    44  var bucket = flag.String("bucket", "kubernetes-jenkins", "Gcs bucket for log upload")
    45  var k8sProw = flag.Bool("k8s-prow", true, "If the config is for k8s prow cluster")
    46  
    47  // Loaded at TestMain.
    48  var c *cfg.Config
    49  
    50  func TestMain(m *testing.M) {
    51  	flag.Parse()
    52  	if *configPath == "" {
    53  		fmt.Println("--config must set")
    54  		os.Exit(1)
    55  	}
    56  
    57  	conf, err := cfg.Load(*configPath, *jobConfigPath)
    58  	if err != nil {
    59  		fmt.Printf("Could not load config: %v", err)
    60  		os.Exit(1)
    61  	}
    62  	c = conf
    63  
    64  	os.Exit(m.Run())
    65  }
    66  
    67  func TestReportTemplate(t *testing.T) {
    68  	var testcases = []struct {
    69  		org    string
    70  		repo   string
    71  		number int
    72  		suffix string
    73  	}{
    74  		{
    75  			org:    "o",
    76  			repo:   "r",
    77  			number: 4,
    78  			suffix: "o_r/4",
    79  		},
    80  		{
    81  			org:    "kubernetes",
    82  			repo:   "test-infra",
    83  			number: 123,
    84  			suffix: "test-infra/123",
    85  		},
    86  		{
    87  			org:    "kubernetes",
    88  			repo:   "kubernetes",
    89  			number: 123,
    90  			suffix: "123",
    91  		},
    92  		{
    93  			org:    "o",
    94  			repo:   "kubernetes",
    95  			number: 456,
    96  			suffix: "o_kubernetes/456",
    97  		},
    98  	}
    99  	for _, tc := range testcases {
   100  		var b bytes.Buffer
   101  		if err := c.Plank.ReportTemplate.Execute(&b, &kube.ProwJob{
   102  			Spec: kube.ProwJobSpec{
   103  				Refs: &kube.Refs{
   104  					Org:  tc.org,
   105  					Repo: tc.repo,
   106  					Pulls: []kube.Pull{
   107  						{
   108  							Number: tc.number,
   109  						},
   110  					},
   111  				},
   112  			},
   113  		}); err != nil {
   114  			t.Errorf("Error executing template: %v", err)
   115  			continue
   116  		}
   117  		expectedPath := *gubernatorPath + "/pr/" + tc.suffix
   118  		if !strings.Contains(b.String(), expectedPath) {
   119  			t.Errorf("Expected template to contain %s, but it didn't: %s", expectedPath, b.String())
   120  		}
   121  	}
   122  }
   123  
// TestURLTemplate checks that the plank job URL template produces the
// expected gubernator link (or GCS browser link for kubernetes-security
// jobs) for every prow job type.
func TestURLTemplate(t *testing.T) {
	testcases := []struct {
		name    string
		jobType kube.ProwJobType
		org     string
		repo    string
		job     string
		build   string
		expect  string
		k8sOnly bool // skip this case unless testing against the k8s prow config
	}{
		{
			name:    "k8s presubmit",
			jobType: kube.PresubmitJob,
			org:     "kubernetes",
			repo:    "kubernetes",
			job:     "k8s-pre-1",
			build:   "1",
			expect:  *gubernatorPath + "/build/" + *bucket + "/pr-logs/pull/0/k8s-pre-1/1/",
			k8sOnly: true,
		},
		{
			name:    "k8s-security presubmit",
			jobType: kube.PresubmitJob,
			org:     "kubernetes-security",
			repo:    "kubernetes",
			job:     "k8s-pre-1",
			build:   "1",
			expect:  "https://console.cloud.google.com/storage/browser/kubernetes-security-prow/pr-logs/pull/kubernetes-security_kubernetes/0/k8s-pre-1/1/",
			k8sOnly: true,
		},
		{
			name:    "k8s/test-infra presubmit",
			jobType: kube.PresubmitJob,
			org:     "kubernetes",
			repo:    "test-infra",
			job:     "ti-pre-1",
			build:   "1",
			expect:  *gubernatorPath + "/build/" + *bucket + "/pr-logs/pull/test-infra/0/ti-pre-1/1/",
			k8sOnly: true,
		},
		{
			name:    "foo/k8s presubmit",
			jobType: kube.PresubmitJob,
			org:     "foo",
			repo:    "kubernetes",
			job:     "k8s-pre-1",
			build:   "1",
			expect:  *gubernatorPath + "/build/" + *bucket + "/pr-logs/pull/foo_kubernetes/0/k8s-pre-1/1/",
		},
		{
			name:    "foo-bar presubmit",
			jobType: kube.PresubmitJob,
			org:     "foo",
			repo:    "bar",
			job:     "foo-pre-1",
			build:   "1",
			expect:  *gubernatorPath + "/build/" + *bucket + "/pr-logs/pull/foo_bar/0/foo-pre-1/1/",
		},
		{
			name:    "k8s postsubmit",
			jobType: kube.PostsubmitJob,
			org:     "kubernetes",
			repo:    "kubernetes",
			job:     "k8s-post-1",
			build:   "1",
			expect:  *gubernatorPath + "/build/" + *bucket + "/logs/k8s-post-1/1/",
		},
		{
			name:    "k8s periodic",
			jobType: kube.PeriodicJob,
			job:     "k8s-peri-1",
			build:   "1",
			expect:  *gubernatorPath + "/build/" + *bucket + "/logs/k8s-peri-1/1/",
		},
		{
			name:    "empty periodic",
			jobType: kube.PeriodicJob,
			job:     "nan-peri-1",
			build:   "1",
			expect:  *gubernatorPath + "/build/" + *bucket + "/logs/nan-peri-1/1/",
		},
		{
			name:    "k8s batch",
			jobType: kube.BatchJob,
			org:     "kubernetes",
			repo:    "kubernetes",
			job:     "k8s-batch-1",
			build:   "1",
			expect:  *gubernatorPath + "/build/" + *bucket + "/pr-logs/pull/batch/k8s-batch-1/1/",
			k8sOnly: true,
		},
		{
			name:    "foo bar batch",
			jobType: kube.BatchJob,
			org:     "foo",
			repo:    "bar",
			job:     "k8s-batch-1",
			build:   "1",
			expect:  *gubernatorPath + "/build/" + *bucket + "/pr-logs/pull/foo_bar/batch/k8s-batch-1/1/",
		},
	}

	for _, tc := range testcases {
		if !*k8sProw && tc.k8sOnly {
			continue
		}

		var pj = kube.ProwJob{
			ObjectMeta: metav1.ObjectMeta{Name: tc.name},
			Spec: kube.ProwJobSpec{
				Type: tc.jobType,
				Job:  tc.job,
			},
			Status: kube.ProwJobStatus{
				BuildID: tc.build,
			},
		}
		// Periodic jobs carry no refs; every other type references a repo
		// with a single (zero-numbered) pull.
		if tc.jobType != kube.PeriodicJob {
			pj.Spec.Refs = &kube.Refs{
				Pulls: []kube.Pull{{}},
				Org:   tc.org,
				Repo:  tc.repo,
			}
		}

		var b bytes.Buffer
		if err := c.Plank.JobURLTemplate.Execute(&b, &pj); err != nil {
			t.Fatalf("Error executing template: %v", err)
		}
		res := b.String()
		// The rendered URL must match exactly, not just contain the suffix.
		if res != tc.expect {
			t.Errorf("tc: %s, Expect URL: %s, got %s", tc.name, tc.expect, res)
		}
	}
}
   260  
   261  func checkContext(t *testing.T, repo string, p cfg.Presubmit) {
   262  	if !p.SkipReport && p.Name != p.Context {
   263  		t.Errorf("Context does not match job name: %s in %s", p.Name, repo)
   264  	}
   265  	for _, c := range p.RunAfterSuccess {
   266  		checkContext(t, repo, c)
   267  	}
   268  }
   269  
   270  func TestContextMatches(t *testing.T) {
   271  	for repo, presubmits := range c.Presubmits {
   272  		for _, p := range presubmits {
   273  			checkContext(t, repo, p)
   274  		}
   275  	}
   276  }
   277  
   278  func checkRetest(t *testing.T, repo string, presubmits []cfg.Presubmit) {
   279  	for _, p := range presubmits {
   280  		expected := fmt.Sprintf("/test %s", p.Name)
   281  		if p.RerunCommand != expected {
   282  			t.Errorf("%s in %s rerun_command: %s != expected: %s", repo, p.Name, p.RerunCommand, expected)
   283  		}
   284  		checkRetest(t, repo, p.RunAfterSuccess)
   285  	}
   286  }
   287  
   288  func TestRetestMatchJobsName(t *testing.T) {
   289  	for repo, presubmits := range c.Presubmits {
   290  		checkRetest(t, repo, presubmits)
   291  	}
   292  }
   293  
// SubmitQueueConfig models the subset of the submit-queue configuration
// consumed by the tests in this file.
type SubmitQueueConfig struct {
	// this is the only field we need for the tests below
	RequiredRetestContexts string `json:"required-retest-contexts"`
}
   298  
   299  func findRequired(t *testing.T, presubmits []cfg.Presubmit) []string {
   300  	var required []string
   301  	for _, p := range presubmits {
   302  		if !p.AlwaysRun {
   303  			continue
   304  		}
   305  		for _, r := range findRequired(t, p.RunAfterSuccess) {
   306  			required = append(required, r)
   307  		}
   308  		if p.SkipReport {
   309  			continue
   310  		}
   311  		required = append(required, p.Context)
   312  	}
   313  	return required
   314  }
   315  
   316  func TestTrustedJobs(t *testing.T) {
   317  	// TODO(fejta): allow each config/jobs/kubernetes/foo/foo-trusted.yaml
   318  	// that uses a foo-trusted cluster
   319  	const trusted = "test-infra-trusted"
   320  	trustedPath := path.Join(*jobConfigPath, "kubernetes", "test-infra", "test-infra-trusted.yaml")
   321  
   322  	// Presubmits may not use trusted clusters.
   323  	for _, pre := range c.AllPresubmits(nil) {
   324  		if pre.Cluster == trusted {
   325  			t.Errorf("%s: presubmits cannot use trusted clusters", pre.Name)
   326  		}
   327  	}
   328  
   329  	// Trusted postsubmits must be defined in trustedPath
   330  	for _, post := range c.AllPostsubmits(nil) {
   331  		if post.Cluster != trusted {
   332  			continue
   333  		}
   334  		if post.SourcePath != trustedPath {
   335  			t.Errorf("%s defined in %s may not run in trusted cluster", post.Name, post.SourcePath)
   336  		}
   337  	}
   338  
   339  	// Trusted periodics must be defined in trustedPath
   340  	for _, per := range c.AllPeriodics() {
   341  		if per.Cluster != trusted {
   342  			continue
   343  		}
   344  		if per.SourcePath != trustedPath {
   345  			t.Errorf("%s defined in %s may not run in trusted cluster", per.Name, per.SourcePath)
   346  		}
   347  	}
   348  }
   349  
   350  // Unit test jobs outside kubernetes-security do not use the security cluster
   351  // and that jobs inside kubernetes-security DO
   352  func TestConfigSecurityClusterRestricted(t *testing.T) {
   353  	for repo, jobs := range c.Presubmits {
   354  		if strings.HasPrefix(repo, "kubernetes-security/") {
   355  			for _, job := range jobs {
   356  				if job.Agent != "jenkins" && job.Cluster != "security" {
   357  					t.Fatalf("Jobs in kubernetes-security/* should use the security cluster! %s", job.Name)
   358  				}
   359  			}
   360  		} else {
   361  			for _, job := range jobs {
   362  				if job.Cluster == "security" {
   363  					t.Fatalf("Jobs not in kubernetes-security/* should not use the security cluster! %s", job.Name)
   364  				}
   365  			}
   366  		}
   367  	}
   368  	for repo, jobs := range c.Postsubmits {
   369  		if strings.HasPrefix(repo, "kubernetes-security/") {
   370  			for _, job := range jobs {
   371  				if job.Agent != "jenkins" && job.Cluster != "security" {
   372  					t.Fatalf("Jobs in kubernetes-security/* should use the security cluster! %s", job.Name)
   373  				}
   374  			}
   375  		} else {
   376  			for _, job := range jobs {
   377  				if job.Cluster == "security" {
   378  					t.Fatalf("Jobs not in kubernetes-security/* should not use the security cluster! %s", job.Name)
   379  				}
   380  			}
   381  		}
   382  	}
   383  	// TODO(bentheelder): this will need to be more complex if we ever add k-s periodic
   384  	for _, job := range c.AllPeriodics() {
   385  		if job.Cluster == "security" {
   386  			t.Fatalf("Jobs not in kubernetes-security/* should not use the security cluster! %s", job.Name)
   387  		}
   388  	}
   389  }
   390  
   391  // checkDockerSocketVolumes returns an error if any volume uses a hostpath
   392  // to the docker socket. we do not want to allow this
   393  func checkDockerSocketVolumes(volumes []v1.Volume) error {
   394  	for _, volume := range volumes {
   395  		if volume.HostPath != nil && volume.HostPath.Path == "/var/run/docker.sock" {
   396  			return errors.New("job uses HostPath with docker socket")
   397  		}
   398  	}
   399  	return nil
   400  }
   401  
   402  // Make sure jobs are not using the docker socket as a host path
   403  func TestJobDoesNotHaveDockerSocket(t *testing.T) {
   404  	for _, presubmit := range c.AllPresubmits(nil) {
   405  		if presubmit.Spec != nil {
   406  			if err := checkDockerSocketVolumes(presubmit.Spec.Volumes); err != nil {
   407  				t.Errorf("Error in presubmit: %v", err)
   408  			}
   409  		}
   410  	}
   411  
   412  	for _, postsubmit := range c.AllPostsubmits(nil) {
   413  		if postsubmit.Spec != nil {
   414  			if err := checkDockerSocketVolumes(postsubmit.Spec.Volumes); err != nil {
   415  				t.Errorf("Error in postsubmit: %v", err)
   416  			}
   417  		}
   418  	}
   419  
   420  	for _, periodic := range c.Periodics {
   421  		if periodic.Spec != nil {
   422  			if err := checkDockerSocketVolumes(periodic.Spec.Volumes); err != nil {
   423  				t.Errorf("Error in periodic: %v", err)
   424  			}
   425  		}
   426  	}
   427  }
   428  
   429  // checkLatestUsesImagePullPolicy returns an error if an image is a `latest-.*` tag,
   430  // but doesn't have imagePullPolicy: Always
   431  func checkLatestUsesImagePullPolicy(spec *v1.PodSpec) error {
   432  	for _, container := range spec.Containers {
   433  		if strings.Contains(container.Image, ":latest-") {
   434  			// If the job doesn't specify imagePullPolicy: Always,
   435  			// we aren't guaranteed to check for the latest version of the image.
   436  			if container.ImagePullPolicy != "Always" {
   437  				return errors.New("job uses latest- tag, but does not specify imagePullPolicy: Always")
   438  			}
   439  		}
   440  		if strings.HasSuffix(container.Image, ":latest") {
   441  			// The k8s default for `:latest` images is `imagePullPolicy: Always`
   442  			// Check the job didn't override
   443  			if container.ImagePullPolicy != "" && container.ImagePullPolicy != "Always" {
   444  				return errors.New("job uses latest tag, but does not specify imagePullPolicy: Always")
   445  			}
   446  		}
   447  
   448  	}
   449  	return nil
   450  }
   451  
   452  // Make sure jobs that use `latest-*` tags specify `imagePullPolicy: Always`
   453  func TestLatestUsesImagePullPolicy(t *testing.T) {
   454  	for _, presubmit := range c.AllPresubmits(nil) {
   455  		if presubmit.Spec != nil {
   456  			if err := checkLatestUsesImagePullPolicy(presubmit.Spec); err != nil {
   457  				t.Errorf("Error in presubmit %q: %v", presubmit.Name, err)
   458  			}
   459  		}
   460  	}
   461  
   462  	for _, postsubmit := range c.AllPostsubmits(nil) {
   463  		if postsubmit.Spec != nil {
   464  			if err := checkLatestUsesImagePullPolicy(postsubmit.Spec); err != nil {
   465  				t.Errorf("Error in postsubmit %q: %v", postsubmit.Name, err)
   466  			}
   467  		}
   468  	}
   469  
   470  	for _, periodic := range c.AllPeriodics() {
   471  		if periodic.Spec != nil {
   472  			if err := checkLatestUsesImagePullPolicy(periodic.Spec); err != nil {
   473  				t.Errorf("Error in periodic %q: %v", periodic.Name, err)
   474  			}
   475  		}
   476  	}
   477  }
   478  
   479  // checkKubekinsPresets returns an error if a spec references to kubekins-e2e|bootstrap image,
   480  // but doesn't use service preset or ssh preset
   481  func checkKubekinsPresets(jobName string, spec *v1.PodSpec, labels, validLabels map[string]string) error {
   482  	service := true
   483  	ssh := true
   484  
   485  	for _, container := range spec.Containers {
   486  		if strings.Contains(container.Image, "kubekins-e2e") || strings.Contains(container.Image, "bootstrap") {
   487  			service = false
   488  			for key, val := range labels {
   489  				if (key == "preset-gke-alpha-service" || key == "preset-service-account" || key == "preset-istio-service") && val == "true" {
   490  					service = true
   491  				}
   492  			}
   493  		}
   494  
   495  		scenario := ""
   496  		for _, arg := range container.Args {
   497  			if strings.HasPrefix(arg, "--scenario=") {
   498  				scenario = strings.TrimPrefix(arg, "--scenario=")
   499  			}
   500  		}
   501  
   502  		if scenario == "kubenetes_e2e" {
   503  			ssh = false
   504  			for key, val := range labels {
   505  				if (key == "preset-k8s-ssh" || key == "preset-aws-ssh") && val == "true" {
   506  					ssh = true
   507  				}
   508  			}
   509  		}
   510  	}
   511  
   512  	if !service {
   513  		return fmt.Errorf("cannot find service account preset")
   514  	}
   515  
   516  	if !ssh {
   517  		return fmt.Errorf("cannot find ssh preset")
   518  	}
   519  
   520  	for key, val := range labels {
   521  		if validVal, ok := validLabels[key]; !ok {
   522  			return fmt.Errorf("label %s is not a valid preset label", key)
   523  		} else if validVal != val {
   524  			return fmt.Errorf("label %s does not have valid value, have %s, expect %s", key, val, validVal)
   525  		}
   526  	}
   527  
   528  	return nil
   529  }
   530  
   531  // TestValidPresets makes sure all presets name starts with 'preset-', all job presets are valid,
   532  // and jobs that uses kubekins-e2e image has the right service account preset
   533  func TestValidPresets(t *testing.T) {
   534  	validLabels := map[string]string{}
   535  	for _, preset := range c.Presets {
   536  		for label, val := range preset.Labels {
   537  			if !strings.HasPrefix(label, "preset-") {
   538  				t.Errorf("Preset label %s - label name should start with 'preset-'", label)
   539  			} else if val != "true" {
   540  				t.Errorf("Preset label %s - label value should be true", label)
   541  			}
   542  			if _, ok := validLabels[label]; ok {
   543  				t.Errorf("Duplicated preset label : %s", label)
   544  			} else {
   545  				validLabels[label] = val
   546  			}
   547  		}
   548  	}
   549  
   550  	if !*k8sProw {
   551  		return
   552  	}
   553  
   554  	for _, presubmit := range c.AllPresubmits(nil) {
   555  		if presubmit.Spec != nil && !presubmit.Decorate {
   556  			if err := checkKubekinsPresets(presubmit.Name, presubmit.Spec, presubmit.Labels, validLabels); err != nil {
   557  				t.Errorf("Error in presubmit %q: %v", presubmit.Name, err)
   558  			}
   559  		}
   560  	}
   561  
   562  	for _, postsubmit := range c.AllPostsubmits(nil) {
   563  		if postsubmit.Spec != nil && !postsubmit.Decorate {
   564  			if err := checkKubekinsPresets(postsubmit.Name, postsubmit.Spec, postsubmit.Labels, validLabels); err != nil {
   565  				t.Errorf("Error in postsubmit %q: %v", postsubmit.Name, err)
   566  			}
   567  		}
   568  	}
   569  
   570  	for _, periodic := range c.AllPeriodics() {
   571  		if periodic.Spec != nil && !periodic.Decorate {
   572  			if err := checkKubekinsPresets(periodic.Name, periodic.Spec, periodic.Labels, validLabels); err != nil {
   573  				t.Errorf("Error in periodic %q: %v", periodic.Name, err)
   574  			}
   575  		}
   576  	}
   577  }
   578  
   579  func hasArg(wanted string, args []string) bool {
   580  	for _, arg := range args {
   581  		if strings.HasPrefix(arg, wanted) {
   582  			return true
   583  		}
   584  	}
   585  
   586  	return false
   587  }
   588  
   589  func checkScenarioArgs(jobName, imageName string, args []string) error {
   590  	// env files/scenarios validation
   591  	scenarioArgs := false
   592  	scenario := ""
   593  	for _, arg := range args {
   594  		if strings.HasPrefix(arg, "--env-file=") {
   595  			return fmt.Errorf("job %s: --env-file is deprecated, please migrate to presets %s", jobName, arg)
   596  		}
   597  
   598  		if arg == "--" {
   599  			scenarioArgs = true
   600  		}
   601  
   602  		if strings.HasPrefix(arg, "--scenario=") {
   603  			scenario = strings.TrimPrefix(arg, "--scenario=")
   604  		}
   605  	}
   606  
   607  	if scenario == "" {
   608  		entry := jobName
   609  		if strings.HasPrefix(jobName, "pull-security-kubernetes") {
   610  			entry = strings.Replace(entry, "pull-security-kubernetes", "pull-kubernetes", -1)
   611  		}
   612  
   613  		if !scenarioArgs {
   614  			if strings.Contains(imageName, "kubekins-e2e") ||
   615  				strings.Contains(imageName, "bootstrap") ||
   616  				strings.Contains(imageName, "gcloud-in-go") {
   617  				return fmt.Errorf("job %s: image %s uses bootstrap.py and need scenario args", jobName, imageName)
   618  			}
   619  			return nil
   620  		}
   621  
   622  	} else {
   623  		if _, err := os.Stat(fmt.Sprintf("../../../scenarios/%s.py", scenario)); err != nil {
   624  			return fmt.Errorf("job %s: scenario %s does not exist: %s", jobName, scenario, err)
   625  		}
   626  
   627  		if !scenarioArgs {
   628  			return fmt.Errorf("job %s: set --scenario=%s and will need scenario args", jobName, scenario)
   629  		}
   630  	}
   631  
   632  	// shared build args
   633  	use_shared_build_in_args := hasArg("--use-shared-build", args)
   634  	extract_in_args := hasArg("--extract", args)
   635  	build_in_args := hasArg("--build", args)
   636  
   637  	if use_shared_build_in_args && extract_in_args {
   638  		return fmt.Errorf("job %s: --use-shared-build and --extract cannot be combined", jobName)
   639  	}
   640  
   641  	if use_shared_build_in_args && build_in_args {
   642  		return fmt.Errorf("job %s: --use-shared-build and --build cannot be combined", jobName)
   643  	}
   644  
   645  	if scenario != "kubernetes_e2e" {
   646  		return nil
   647  	}
   648  
   649  	if hasArg("--provider=gke", args) {
   650  		if !hasArg("--deployment=gke", args) {
   651  			return fmt.Errorf("with --provider=gke, job %s must use --deployment=gke", jobName)
   652  		}
   653  		if hasArg("--gcp-master-image", args) {
   654  			return fmt.Errorf("with --provider=gke, job %s cannot use --gcp-master-image", jobName)
   655  		}
   656  		if hasArg("--gcp-nodes", args) {
   657  			return fmt.Errorf("with --provider=gke, job %s cannot use --gcp-nodes", jobName)
   658  		}
   659  	}
   660  
   661  	if hasArg("--deployment=gke", args) && !hasArg("--gcp-node-image", args) {
   662  		return fmt.Errorf("with --deployment=gke, job %s must use --gcp-node-image", jobName)
   663  	}
   664  
   665  	if hasArg("--stage=gs://kubernetes-release-pull", args) && hasArg("--check-leaked-resources", args) {
   666  		return fmt.Errorf("presubmit job %s should not check for resource leaks", jobName)
   667  	}
   668  
   669  	extracts := hasArg("--extract=", args)
   670  	sharedBuilds := hasArg("--use-shared-build", args)
   671  	nodeE2e := hasArg("--deployment=node", args)
   672  	localE2e := hasArg("--deployment=local", args)
   673  	builds := hasArg("--build", args)
   674  
   675  	if sharedBuilds && extracts {
   676  		return fmt.Errorf("e2e jobs %s cannot have --use-shared-build and --extract", jobName)
   677  	}
   678  
   679  	if !sharedBuilds && !extracts && !nodeE2e && !builds {
   680  		return fmt.Errorf("e2e jobs %s should get k8s build from one of --extract, --use-shared-build, --build or use --deployment=node", jobName)
   681  	}
   682  
   683  	expectedExtract := 1
   684  	if sharedBuilds || nodeE2e {
   685  		expectedExtract = 0
   686  	} else if builds && !extracts {
   687  		expectedExtract = 0
   688  	} else if strings.Contains(jobName, "ingress") {
   689  		expectedExtract = 1
   690  	} else if strings.Contains(jobName, "upgrade") ||
   691  		strings.Contains(jobName, "skew") ||
   692  		strings.Contains(jobName, "rollback") ||
   693  		strings.Contains(jobName, "downgrade") ||
   694  		jobName == "ci-kubernetes-e2e-gce-canary" {
   695  		expectedExtract = 2
   696  	}
   697  
   698  	numExtract := 0
   699  	for _, arg := range args {
   700  		if strings.HasPrefix(arg, "--extract=") {
   701  			numExtract++
   702  		}
   703  	}
   704  	if numExtract != expectedExtract {
   705  		return fmt.Errorf("e2e jobs %s should have %d --extract flags, got %d", jobName, expectedExtract, numExtract)
   706  	}
   707  
   708  	if hasArg("--image-family", args) != hasArg("--image-project", args) {
   709  		return fmt.Errorf("e2e jobs %s should have both --image-family and --image-project, or none of them", jobName)
   710  	}
   711  
   712  	if strings.HasPrefix(jobName, "pull-kubernetes-") &&
   713  		!nodeE2e &&
   714  		!localE2e &&
   715  		!strings.Contains(jobName, "kubeadm") {
   716  		stage := "gs://kubernetes-release-pull/ci/" + jobName
   717  		if strings.Contains(jobName, "gke") {
   718  			stage = "gs://kubernetes-release-dev/ci"
   719  			if !hasArg("--stage-suffix="+jobName, args) {
   720  				return fmt.Errorf("presubmit gke jobs %s - need to have --stage-suffix=%s", jobName, jobName)
   721  			}
   722  		}
   723  
   724  		if !sharedBuilds {
   725  			if !hasArg("--stage="+stage, args) {
   726  				return fmt.Errorf("presubmit jobs %s - need to stage to %s", jobName, stage)
   727  			}
   728  		}
   729  	}
   730  
   731  	// test_args should not have double slashes on ginkgo flags
   732  	for _, arg := range args {
   733  		ginkgo_args := ""
   734  		if strings.HasPrefix(arg, "--test_args=") {
   735  			split := strings.SplitN(arg, "=", 2)
   736  			ginkgo_args = split[1]
   737  		} else if strings.HasPrefix(arg, "--upgrade_args=") {
   738  			split := strings.SplitN(arg, "=", 2)
   739  			ginkgo_args = split[1]
   740  		}
   741  
   742  		if strings.Contains(ginkgo_args, "\\\\") {
   743  			return fmt.Errorf("jobs %s - double slashes in ginkgo args should be single slash now : arg %s", jobName, arg)
   744  		}
   745  	}
   746  
   747  	// timeout should be valid
   748  	bootstrap_timeout := 0 * time.Minute
   749  	kubetest_timeout := 0 * time.Minute
   750  	var err error
   751  	kubetest := false
   752  	for _, arg := range args {
   753  		if strings.HasPrefix(arg, "--timeout=") {
   754  			timeout := strings.SplitN(arg, "=", 2)[1]
   755  			if kubetest {
   756  				if kubetest_timeout, err = time.ParseDuration(timeout); err != nil {
   757  					return fmt.Errorf("jobs %s - invalid kubetest timeout : arg %s", jobName, arg)
   758  				}
   759  			} else {
   760  				if bootstrap_timeout, err = time.ParseDuration(timeout + "m"); err != nil {
   761  					return fmt.Errorf("jobs %s - invalid bootstrap timeout : arg %s", jobName, arg)
   762  				}
   763  			}
   764  		}
   765  
   766  		if arg == "--" {
   767  			kubetest = true
   768  		}
   769  	}
   770  
   771  	if bootstrap_timeout.Minutes()-kubetest_timeout.Minutes() < 20.0 {
   772  		return fmt.Errorf(
   773  			"jobs %s - kubetest timeout(%v), bootstrap timeout(%v): bootstrap timeout need to be 20min more than kubetest timeout!", jobName, kubetest_timeout, bootstrap_timeout)
   774  	}
   775  
   776  	return nil
   777  }
   778  
   779  // TestValidScenarioArgs makes sure all scenario args in job configs are valid
   780  func TestValidScenarioArgs(t *testing.T) {
   781  	for _, job := range c.AllPresubmits(nil) {
   782  		if job.Spec != nil && !job.Decorate {
   783  			if err := checkScenarioArgs(job.Name, job.Spec.Containers[0].Image, job.Spec.Containers[0].Args); err != nil {
   784  				t.Errorf("Invalid Scenario Args : %s", err)
   785  			}
   786  		}
   787  	}
   788  
   789  	for _, job := range c.AllPostsubmits(nil) {
   790  		if job.Spec != nil && !job.Decorate {
   791  			if err := checkScenarioArgs(job.Name, job.Spec.Containers[0].Image, job.Spec.Containers[0].Args); err != nil {
   792  				t.Errorf("Invalid Scenario Args : %s", err)
   793  			}
   794  		}
   795  	}
   796  
   797  	for _, job := range c.AllPeriodics() {
   798  		if job.Spec != nil && !job.Decorate {
   799  			if err := checkScenarioArgs(job.Name, job.Spec.Containers[0].Image, job.Spec.Containers[0].Args); err != nil {
   800  				t.Errorf("Invalid Scenario Args : %s", err)
   801  			}
   802  		}
   803  	}
   804  }