k8s.io/test-infra@v0.0.0-20240520184403-27c6b4c223d8/config/tests/jobs/jobs_test.go (about)

     1  /*
     2  Copyright 2018 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package tests
    18  
    19  // This file validates kubernetes's jobs configs.
    20  // See also from_prow_test.go for generic job tests that
    21  // all deployments should consider using.
    22  
    23  import (
    24  	"bytes"
    25  	"errors"
    26  	"flag"
    27  	"fmt"
    28  	"os"
    29  	"path"
    30  	"path/filepath"
    31  	"regexp"
    32  	"sort"
    33  	"strings"
    34  	"testing"
    35  	"time"
    36  
    37  	coreapi "k8s.io/api/core/v1"
    38  	"k8s.io/apimachinery/pkg/api/resource"
    39  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    40  	"k8s.io/apimachinery/pkg/util/sets"
    41  	"k8s.io/utils/strings/slices"
    42  
    43  	prowapi "sigs.k8s.io/prow/pkg/apis/prowjobs/v1"
    44  	cfg "sigs.k8s.io/prow/pkg/config"
    45  )
    46  
    47  var configPath = flag.String("config", "../../../config/prow/config.yaml", "Path to prow config")
    48  var jobConfigPath = flag.String("job-config", "../../jobs", "Path to prow job config")
    49  var deckPath = flag.String("deck-path", "https://prow.k8s.io", "Path to deck")
    50  var bucket = flag.String("bucket", "kubernetes-jenkins", "Gcs bucket for log upload")
    51  var k8sProw = flag.Bool("k8s-prow", true, "If the config is for k8s prow cluster")
    52  
    53  // Loaded at TestMain.
    54  var c *cfg.Config
    55  
    56  // TODO: (rjsadow) figure out a better way to incorporate all/any community clusters
    57  func isCritical(clusterName string) bool {
    58  	return clusterName == "k8s-infra-prow-build" || clusterName == "eks-prow-build-cluster"
    59  }
    60  
    61  func TestMain(m *testing.M) {
    62  	flag.Parse()
    63  	if *configPath == "" {
    64  		fmt.Println("--config must set")
    65  		os.Exit(1)
    66  	}
    67  
    68  	conf, err := cfg.Load(*configPath, *jobConfigPath, nil, "")
    69  	if err != nil {
    70  		fmt.Printf("Could not load config: %v", err)
    71  		os.Exit(1)
    72  	}
    73  	c = conf
    74  
    75  	os.Exit(m.Run())
    76  }
    77  
// TestReportTemplate verifies that Plank's per-repo report template renders
// a link to Deck's pr-history page carrying the org, repo, and PR number
// taken from the job's refs.
func TestReportTemplate(t *testing.T) {
	var testcases = []struct {
		org    string
		repo   string
		number int
		suffix string // expected query suffix of the pr-history link
	}{
		{
			org:    "o",
			repo:   "r",
			number: 4,
			suffix: "?org=o&repo=r&pr=4",
		},
		{
			org:    "kubernetes",
			repo:   "test-infra",
			number: 123,
			suffix: "?org=kubernetes&repo=test-infra&pr=123",
		},
		{
			org:    "kubernetes",
			repo:   "kubernetes",
			number: 123,
			suffix: "?org=kubernetes&repo=kubernetes&pr=123",
		},
		{
			org:    "o",
			repo:   "kubernetes",
			number: 456,
			suffix: "?org=o&repo=kubernetes&pr=456",
		},
	}
	for _, tc := range testcases {
		var b bytes.Buffer
		refs := &prowapi.Refs{
			Org:  tc.org,
			Repo: tc.repo,
			Pulls: []prowapi.Pull{
				{
					Number: tc.number,
				},
			},
		}

		// Render the repo-specific report template against a minimal ProwJob
		// carrying only the refs the template needs.
		reportTemplate := c.Plank.ReportTemplateForRepo(refs)
		if err := reportTemplate.Execute(&b, &prowapi.ProwJob{Spec: prowapi.ProwJobSpec{Refs: refs}}); err != nil {
			t.Errorf("Error executing template: %v", err)
			continue
		}
		// The rendered report must contain the Deck pr-history URL for this PR.
		expectedPath := *deckPath + "/pr-history" + tc.suffix
		if !strings.Contains(b.String(), expectedPath) {
			t.Errorf("Expected template to contain %s, but it didn't: %s", expectedPath, b.String())
		}
	}
}
   133  
// TestURLTemplate verifies that Plank's JobURLTemplate renders the expected
// Deck "view/gs" links for each job type (presubmit, postsubmit, periodic,
// batch) and repo combination. Cases marked k8sOnly encode bucket layouts
// specific to the k8s prow deployment and are skipped when --k8s-prow=false.
func TestURLTemplate(t *testing.T) {
	testcases := []struct {
		name    string
		jobType prowapi.ProwJobType
		org     string
		repo    string
		job     string
		build   string
		expect  string // full expected job URL
		k8sOnly bool   // only checked on the k8s prow deployment
	}{
		{
			name:    "k8s presubmit",
			jobType: prowapi.PresubmitJob,
			org:     "kubernetes",
			repo:    "kubernetes",
			job:     "k8s-pre-1",
			build:   "1",
			expect:  *deckPath + "/view/gs/" + *bucket + "/pr-logs/pull/0/k8s-pre-1/1/",
			k8sOnly: true,
		},
		{
			name:    "k8s/test-infra presubmit",
			jobType: prowapi.PresubmitJob,
			org:     "kubernetes",
			repo:    "test-infra",
			job:     "ti-pre-1",
			build:   "1",
			expect:  *deckPath + "/view/gs/" + *bucket + "/pr-logs/pull/test-infra/0/ti-pre-1/1/",
			k8sOnly: true,
		},
		{
			name:    "foo/k8s presubmit",
			jobType: prowapi.PresubmitJob,
			org:     "foo",
			repo:    "kubernetes",
			job:     "k8s-pre-1",
			build:   "1",
			expect:  *deckPath + "/view/gs/" + *bucket + "/pr-logs/pull/foo_kubernetes/0/k8s-pre-1/1/",
		},
		{
			name:    "foo-bar presubmit",
			jobType: prowapi.PresubmitJob,
			org:     "foo",
			repo:    "bar",
			job:     "foo-pre-1",
			build:   "1",
			expect:  *deckPath + "/view/gs/" + *bucket + "/pr-logs/pull/foo_bar/0/foo-pre-1/1/",
		},
		{
			name:    "k8s postsubmit",
			jobType: prowapi.PostsubmitJob,
			org:     "kubernetes",
			repo:    "kubernetes",
			job:     "k8s-post-1",
			build:   "1",
			expect:  *deckPath + "/view/gs/" + *bucket + "/logs/k8s-post-1/1/",
		},
		{
			name:    "k8s periodic",
			jobType: prowapi.PeriodicJob,
			job:     "k8s-peri-1",
			build:   "1",
			expect:  *deckPath + "/view/gs/" + *bucket + "/logs/k8s-peri-1/1/",
		},
		{
			name:    "empty periodic",
			jobType: prowapi.PeriodicJob,
			job:     "nan-peri-1",
			build:   "1",
			expect:  *deckPath + "/view/gs/" + *bucket + "/logs/nan-peri-1/1/",
		},
		{
			name:    "k8s batch",
			jobType: prowapi.BatchJob,
			org:     "kubernetes",
			repo:    "kubernetes",
			job:     "k8s-batch-1",
			build:   "1",
			expect:  *deckPath + "/view/gs/" + *bucket + "/pr-logs/pull/batch/k8s-batch-1/1/",
			k8sOnly: true,
		},
		{
			name:    "foo bar batch",
			jobType: prowapi.BatchJob,
			org:     "foo",
			repo:    "bar",
			job:     "k8s-batch-1",
			build:   "1",
			expect:  *deckPath + "/view/gs/" + *bucket + "/pr-logs/pull/foo_bar/batch/k8s-batch-1/1/",
		},
	}

	for _, tc := range testcases {
		if !*k8sProw && tc.k8sOnly {
			continue
		}

		// Build a minimal ProwJob matching the test case.
		var pj = prowapi.ProwJob{
			ObjectMeta: metav1.ObjectMeta{Name: tc.name},
			Spec: prowapi.ProwJobSpec{
				Type: tc.jobType,
				Job:  tc.job,
			},
			Status: prowapi.ProwJobStatus{
				BuildID: tc.build,
			},
		}
		// Periodics carry no refs; every other type gets refs with a single
		// zero-numbered pull so the template can render PR paths.
		if tc.jobType != prowapi.PeriodicJob {
			pj.Spec.Refs = &prowapi.Refs{
				Pulls: []prowapi.Pull{{}},
				Org:   tc.org,
				Repo:  tc.repo,
			}
		}

		var b bytes.Buffer
		if err := c.Plank.JobURLTemplate.Execute(&b, &pj); err != nil {
			t.Fatalf("Error executing template: %v", err)
		}
		res := b.String()
		if res != tc.expect {
			t.Errorf("tc: %s, Expect URL: %s, got %s", tc.name, tc.expect, res)
		}
	}
}
   260  
   261  func checkContext(t *testing.T, repo string, p cfg.Presubmit) {
   262  	if !p.SkipReport && p.Name != p.Context {
   263  		t.Errorf("Context does not match job name: %s in %s", p.Name, repo)
   264  	}
   265  }
   266  
   267  func TestContextMatches(t *testing.T) {
   268  	for repo, presubmits := range c.PresubmitsStatic {
   269  		for _, p := range presubmits {
   270  			checkContext(t, repo, p)
   271  		}
   272  	}
   273  }
   274  
   275  func checkRetest(t *testing.T, repo string, presubmits []cfg.Presubmit) {
   276  	for _, p := range presubmits {
   277  		expected := fmt.Sprintf("/test %s", p.Name)
   278  		if p.RerunCommand != expected {
   279  			t.Errorf("%s in %s rerun_command: %s != expected: %s", repo, p.Name, p.RerunCommand, expected)
   280  		}
   281  	}
   282  }
   283  
   284  func TestRetestMatchJobsName(t *testing.T) {
   285  	for repo, presubmits := range c.PresubmitsStatic {
   286  		checkRetest(t, repo, presubmits)
   287  	}
   288  }
   289  
// Enforce conventions for jobs that run in test-infra-trusted cluster:
// no presubmits may target it, and postsubmits/periodics may target it only
// when defined in an explicitly allow-listed config file.
func TestTrustedJobs(t *testing.T) {
	// TODO(fejta): allow each config/jobs/kubernetes/foo/foo-trusted.yaml
	// that uses a foo-trusted cluster
	const trusted = "test-infra-trusted"
	// Allow-list of config files whose jobs may run in the trusted cluster.
	trustedPaths := sets.Set[string]{}
	// This file contains most of the jobs that run in the trusted cluster:
	trustedPaths.Insert(path.Join(*jobConfigPath, "kubernetes", "test-infra", "test-infra-trusted.yaml"))
	// The Prow image publishing postsubmits also run in the trusted cluster:
	trustedPaths.Insert(path.Join(*jobConfigPath, "kubernetes-sigs", "prow", "prow-postsubmits.yaml"))

	// Presubmits may not use trusted clusters.
	for _, pre := range c.AllStaticPresubmits(nil) {
		if pre.Cluster == trusted {
			t.Errorf("%s: presubmits cannot use trusted clusters", pre.Name)
		}
	}

	// Trusted postsubmits must be defined in trustedPath
	for _, post := range c.AllStaticPostsubmits(nil) {
		if post.Cluster == trusted && !trustedPaths.Has(post.SourcePath) {
			t.Errorf("%s defined in %s may not run in trusted cluster", post.Name, post.SourcePath)
		}
	}

	// Trusted periodics must be defined in trustedPath
	for _, per := range c.AllPeriodics() {
		if per.Cluster == trusted && !trustedPaths.Has(per.SourcePath) {
			t.Errorf("%s defined in %s may not run in trusted cluster", per.Name, per.SourcePath)
		}
	}
}
   322  
// Enforce conventions for jobs that run in k8s-infra-prow-build-trusted cluster
func TestK8sInfraTrusted(t *testing.T) {
	jobsToFix := 0
	const trusted = "k8s-infra-prow-build-trusted"
	// Directories (with trailing separator, so HasPrefix matches whole path
	// components) whose jobs must run in the trusted cluster.
	trustedPath := path.Join(*jobConfigPath, "kubernetes", "sig-k8s-infra", "trusted") + "/"
	imagePushingDir := path.Join(*jobConfigPath, "image-pushing") + "/"

	errs := []error{}
	// Presubmits may not use this cluster
	for _, pre := range c.AllStaticPresubmits(nil) {
		if pre.Cluster == trusted {
			jobsToFix++
			errs = append(errs, fmt.Errorf("%s: presubmits may not run in trusted cluster: %s", pre.Name, trusted))
		}
	}

	// Postsubmits and periodics:
	// - jobs in config/jobs/image-pushing must run in cluster: k8s-infra-prow-build-trusted
	// - jobs in config/jobs/kubernetes/sig-k8s-infra/trusted must run in cluster: k8s-infra-prow-build-trusted
	// - jobs defined anywhere else may not run in cluster: k8s-infra-prow-build-trusted
	jobs := []cfg.JobBase{}
	for _, job := range c.AllStaticPostsubmits(nil) {
		jobs = append(jobs, job.JobBase)
	}
	for _, job := range c.AllPeriodics() {
		jobs = append(jobs, job.JobBase)
	}
	for _, job := range jobs {
		// Trusted-cluster membership and trusted-path membership must agree
		// in both directions.
		isTrustedCluster := job.Cluster == trusted
		isTrustedPath := strings.HasPrefix(job.SourcePath, imagePushingDir) || strings.HasPrefix(job.SourcePath, trustedPath)
		if isTrustedPath && !isTrustedCluster {
			jobsToFix++
			errs = append(errs, fmt.Errorf("%s defined in %s must run in cluster: %s", job.Name, job.SourcePath, trusted))
		} else if isTrustedCluster && !isTrustedPath {
			jobsToFix++
			errs = append(errs, fmt.Errorf("%s defined in %s may not run in cluster: %s", job.Name, job.SourcePath, trusted))
		}
	}
	for _, err := range errs {
		t.Errorf("%v", err)
	}
	t.Logf("summary: %4d/%4d jobs fail to meet k8s-infra-prow-build-trusted CI policy", jobsToFix, len(jobs))
}
   366  
// Jobs in config/jobs/image-pushing must
// - run on cluster: k8s-infra-prow-build-trusted
// - use a pinned version of gcr.io/k8s-staging-test-infra/image-builder
// - have sig-k8s-infra-gcb in their testgrid-dashboards annotation
func TestImagePushingJobs(t *testing.T) {
	jobsToFix := 0
	const trusted = "k8s-infra-prow-build-trusted"
	// Trailing separator so HasPrefix matches whole path components.
	imagePushingDir := path.Join(*jobConfigPath, "image-pushing") + "/"
	// staticJobsMatchingAll is defined elsewhere in this file; presumably it
	// collects every static job matching the predicate — confirm its source.
	jobs := staticJobsMatchingAll(func(job cfg.JobBase) bool {
		return strings.HasPrefix(job.SourcePath, imagePushingDir)
	})

	for _, job := range jobs {
		errs := []error{}
		// Only consider Pods
		if job.Spec == nil {
			continue
		}
		// Only consider jobs in config/jobs/image-pushing/...
		if !strings.HasPrefix(job.SourcePath, imagePushingDir) {
			continue
		}
		if err := validateImagePushingImage(job.Spec); err != nil {
			errs = append(errs, fmt.Errorf("%s defined in %s %w", job.Name, job.SourcePath, err))
		}
		if job.Cluster != trusted {
			errs = append(errs, fmt.Errorf("%s defined in %s must have cluster: %v, got: %v", job.Name, job.SourcePath, trusted, job.Cluster))
		}
		dashboardsString, ok := job.Annotations["testgrid-dashboards"]
		if !ok {
			errs = append(errs, fmt.Errorf("%s defined in %s must have annotation: %v, not found", job.Name, job.SourcePath, "testgrid-dashboards"))
		}
		// NOTE(review): when the annotation is missing, dashboardsString is
		// "" and the scan below reports a second error for the same omission.
		expectedDashboard := "sig-k8s-infra-gcb"
		foundDashboard := false
		for _, dashboardName := range strings.Split(dashboardsString, ",") {
			dashboardName = strings.TrimSpace(dashboardName)
			if dashboardName == expectedDashboard {
				foundDashboard = true
				break
			}
		}
		if !foundDashboard {
			errs = append(errs, fmt.Errorf("%s defined in %s must have %s in testgrid-dashboards annotation, got: %s", job.Name, job.SourcePath, expectedDashboard, dashboardsString))
		}
		if len(errs) > 0 {
			jobsToFix++
			for _, err := range errs {
				t.Errorf("%v", err)
			}
		}
	}
	t.Logf("summary: %4d/%4d jobs in config/jobs/image-pushing fail to meet CI policy", jobsToFix, len(jobs))
}
   420  
   421  func validateImagePushingImage(spec *coreapi.PodSpec) error {
   422  	const imagePushingImage = "gcr.io/k8s-staging-test-infra/image-builder"
   423  
   424  	for _, c := range spec.Containers {
   425  		if !strings.HasPrefix(c.Image, imagePushingImage+":") {
   426  			return fmt.Errorf("must use a pinned version of %s", imagePushingImage)
   427  		}
   428  	}
   429  
   430  	return nil
   431  }
   432  
// Restrict the use of specific secrets to certain jobs in config/jobs/<org>/<project>/<basename>.yaml
func TestTrustedJobSecretsRestricted(t *testing.T) {
	type labels map[string]string

	// getSecretsFromPreset returns every secret name referenced — through
	// secret volumes or env-var secretKeyRefs — by presets whose labels are
	// all present (with matching values) in the given label set.
	getSecretsFromPreset := func(labels labels) sets.Set[string] {
		secrets := sets.New[string]()
		for _, preset := range c.Presets {
			match := true
			for k, v1 := range preset.Labels {
				// check if a given list of labels matches all labels from this preset
				if v2, ok := labels[k]; !ok || v1 != v2 {
					match = false
					break
				}
			}
			if match {
				for _, v := range preset.Volumes {
					if v.VolumeSource.Secret != nil {
						secrets.Insert(v.VolumeSource.Secret.SecretName)
					}
				}
				for _, e := range preset.Env {
					if e.ValueFrom != nil && e.ValueFrom.SecretKeyRef != nil {
						secrets.Insert(e.ValueFrom.SecretKeyRef.Name)
					}
				}
			}
		}
		return secrets
	}

	// secretsRestricted maps "org/project" to the secrets restricted to that
	// project. isTrusted additionally restricts use to the project's
	// <project>-trusted.yaml file; allowedInPresubmit permits presubmit use.
	secretsRestricted := map[string]struct {
		secrets            sets.Set[string]
		isTrusted          bool
		allowedInPresubmit bool
	}{
		"kubernetes-sigs/sig-storage-local-static-provisioner": {secrets: sets.New[string]("sig-storage-local-static-provisioner-pusher"), isTrusted: true},
		"kubernetes-csi/csi-driver-nfs":                        {secrets: getSecretsFromPreset(labels{"preset-azure-cred": "true"}), allowedInPresubmit: true},
		"kubernetes-csi/csi-driver-smb":                        {secrets: getSecretsFromPreset(labels{"preset-azure-cred": "true"}), allowedInPresubmit: true},
		"kubernetes-sigs/azuredisk-csi-driver":                 {secrets: getSecretsFromPreset(labels{"preset-azure-cred": "true"}), allowedInPresubmit: true},
		"kubernetes-sigs/azurefile-csi-driver":                 {secrets: getSecretsFromPreset(labels{"preset-azure-cred": "true"}), allowedInPresubmit: true},
		"kubernetes-sigs/blob-csi-driver":                      {secrets: getSecretsFromPreset(labels{"preset-azure-cred": "true"}), allowedInPresubmit: true},
		"kubernetes-sigs/cloud-provider-azure":                 {secrets: getSecretsFromPreset(labels{"preset-azure-cred": "true"}), allowedInPresubmit: true},
		"kubernetes-sigs/image-builder":                        {secrets: getSecretsFromPreset(labels{"preset-azure-cred": "true"}), allowedInPresubmit: true},
		"kubernetes-sigs/secrets-store-csi-driver":             {secrets: getSecretsFromPreset(labels{"preset-azure-cred": "true"}), allowedInPresubmit: true},
		"kubernetes-sigs/sig-windows":                          {secrets: getSecretsFromPreset(labels{"preset-azure-cred": "true"}), allowedInPresubmit: true},
		"kubernetes/sig-cloud-provider":                        {secrets: getSecretsFromPreset(labels{"preset-azure-cred": "true"}), allowedInPresubmit: true},
		"kubernetes/sig-network":                               {secrets: getSecretsFromPreset(labels{"preset-azure-cred": "true"}), allowedInPresubmit: true},
		"kubernetes/sig-release":                               {secrets: getSecretsFromPreset(labels{"preset-azure-cred": "true"}), allowedInPresubmit: true},
		"kubernetes-sigs/cluster-api-provider-azure":           {secrets: getSecretsFromPreset(labels{"preset-azure-cred-only": "true"}), allowedInPresubmit: true},
		"kubernetes/sig-autoscaling":                           {secrets: getSecretsFromPreset(labels{"preset-azure-cred-only": "true"}), allowedInPresubmit: true},
	}
	// allSecrets is the union of every restricted secret across all projects.
	allSecrets := sets.Set[string]{}
	for _, s := range secretsRestricted {
		allSecrets.Insert(sets.List(s.secrets)...)
	}

	// isSecretUsedByContainer reports whether the container imports the
	// secret through an envFrom secretRef.
	isSecretUsedByContainer := func(secret string, container coreapi.Container) bool {
		if container.EnvFrom == nil {
			return false
		}
		for _, envFrom := range container.EnvFrom {
			if envFrom.SecretRef != nil && envFrom.SecretRef.Name == secret {
				return true
			}
		}
		return false
	}

	// isSecretUsed reports whether the job references the secret directly
	// (volumes, containers, init containers) or indirectly via a preset
	// matched by the job's labels.
	isSecretUsed := func(secret string, job cfg.JobBase) bool {
		if job.Spec == nil {
			return false
		}
		if job.Spec.Volumes != nil {
			for _, v := range job.Spec.Volumes {
				if v.VolumeSource.Secret != nil && v.VolumeSource.Secret.SecretName == secret {
					return true
				}
			}
		}
		if job.Spec.Containers != nil {
			for _, c := range job.Spec.Containers {
				if isSecretUsedByContainer(secret, c) {
					return true
				}
			}
		}
		if job.Spec.InitContainers != nil {
			for _, c := range job.Spec.InitContainers {
				if isSecretUsedByContainer(secret, c) {
					return true
				}
			}
		}
		// iterate all presets because they can also reference secrets
		secretsFromPreset := getSecretsFromPreset(labels(job.Labels))
		return secretsFromPreset.Has(secret)
	}

	// getJobOrgProjectBasename splits a job source path
	// (<jobConfigPath>/<org>/<project>/<basename>...) into its first three
	// components; empty strings are returned for shallower paths.
	getJobOrgProjectBasename := func(path string) (string, string, string) {
		cleanPath := strings.Trim(strings.TrimPrefix(path, *jobConfigPath), string(filepath.Separator))
		seps := strings.Split(cleanPath, string(filepath.Separator))
		if len(seps) <= 2 {
			return "", "", ""
		}
		return seps[0], seps[1], seps[2]
	}

	// Most presubmit jobs should not use any restricted secrets.
	for _, job := range c.AllStaticPresubmits(nil) {
		if job.Cluster != prowapi.DefaultClusterAlias {
			// check against default public cluster only
			continue
		}
		// check if this presubmit job is allowed to use the secret
		org, project, _ := getJobOrgProjectBasename(job.SourcePath)
		s, ok := secretsRestricted[filepath.Join(org, project)]
		allowedInPresubmit := ok && s.allowedInPresubmit
		for _, secret := range sets.List(allSecrets) {
			if isSecretUsed(secret, job.JobBase) && !allowedInPresubmit {
				t.Errorf("%q defined in %q may not use secret %q in %q cluster", job.Name, job.SourcePath, secret, job.Cluster)
			}
		}
	}

	// secretsCanUseByPath returns the restricted secrets a job at the given
	// source path is entitled to, or nil when it is entitled to none (either
	// its project has no entry, or the project is isTrusted and the job is
	// not defined in <project>-trusted.yaml).
	secretsCanUseByPath := func(path string) sets.Set[string] {
		org, project, basename := getJobOrgProjectBasename(path)
		s, ok := secretsRestricted[filepath.Join(org, project)]
		if !ok || (s.isTrusted && basename != fmt.Sprintf("%s-trusted.yaml", project)) {
			return nil
		}
		return s.secrets
	}

	// Postsubmit/periodic jobs defined in
	// config/jobs/<org>/<project>/<project>-trusted.yaml can and only can use restricted
	// secrets for <org>/repo>.
	jobs := []cfg.JobBase{}
	for _, job := range c.AllStaticPostsubmits(nil) {
		jobs = append(jobs, job.JobBase)
	}
	for _, job := range c.AllPeriodics() {
		jobs = append(jobs, job.JobBase)
	}
	for _, job := range jobs {
		if job.Cluster != prowapi.DefaultClusterAlias {
			// check against default public cluster only
			continue
		}
		secretsCanUse := secretsCanUseByPath(job.SourcePath)
		for _, secret := range sets.List(allSecrets) {
			if secretsCanUse != nil && secretsCanUse.Has(secret) {
				t.Logf("allow secret %v for job %s defined in %s", secret, job.Name, job.SourcePath)
				continue
			}
			if isSecretUsed(secret, job) {
				t.Errorf("%q defined in %q may not use secret %q in %q cluster", job.Name, job.SourcePath, secret, job.Cluster)
			}
		}
	}
}
   594  
   595  // checkDockerSocketVolumes returns an error if any volume uses a hostpath
   596  // to the docker socket. we do not want to allow this
   597  func checkDockerSocketVolumes(volumes []coreapi.Volume) error {
   598  	for _, volume := range volumes {
   599  		if volume.HostPath != nil && volume.HostPath.Path == "/var/run/docker.sock" {
   600  			return errors.New("job uses HostPath with docker socket")
   601  		}
   602  	}
   603  	return nil
   604  }
   605  
   606  // Make sure jobs are not using the docker socket as a host path
   607  func TestJobDoesNotHaveDockerSocket(t *testing.T) {
   608  	for _, presubmit := range c.AllStaticPresubmits(nil) {
   609  		if presubmit.Spec != nil {
   610  			if err := checkDockerSocketVolumes(presubmit.Spec.Volumes); err != nil {
   611  				t.Errorf("Error in presubmit: %v", err)
   612  			}
   613  		}
   614  	}
   615  
   616  	for _, postsubmit := range c.AllStaticPostsubmits(nil) {
   617  		if postsubmit.Spec != nil {
   618  			if err := checkDockerSocketVolumes(postsubmit.Spec.Volumes); err != nil {
   619  				t.Errorf("Error in postsubmit: %v", err)
   620  			}
   621  		}
   622  	}
   623  
   624  	for _, periodic := range c.Periodics {
   625  		if periodic.Spec != nil {
   626  			if err := checkDockerSocketVolumes(periodic.Spec.Volumes); err != nil {
   627  				t.Errorf("Error in periodic: %v", err)
   628  			}
   629  		}
   630  	}
   631  }
   632  
   633  // checkLatestUsesImagePullPolicy returns an error if an image is a `latest-.*` tag,
   634  // but doesn't have imagePullPolicy: Always
   635  func checkLatestUsesImagePullPolicy(spec *coreapi.PodSpec) error {
   636  	for _, container := range spec.Containers {
   637  		if strings.Contains(container.Image, ":latest-") {
   638  			// If the job doesn't specify imagePullPolicy: Always,
   639  			// we aren't guaranteed to check for the latest version of the image.
   640  			if container.ImagePullPolicy != "Always" {
   641  				return errors.New("job uses latest- tag, but does not specify imagePullPolicy: Always")
   642  			}
   643  		}
   644  		if strings.HasSuffix(container.Image, ":latest") {
   645  			// The k8s default for `:latest` images is `imagePullPolicy: Always`
   646  			// Check the job didn't override
   647  			if container.ImagePullPolicy != "" && container.ImagePullPolicy != "Always" {
   648  				return errors.New("job uses latest tag, but does not specify imagePullPolicy: Always")
   649  			}
   650  		}
   651  
   652  	}
   653  	return nil
   654  }
   655  
   656  // Make sure jobs that use `latest-*` tags specify `imagePullPolicy: Always`
   657  func TestLatestUsesImagePullPolicy(t *testing.T) {
   658  	for _, presubmit := range c.AllStaticPresubmits(nil) {
   659  		if presubmit.Spec != nil {
   660  			if err := checkLatestUsesImagePullPolicy(presubmit.Spec); err != nil {
   661  				t.Errorf("Error in presubmit %q: %v", presubmit.Name, err)
   662  			}
   663  		}
   664  	}
   665  
   666  	for _, postsubmit := range c.AllStaticPostsubmits(nil) {
   667  		if postsubmit.Spec != nil {
   668  			if err := checkLatestUsesImagePullPolicy(postsubmit.Spec); err != nil {
   669  				t.Errorf("Error in postsubmit %q: %v", postsubmit.Name, err)
   670  			}
   671  		}
   672  	}
   673  
   674  	for _, periodic := range c.AllPeriodics() {
   675  		if periodic.Spec != nil {
   676  			if err := checkLatestUsesImagePullPolicy(periodic.Spec); err != nil {
   677  				t.Errorf("Error in periodic %q: %v", periodic.Name, err)
   678  			}
   679  		}
   680  	}
   681  }
   682  
// checkKubekinsPresets returns an error if a spec references to kubekins-e2e|bootstrap image,
// but doesn't use service preset or ssh preset
// It also rejects any job label pair not present in validLabels.
// NOTE(review): jobName is accepted but unused by this check.
func checkKubekinsPresets(jobName string, spec *coreapi.PodSpec, labels map[string]string, validLabels map[string]bool) error {
	// Both flags start satisfied and are falsified per container when a
	// requirement is discovered; a matching job-level label re-satisfies it.
	service := true
	ssh := true

	for _, container := range spec.Containers {
		// kubekins-e2e/bootstrap based images require the
		// preset-service-account label.
		if strings.Contains(container.Image, "kubekins-e2e") || strings.Contains(container.Image, "bootstrap") {
			service = false
			for key, val := range labels {
				if key == "preset-service-account" && val == "true" {
					service = true
				}
			}
		}

		// Extract the bootstrap --scenario argument, if any.
		scenario := ""
		for _, arg := range container.Args {
			if strings.HasPrefix(arg, "--scenario=") {
				scenario = strings.TrimPrefix(arg, "--scenario=")
			}
		}

		// kubernetes_e2e scenarios additionally require an ssh preset
		// (either the k8s or the aws variant).
		if scenario == "kubernetes_e2e" {
			ssh = false
			for key, val := range labels {
				if (key == "preset-k8s-ssh" || key == "preset-aws-ssh") && val == "true" {
					ssh = true
				}
			}
		}
	}

	if !service {
		return fmt.Errorf("cannot find service account preset")
	}

	if !ssh {
		return fmt.Errorf("cannot find ssh preset")
	}

	// Every label on the job must be a known preset "key:value" pair.
	for key, val := range labels {
		pair := key + ":" + val
		if validVal, ok := validLabels[pair]; !ok || !validVal {
			return fmt.Errorf("key-value pair %s is not found in list of valid presets list", pair)
		}
	}

	return nil
}
   733  
// TestValidPresets makes sure all presets name starts with 'preset-', all job presets are valid,
// and jobs that uses kubekins-e2e image has the right service account preset
func TestValidPresets(t *testing.T) {
	// Collect every valid "label:value" preset pair, flagging duplicates and
	// labels missing the required "preset-" prefix.
	validLabels := map[string]bool{}
	for _, preset := range c.Presets {
		for label, val := range preset.Labels {
			if !strings.HasPrefix(label, "preset-") {
				t.Errorf("Preset label %s - label name should start with 'preset-'", label)
			}
			pair := label + ":" + val
			if _, ok := validLabels[pair]; ok {
				t.Errorf("Duplicated preset 'label:value' pair : %s", pair)
			} else {
				validLabels[pair] = true
			}
		}
	}

	// The kubekins preset checks below only apply to the k8s prow deployment.
	if !*k8sProw {
		return
	}

	// Only undecorated jobs are checked.
	// NOTE(review): *presubmit.Decorate assumes Decorate is non-nil after
	// config loading — confirm cfg.Load always defaults it.
	for _, presubmit := range c.AllStaticPresubmits(nil) {
		if presubmit.Spec != nil && !*presubmit.Decorate {
			if err := checkKubekinsPresets(presubmit.Name, presubmit.Spec, presubmit.Labels, validLabels); err != nil {
				t.Errorf("Error in presubmit %q: %v", presubmit.Name, err)
			}
		}
	}

	for _, postsubmit := range c.AllStaticPostsubmits(nil) {
		if postsubmit.Spec != nil && !*postsubmit.Decorate {
			if err := checkKubekinsPresets(postsubmit.Name, postsubmit.Spec, postsubmit.Labels, validLabels); err != nil {
				t.Errorf("Error in postsubmit %q: %v", postsubmit.Name, err)
			}
		}
	}

	for _, periodic := range c.AllPeriodics() {
		if periodic.Spec != nil && !*periodic.Decorate {
			if err := checkKubekinsPresets(periodic.Name, periodic.Spec, periodic.Labels, validLabels); err != nil {
				t.Errorf("Error in periodic %q: %v", periodic.Name, err)
			}
		}
	}
}
   780  
   781  func hasArg(wanted string, args []string) bool {
   782  	for _, arg := range args {
   783  		if strings.HasPrefix(arg, wanted) {
   784  			return true
   785  		}
   786  	}
   787  
   788  	return false
   789  }
   790  
   791  func checkBootstrapImage(jobName, imageName string) error {
   792  	if strings.Contains(imageName, "bootstrap") {
   793  		return fmt.Errorf("job %s: image %s has been decommissioned", jobName, imageName)
   794  	}
   795  	return nil
   796  }
   797  
// checkScenarioArgs validates the bootstrap/kubetest command-line arguments of
// a single non-decorated job. It rejects the deprecated --env-file flag and
// any scenario-based bootstrap.py invocation, and flags invalid combinations
// of build/extract flags. imageName is accepted but not inspected here.
func checkScenarioArgs(jobName, imageName string, args []string) error {
	// env files/scenarios validation
	scenarioArgs := false
	scenario := ""
	for _, arg := range args {
		if strings.HasPrefix(arg, "--env-file=") {
			return fmt.Errorf("job %s: --env-file is deprecated, please migrate to presets %s", jobName, arg)
		}

		// "--" separates bootstrap's own flags from the scenario's args.
		if arg == "--" {
			scenarioArgs = true
		}

		if strings.HasPrefix(arg, "--scenario=") {
			scenario = strings.TrimPrefix(arg, "--scenario=")
		}
	}

	// Scenario-based bootstrap.py jobs are fully deprecated: naming a scenario
	// or passing scenario args is an immediate error.
	if scenario != "" {
		return fmt.Errorf("job %s: scenario (%s) based bootstrap.py jobs are not supported",
			jobName, scenario)
	}
	if scenarioArgs {
		return fmt.Errorf("job %s: scenario based bootstrap.py jobs are not supported", jobName)
	}

	// shared build args
	// Note: hasArg matches by prefix, so "--extract" also covers
	// "--extract=..." and "--extract-ci-bucket=...".
	useSharedBuildInArgs := hasArg("--use-shared-build", args)
	extractInArgs := hasArg("--extract", args)
	buildInArgs := hasArg("--build", args)

	if useSharedBuildInArgs && extractInArgs {
		return fmt.Errorf("job %s: --use-shared-build and --extract cannot be combined", jobName)
	}

	if useSharedBuildInArgs && buildInArgs {
		return fmt.Errorf("job %s: --use-shared-build and --build cannot be combined", jobName)
	}

	// NOTE(review): scenario is always "" at this point (a non-empty scenario
	// returned an error above), so this condition always holds and the
	// function always returns nil here. Everything below is unreachable
	// legacy validation for kubernetes_e2e scenario jobs; consider removing
	// it — but verify first that something else in this file still uses the
	// "time" import before deleting the timeout code.
	if scenario != "kubernetes_e2e" {
		return nil
	}

	// GKE provider jobs must use the GKE deployment and cannot set
	// GCE-specific node/master image flags.
	if hasArg("--provider=gke", args) {
		if !hasArg("--deployment=gke", args) {
			return fmt.Errorf("with --provider=gke, job %s must use --deployment=gke", jobName)
		}
		if hasArg("--gcp-master-image", args) {
			return fmt.Errorf("with --provider=gke, job %s cannot use --gcp-master-image", jobName)
		}
		if hasArg("--gcp-nodes", args) {
			return fmt.Errorf("with --provider=gke, job %s cannot use --gcp-nodes", jobName)
		}
	}

	if hasArg("--deployment=gke", args) && !hasArg("--gcp-node-image", args) {
		return fmt.Errorf("with --deployment=gke, job %s must use --gcp-node-image", jobName)
	}

	if hasArg("--stage=gs://kubernetes-release-pull", args) && hasArg("--check-leaked-resources", args) {
		return fmt.Errorf("presubmit job %s should not check for resource leaks", jobName)
	}

	// An e2e job must get its k8s build from exactly one source.
	extracts := hasArg("--extract=", args)
	sharedBuilds := hasArg("--use-shared-build", args)
	nodeE2e := hasArg("--deployment=node", args)
	builds := hasArg("--build", args)

	if sharedBuilds && extracts {
		return fmt.Errorf("e2e jobs %s cannot have --use-shared-build and --extract", jobName)
	}

	if !sharedBuilds && !extracts && !nodeE2e && !builds {
		return fmt.Errorf("e2e jobs %s should get k8s build from one of --extract, --use-shared-build, --build or use --deployment=node", jobName)
	}

	// Upgrade/skew-style jobs need two --extract flags (old and new version);
	// shared-build, node, and build-only jobs need none; everything else
	// needs exactly one.
	expectedExtract := 1
	if sharedBuilds || nodeE2e {
		expectedExtract = 0
	} else if builds && !extracts {
		expectedExtract = 0
	} else if strings.Contains(jobName, "ingress") {
		expectedExtract = 1
	} else if strings.Contains(jobName, "upgrade") ||
		strings.Contains(jobName, "skew") ||
		strings.Contains(jobName, "rollback") ||
		strings.Contains(jobName, "downgrade") ||
		jobName == "ci-kubernetes-e2e-gce-canary" {
		expectedExtract = 2
	}

	numExtract := 0
	for _, arg := range args {
		if strings.HasPrefix(arg, "--extract=") {
			numExtract++
		}
	}
	if numExtract != expectedExtract {
		return fmt.Errorf("e2e jobs %s should have %d --extract flags, got %d", jobName, expectedExtract, numExtract)
	}

	// --image-family and --image-project must be used together.
	if hasArg("--image-family", args) != hasArg("--image-project", args) {
		return fmt.Errorf("e2e jobs %s should have both --image-family and --image-project, or none of them", jobName)
	}

	// test_args should not have double slashes on ginkgo flags
	for _, arg := range args {
		ginkgoArgs := ""
		if strings.HasPrefix(arg, "--test_args=") {
			split := strings.SplitN(arg, "=", 2)
			ginkgoArgs = split[1]
		} else if strings.HasPrefix(arg, "--upgrade_args=") {
			split := strings.SplitN(arg, "=", 2)
			ginkgoArgs = split[1]
		}

		if strings.Contains(ginkgoArgs, "\\\\") {
			return fmt.Errorf("jobs %s - double slashes in ginkgo args should be single slash now : arg %s", jobName, arg)
		}
	}

	// timeout should be valid
	// Bootstrap's --timeout is a bare number of minutes; kubetest's --timeout
	// (after "--") is a Go duration string. Bootstrap must be given at least
	// 20 minutes more than kubetest so teardown/cleanup can finish.
	bootstrapTimeout := 0 * time.Minute
	kubetestTimeout := 0 * time.Minute
	var err error
	kubetest := false
	for _, arg := range args {
		if strings.HasPrefix(arg, "--timeout=") {
			timeout := strings.SplitN(arg, "=", 2)[1]
			if kubetest {
				if kubetestTimeout, err = time.ParseDuration(timeout); err != nil {
					return fmt.Errorf("jobs %s - invalid kubetest timeout : arg %s", jobName, arg)
				}
			} else {
				if bootstrapTimeout, err = time.ParseDuration(timeout + "m"); err != nil {
					return fmt.Errorf("jobs %s - invalid bootstrap timeout : arg %s", jobName, arg)
				}
			}
		}

		if arg == "--" {
			kubetest = true
		}
	}

	if bootstrapTimeout.Minutes()-kubetestTimeout.Minutes() < 20.0 {
		return fmt.Errorf(
			"jobs %s - kubetest timeout(%v), bootstrap timeout(%v): bootstrap timeout need to be 20min more than kubetest timeout!", jobName, kubetestTimeout, bootstrapTimeout)
	}

	return nil
}
   950  
   951  func TestPreSubmitPathAlias(t *testing.T) {
   952  	for _, job := range c.AllStaticPresubmits([]string{"kubernetes/kubernetes"}) {
   953  		if job.PathAlias != "k8s.io/kubernetes" {
   954  			t.Errorf("Invalid PathAlias (%s) in job %s for kubernetes/kubernetes repository", job.Name, job.PathAlias)
   955  		}
   956  	}
   957  }
   958  
   959  // TestValidScenarioArgs makes sure all scenario args in job configs are valid
   960  func TestValidScenarioArgs(t *testing.T) {
   961  	for _, job := range c.AllStaticPresubmits(nil) {
   962  		if job.Spec != nil && !*job.Decorate {
   963  			if err := checkScenarioArgs(job.Name, job.Spec.Containers[0].Image, job.Spec.Containers[0].Args); err != nil {
   964  				t.Errorf("Invalid Scenario Args : %s", err)
   965  			}
   966  		}
   967  		if err := checkBootstrapImage(job.Name, job.Spec.Containers[0].Image); err != nil {
   968  			t.Errorf("Invalid image : %s", err)
   969  		}
   970  	}
   971  
   972  	for _, job := range c.AllStaticPostsubmits(nil) {
   973  		if job.Spec != nil && !*job.Decorate {
   974  			if err := checkScenarioArgs(job.Name, job.Spec.Containers[0].Image, job.Spec.Containers[0].Args); err != nil {
   975  				t.Errorf("Invalid Scenario Args : %s", err)
   976  			}
   977  		}
   978  		if err := checkBootstrapImage(job.Name, job.Spec.Containers[0].Image); err != nil {
   979  			t.Errorf("Invalid image : %s", err)
   980  		}
   981  	}
   982  
   983  	for _, job := range c.AllPeriodics() {
   984  		if job.Spec != nil && !*job.Decorate {
   985  			if err := checkScenarioArgs(job.Name, job.Spec.Containers[0].Image, job.Spec.Containers[0].Args); err != nil {
   986  				t.Errorf("Invalid Scenario Args : %s", err)
   987  			}
   988  		}
   989  		if err := checkBootstrapImage(job.Name, job.Spec.Containers[0].Image); err != nil {
   990  			t.Errorf("Invalid image : %s", err)
   991  		}
   992  	}
   993  }
   994  
   995  type jobBasePredicate func(job cfg.JobBase) bool
   996  
   997  func allStaticJobs() []cfg.JobBase {
   998  	jobs := []cfg.JobBase{}
   999  	for _, job := range c.AllStaticPresubmits(nil) {
  1000  		jobs = append(jobs, job.JobBase)
  1001  	}
  1002  	for _, job := range c.AllStaticPostsubmits(nil) {
  1003  		jobs = append(jobs, job.JobBase)
  1004  	}
  1005  	for _, job := range c.AllPeriodics() {
  1006  		jobs = append(jobs, job.JobBase)
  1007  	}
  1008  	sort.Slice(jobs, func(i, j int) bool {
  1009  		return jobs[i].Name < jobs[j].Name
  1010  	})
  1011  	return jobs
  1012  }
  1013  
  1014  func staticJobsMatchingAll(predicates ...jobBasePredicate) []cfg.JobBase {
  1015  	jobs := allStaticJobs()
  1016  	matchingJobs := []cfg.JobBase{}
  1017  	for _, job := range jobs {
  1018  		matched := true
  1019  		for _, p := range predicates {
  1020  			if !p(job) {
  1021  				matched = false
  1022  				break
  1023  			}
  1024  		}
  1025  		if matched {
  1026  			matchingJobs = append(matchingJobs, job)
  1027  		}
  1028  	}
  1029  	return matchingJobs
  1030  }
  1031  
  1032  func verifyPodQOSGuaranteed(spec *coreapi.PodSpec, required bool) (errs []error) {
  1033  	should := "should"
  1034  	if required {
  1035  		should = "must"
  1036  	}
  1037  	resourceNames := []coreapi.ResourceName{
  1038  		coreapi.ResourceCPU,
  1039  		coreapi.ResourceMemory,
  1040  	}
  1041  	zero := resource.MustParse("0")
  1042  	for _, c := range spec.Containers {
  1043  		for _, r := range resourceNames {
  1044  			limit, ok := c.Resources.Limits[r]
  1045  			if !ok {
  1046  				errs = append(errs, fmt.Errorf("container '%v' %v have resources.limits[%v] specified", c.Name, should, r))
  1047  			}
  1048  			request, ok := c.Resources.Requests[r]
  1049  			if !ok {
  1050  				errs = append(errs, fmt.Errorf("container '%v' %v have resources.requests[%v] specified", c.Name, should, r))
  1051  			}
  1052  			if limit.Cmp(zero) == 0 {
  1053  				errs = append(errs, fmt.Errorf("container '%v' resources.limits[%v] %v be non-zero", c.Name, r, should))
  1054  			} else if limit.Cmp(request) != 0 {
  1055  				errs = append(errs, fmt.Errorf("container '%v' resources.limits[%v] (%v) %v match request (%v)", c.Name, r, limit.String(), should, request.String()))
  1056  			}
  1057  		}
  1058  	}
  1059  	return errs
  1060  }
  1061  
  1062  // A job is merge-blocking if it:
  1063  // - is not optional
  1064  // - reports (aka does not skip reporting)
  1065  // - always runs OR runs if some path changed
  1066  func isMergeBlocking(job cfg.Presubmit) bool {
  1067  	return !job.Optional && !job.SkipReport && (job.AlwaysRun || job.RunIfChanged != "" || job.SkipIfOnlyChanged != "")
  1068  }
  1069  
  1070  func isKubernetesReleaseBlocking(job cfg.JobBase) bool {
  1071  	re := regexp.MustCompile(`sig-release-(1.[0-9]{2}|master)-blocking`)
  1072  	dashboards, ok := job.Annotations["testgrid-dashboards"]
  1073  	if !ok {
  1074  		return false
  1075  	}
  1076  	return re.MatchString(dashboards)
  1077  }
  1078  
  1079  func TestKubernetesMergeBlockingJobsCIPolicy(t *testing.T) {
  1080  	jobsToFix := 0
  1081  	repo := "kubernetes/kubernetes"
  1082  	jobs := c.AllStaticPresubmits([]string{repo})
  1083  	sort.Slice(jobs, func(i, j int) bool {
  1084  		return jobs[i].Name < jobs[j].Name
  1085  	})
  1086  	for _, job := range jobs {
  1087  		// Only consider Pods that are merge-blocking
  1088  		if job.Spec == nil || !isMergeBlocking(job) {
  1089  			continue
  1090  		}
  1091  		// job Pod must qualify for Guaranteed QoS
  1092  		errs := verifyPodQOSGuaranteed(job.Spec, true)
  1093  		if !isCritical(job.Cluster) {
  1094  			errs = append(errs, fmt.Errorf("must run in cluster: k8s-infra-prow-build or eks-prow-build-cluster, found: %v", job.Cluster))
  1095  		}
  1096  		branches := job.Branches
  1097  		if len(errs) > 0 {
  1098  			jobsToFix++
  1099  		}
  1100  		for _, err := range errs {
  1101  			t.Errorf("%v (%v): %v", job.Name, branches, err)
  1102  		}
  1103  	}
  1104  	t.Logf("summary: %4d/%4d jobs fail to meet kubernetes/kubernetes merge-blocking CI policy", jobsToFix, len(jobs))
  1105  }
  1106  
  1107  func TestClusterName(t *testing.T) {
  1108  	jobsToFix := 0
  1109  	jobs := allStaticJobs()
  1110  	for _, job := range jobs {
  1111  		// Useful for identifiying how many jobs are running a specific cluster by omitting from this list
  1112  		validClusters := []string{"default", "test-infra-trusted", "k8s-infra-kops-prow-build", "k8s-infra-prow-build", "k8s-infra-prow-build-trusted", "eks-prow-build-cluster"}
  1113  		if !slices.Contains(validClusters, job.Cluster) || job.Cluster == "" {
  1114  			err := fmt.Errorf("must run in one of these clusters: %v, found: %v", validClusters, job.Cluster)
  1115  			t.Errorf("%v: %v", job.Name, err)
  1116  			jobsToFix++
  1117  		}
  1118  
  1119  	}
  1120  	t.Logf("summary: %4d/%4d jobs fail to meet sig-k8s-infra cluster name policy", jobsToFix, len(jobs))
  1121  }
  1122  func TestKubernetesReleaseBlockingJobsCIPolicy(t *testing.T) {
  1123  	jobsToFix := 0
  1124  	jobs := allStaticJobs()
  1125  	for _, job := range jobs {
  1126  		// Only consider Pods that are release-blocking
  1127  		if job.Spec == nil || !isKubernetesReleaseBlocking(job) {
  1128  			continue
  1129  		}
  1130  		// job Pod must qualify for Guaranteed QoS
  1131  		errs := verifyPodQOSGuaranteed(job.Spec, true)
  1132  		if !isCritical(job.Cluster) {
  1133  			errs = append(errs, fmt.Errorf("must run in cluster: k8s-infra-prow-build or eks-prow-build-cluster, found: %v", job.Cluster))
  1134  		}
  1135  		if len(errs) > 0 {
  1136  			jobsToFix++
  1137  		}
  1138  		for _, err := range errs {
  1139  			t.Errorf("%v: %v", job.Name, err)
  1140  		}
  1141  	}
  1142  	t.Logf("summary: %4d/%4d jobs fail to meet kubernetes/kubernetes release-blocking CI policy", jobsToFix, len(jobs))
  1143  }
  1144  
  1145  func TestK8sInfraProwBuildJobsCIPolicy(t *testing.T) {
  1146  	jobsToFix := 0
  1147  	jobs := allStaticJobs()
  1148  	for _, job := range jobs {
  1149  		if job.Spec == nil || !isCritical(job.Cluster) {
  1150  			continue
  1151  		}
  1152  		// job Pod must qualify for Guaranteed QoS
  1153  		errs := verifyPodQOSGuaranteed(job.Spec, true)
  1154  		if len(errs) > 0 {
  1155  			jobsToFix++
  1156  		}
  1157  		for _, err := range errs {
  1158  			t.Errorf("%v: %v", job.Name, err)
  1159  		}
  1160  	}
  1161  	t.Logf("summary: %4d/%4d jobs fail to meet k8s-infra-prow-build CI policy", jobsToFix, len(jobs))
  1162  }
  1163  
  1164  // Fast builds take 20-30m, cross builds take 90m-2h. We want to pick up builds
  1165  // containing the latest merged PRs as soon as possible for the in-development release
  1166  func TestSigReleaseMasterBlockingOrInformingJobsMustUseFastBuilds(t *testing.T) {
  1167  	jobsToFix := 0
  1168  	jobs := allStaticJobs()
  1169  	for _, job := range jobs {
  1170  		dashboards, ok := job.Annotations["testgrid-dashboards"]
  1171  		if !ok || !strings.Contains(dashboards, "sig-release-master-blocking") || !strings.Contains(dashboards, "sig-release-master-informing") {
  1172  			continue
  1173  		}
  1174  		errs := []error{}
  1175  		extract := ""
  1176  		for _, arg := range job.Spec.Containers[0].Args {
  1177  			if strings.HasPrefix(arg, "--extract=") {
  1178  				extract = strings.TrimPrefix(arg, "--extract=")
  1179  				if extract != "ci/fast/latest-fast" {
  1180  					errs = append(errs, fmt.Errorf("release-master-blocking e2e jobs must use --extract=ci/fast/latest-fast, found --extract=%s instead", extract))
  1181  				}
  1182  			}
  1183  		}
  1184  		for _, err := range errs {
  1185  			t.Errorf("%v: %v", job.Name, err)
  1186  		}
  1187  	}
  1188  	t.Logf("summary: %4d/%4d jobs fail to meet release-master-blocking CI policy", jobsToFix, len(jobs))
  1189  }
  1190  
  1191  // matches regex used by the "version" extractMode defined in kubetest/extract_k8s.go
  1192  var kubetestVersionExtractModeRegex = regexp.MustCompile(`^(v\d+\.\d+\.\d+[\w.\-+]*)$`)
  1193  
  1194  // extractUsesCIBucket returns true if kubetest --extract=foo
  1195  // would use the value of --extract-ci-bucket, false otherwise
  1196  func extractUsesCIBucket(extract string) bool {
  1197  	if strings.HasPrefix(extract, "ci/") || strings.HasPrefix(extract, "gci/") {
  1198  		return true
  1199  	}
  1200  	mat := kubetestVersionExtractModeRegex.FindStringSubmatch(extract)
  1201  	if mat != nil {
  1202  		version := mat[1]
  1203  		// non-gke versions that include a + are CI builds
  1204  		return !strings.Contains(version, "-gke.") && strings.Contains(version, "+")
  1205  	}
  1206  	return false
  1207  }
  1208  
  1209  // extractUsesReleaseBucket returns true if kubetest --extract=foo
  1210  // would use the value of --extract-release-bucket, false otherwise
  1211  func extractUsesReleaseBucket(extract string) bool {
  1212  	if strings.HasPrefix(extract, "release/") {
  1213  		return true
  1214  	}
  1215  	mat := kubetestVersionExtractModeRegex.FindStringSubmatch(extract)
  1216  	if mat != nil {
  1217  		version := mat[1]
  1218  		// non-gke versions that lack a + are release builds
  1219  		return !strings.Contains(version, "-gke.") && !strings.Contains(version, "+")
  1220  	}
  1221  	return false
  1222  }
  1223  
  1224  // To help with migration to community-owned buckets for CI and release artifacts:
  1225  // - jobs using --extract=ci/fast/latest-fast MUST pull from gs://k8s-release-dev
  1226  // - release-blocking jobs using --extract=ci/*  MUST from pull gs://k8s-release-dev
  1227  // TODO(https://github.com/kubernetes/k8s.io/issues/846): switch from SHOULD to MUST once all jobs migrated
  1228  // - jobs using --extract=ci/* SHOULD pull from gs://k8s-release-dev
  1229  // TODO(https://github.com/kubernetes/k8s.io/issues/1569): start warning once gs://k8s-release populated
  1230  // - jobs using --extract=release/* SHOULD pull from gs://k8s-release
  1231  func TestKubernetesE2eJobsMustExtractFromK8sInfraBuckets(t *testing.T) {
  1232  	jobsToFix := 0
  1233  	jobs := allStaticJobs()
  1234  	for _, job := range jobs {
  1235  		needsFix := false
  1236  		extracts := []string{}
  1237  		const (
  1238  			defaultCIBucket       = "k8s-release-dev" // ensure this matches kubetest --extract-ci-bucket default
  1239  			expectedCIBucket      = "k8s-release-dev"
  1240  			defaultReleaseBucket  = "kubernetes-release" // ensure this matches kubetest --extract-release-bucket default
  1241  			expectedReleaseBucket = "k8s-release"
  1242  			k8sReleaseIsPopulated = false // TODO(kubernetes/k8s.io#1569): drop this once gs://k8s-release populated
  1243  		)
  1244  		ciBucket := defaultCIBucket
  1245  		releaseBucket := defaultReleaseBucket
  1246  		for _, container := range job.Spec.Containers {
  1247  			for _, arg := range container.Args {
  1248  				if strings.HasPrefix(arg, "--extract=") {
  1249  					extracts = append(extracts, strings.TrimPrefix(arg, "--extract="))
  1250  				}
  1251  				if strings.HasPrefix(arg, "--extract-ci-bucket=") {
  1252  					ciBucket = strings.TrimPrefix(arg, "--extract-ci-bucket=")
  1253  				}
  1254  				if strings.HasPrefix(arg, "--extract-release-bucket=") {
  1255  					releaseBucket = strings.TrimPrefix(arg, "--extract-release-bucket=")
  1256  				}
  1257  			}
  1258  			for _, extract := range extracts {
  1259  				fail := false
  1260  				if extractUsesCIBucket(extract) && ciBucket != expectedCIBucket {
  1261  					needsFix = true
  1262  					jobDesc := "jobs"
  1263  					fail = extract == "ci/fast/latest-fast"
  1264  					if isKubernetesReleaseBlocking(job) {
  1265  						fail = true
  1266  						jobDesc = "release-blocking jobs"
  1267  					}
  1268  					msg := fmt.Sprintf("%s: %s using --extract=%s must have --extract-ci-bucket=%s", job.Name, jobDesc, extract, expectedCIBucket)
  1269  					if fail {
  1270  						t.Errorf("FAIL - %s", msg)
  1271  					} else {
  1272  						t.Logf("WARN - %s", msg)
  1273  					}
  1274  				}
  1275  				if k8sReleaseIsPopulated && extractUsesReleaseBucket(extract) && releaseBucket != expectedReleaseBucket {
  1276  					needsFix = true
  1277  					jobDesc := "jobs"
  1278  					if isKubernetesReleaseBlocking(job) {
  1279  						fail = true
  1280  						jobDesc = "release-blocking jobs"
  1281  					}
  1282  					fail := isKubernetesReleaseBlocking(job)
  1283  					msg := fmt.Sprintf("%s: %s using --extract=%s must have --extract-release-bucket=%s", job.Name, jobDesc, extract, expectedCIBucket)
  1284  					if fail {
  1285  						t.Errorf("FAIL - %s", msg)
  1286  					} else {
  1287  						t.Logf("WARN - %s", msg)
  1288  					}
  1289  				}
  1290  			}
  1291  		}
  1292  		if needsFix {
  1293  			jobsToFix++
  1294  		}
  1295  	}
  1296  	t.Logf("summary: %4d/%4d jobs should be updated to pull from community-owned gcs buckets", jobsToFix, len(jobs))
  1297  }
  1298  
  1299  // Prow jobs should use pod-utils instead of relying on bootstrap
  1300  // https://github.com/kubernetes/test-infra/issues/20760
  1301  func TestKubernetesProwJobsShouldUsePodUtils(t *testing.T) {
  1302  	jobsToFix := 0
  1303  	jobs := allStaticJobs()
  1304  	for _, job := range jobs {
  1305  		// Only consider Pods
  1306  		if job.Spec == nil {
  1307  			continue
  1308  		}
  1309  		if !*job.Decorate {
  1310  			// bootstrap jobs don't use multiple containers
  1311  			container := job.Spec.Containers[0]
  1312  			repos := []string{}
  1313  			scenario := ""
  1314  			for _, arg := range container.Args {
  1315  				if strings.HasPrefix(arg, "--repo=") {
  1316  					repos = append(repos, strings.TrimPrefix(arg, "--repo="))
  1317  				}
  1318  				if strings.HasPrefix(arg, "--scenario=") {
  1319  					scenario = strings.TrimPrefix(arg, "--scenario=")
  1320  				}
  1321  			}
  1322  			jobsToFix++
  1323  			if len(repos) > 0 {
  1324  				t.Logf("%v: %v: should use pod-utils, found bootstrap args to clone: %v", job.SourcePath, job.Name, repos)
  1325  			} else if scenario != "" {
  1326  				t.Logf("%v: %v: should use pod-utils, found --scenario=%v, implies clone: [kubernetes/test-infra]", job.SourcePath, job.Name, scenario)
  1327  			} else {
  1328  				t.Logf("%v: %v: should use pod-utils, unknown case", job.SourcePath, job.Name)
  1329  			}
  1330  		}
  1331  	}
  1332  	t.Logf("summary: %4d/%4d jobs do not use pod-utils", jobsToFix, len(jobs))
  1333  }
  1334  
  1335  // Prow jobs should use kubetest2 instead of deprecated scenarios
  1336  // https://github.com/kubernetes/test-infra/tree/master/scenarios#deprecation-notice
  1337  func TestKubernetesProwJobsShouldNotUseDeprecatedScenarios(t *testing.T) {
  1338  	jobsToFix := 0
  1339  	jobs := allStaticJobs()
  1340  	for _, job := range jobs {
  1341  		// Only consider Pods
  1342  		if job.Spec == nil {
  1343  			continue
  1344  		}
  1345  		// bootstrap jobs don't use multiple containers
  1346  		container := job.Spec.Containers[0]
  1347  		// might also be good proxy for "relies on bootstrap"
  1348  		// if strings.Contains(container.Image, "kubekins-e2e") || strings.Contains(container.Image, "bootstrap")
  1349  		scenario := ""
  1350  		r, _ := regexp.Compile(".*/scenarios/([a-z0-9_]+).py.*")
  1351  		for _, cmd := range container.Command {
  1352  			if submatches := r.FindStringSubmatch(cmd); submatches != nil {
  1353  				scenario = submatches[1]
  1354  			}
  1355  		}
  1356  		if scenario != "" {
  1357  			jobsToFix++
  1358  			t.Logf("%v: %v: should not be using deprecated scenarios, is directly invoking: %v", job.SourcePath, job.Name, scenario)
  1359  			continue
  1360  		}
  1361  		for _, arg := range container.Args {
  1362  			if strings.HasPrefix(arg, "--scenario=") {
  1363  				scenario = strings.TrimPrefix(arg, "--scenario=")
  1364  			}
  1365  		}
  1366  		if scenario != "" {
  1367  			jobsToFix++
  1368  			t.Logf("%v: %v: should not be using deprecated scenarios, is invoking via bootrap: %v", job.SourcePath, job.Name, scenario)
  1369  		}
  1370  	}
  1371  	if jobsToFix > 0 {
  1372  		t.Logf("summary: %v/%v jobs using deprecated scenarios", jobsToFix, len(jobs))
  1373  	}
  1374  }
  1375  
  1376  // Could test against all prowjobs but in doing so we discovered all current violations
  1377  // are in presubmits, and we want to know presubmit-specific things about those
  1378  func TestKubernetesPresubmitsShouldNotUseKubernetesReleasePullBucket(t *testing.T) {
  1379  	jobsToFix := 0
  1380  	jobs := c.AllStaticPresubmits(nil)
  1381  	for _, job := range jobs {
  1382  		// Only consider Pods
  1383  		if job.Spec == nil {
  1384  			continue
  1385  		}
  1386  		for _, container := range job.Spec.Containers {
  1387  			foundExtractLocal := false
  1388  			stagePath := ""
  1389  			provider := ""
  1390  			jobName := job.Name
  1391  			if len(job.Branches) > 1 {
  1392  				jobName = fmt.Sprintf("%v@%v", job.Name, job.Branches)
  1393  			} else if len(job.Branches) > 0 {
  1394  				jobName = fmt.Sprintf("%v@%v", job.Name, job.Branches[0])
  1395  			}
  1396  			for _, arg := range container.Args {
  1397  				if strings.HasPrefix(arg, "--extract=local") {
  1398  					foundExtractLocal = true
  1399  				}
  1400  				if strings.HasPrefix(arg, "--stage=gs://kubernetes-release-pull") {
  1401  					stagePath = strings.TrimPrefix(arg, "--stage=gs://kubernetes-release-pull")
  1402  				}
  1403  				if strings.HasPrefix(arg, "--provider=") {
  1404  					provider = strings.TrimPrefix(arg, "--provider=")
  1405  				}
  1406  			}
  1407  			if stagePath != "" && foundExtractLocal {
  1408  				jobsToFix++
  1409  				t.Logf("%v: %v: jobs using --extract=local and --provider=%v should not use --stage=gs://kubernetes-release-pull%v", job.SourcePath, jobName, provider, stagePath)
  1410  			}
  1411  		}
  1412  	}
  1413  	if jobsToFix > 0 {
  1414  		t.Logf("summary: %v/%v jobs using --stage=gs://kubernetes-release-pull/...", jobsToFix, len(jobs))
  1415  	}
  1416  }