sigs.k8s.io/prow@v0.0.0-20240503223140-c5e374dc7eb1/cmd/checkconfig/main.go

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// checkconfig loads the Prow configuration and validates it.
package main

import (
	"context"
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	stdio "io"
	"io/fs"
	"net/url"
	"os"
	"path"
	"reflect"
	"sort"
	"strings"

	"github.com/sirupsen/logrus"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/validation"
	"sigs.k8s.io/yaml"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"

	needsrebase "sigs.k8s.io/prow/cmd/external-plugins/needs-rebase/plugin"
	v1 "sigs.k8s.io/prow/pkg/apis/prowjobs/v1"
	"sigs.k8s.io/prow/pkg/config"
	"sigs.k8s.io/prow/pkg/flagutil"
	configflagutil "sigs.k8s.io/prow/pkg/flagutil/config"
	pluginsflagutil "sigs.k8s.io/prow/pkg/flagutil/plugins"
	"sigs.k8s.io/prow/pkg/github"
	_ "sigs.k8s.io/prow/pkg/hook/plugin-imports"
	"sigs.k8s.io/prow/pkg/io"
	"sigs.k8s.io/prow/pkg/kube"
	"sigs.k8s.io/prow/pkg/labels"
	"sigs.k8s.io/prow/pkg/logrusutil"
	"sigs.k8s.io/prow/pkg/plank"
	"sigs.k8s.io/prow/pkg/plugins"
	"sigs.k8s.io/prow/pkg/plugins/approve"
	"sigs.k8s.io/prow/pkg/plugins/blockade"
	"sigs.k8s.io/prow/pkg/plugins/blunderbuss"
	"sigs.k8s.io/prow/pkg/plugins/bugzilla"
	"sigs.k8s.io/prow/pkg/plugins/cherrypickunapproved"
	"sigs.k8s.io/prow/pkg/plugins/hold"
	labelplugin "sigs.k8s.io/prow/pkg/plugins/label"
	"sigs.k8s.io/prow/pkg/plugins/lgtm"
	ownerslabel "sigs.k8s.io/prow/pkg/plugins/owners-label"
	"sigs.k8s.io/prow/pkg/plugins/releasenote"
	"sigs.k8s.io/prow/pkg/plugins/trigger"
	verifyowners "sigs.k8s.io/prow/pkg/plugins/verify-owners"
	"sigs.k8s.io/prow/pkg/plugins/wip"
)

type options struct {
	config        configflagutil.ConfigOptions
	pluginsConfig pluginsflagutil.PluginOptions

	prowYAMLRepoName string
	prowYAMLPath     string

	warnings               flagutil.Strings
	excludeWarnings        flagutil.Strings
	requiredJobAnnotations flagutil.Strings
	strict                 bool
	expensive              bool
	includeDefaultWarnings bool

	github  flagutil.GitHubOptions
	storage flagutil.StorageClientOptions
}

func reportWarning(strict bool, errs utilerrors.Aggregate) {
	for _, item := range errs.Errors() {
		logrus.Warn(item.Error())
	}
	if strict {
		logrus.Fatal("Strict is set and there were warnings")
	}
}

// warningEnabled returns true if the given warning was requested and is not excluded.
func (o *options) warningEnabled(warning string) bool {
	return sets.New[string](o.warnings.Strings()...).Difference(sets.New[string](o.excludeWarnings.Strings()...)).Has(warning)
}
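// The names below are the values accepted by the --warnings and
// --exclude-warning flags. When no --warnings flag is given, the default set
// is used. A hypothetical invocation (illustrative paths, not defaults) that
// keeps the defaults but skips one check could look like:
//
//	checkconfig \
//	  --config-path=prow/config.yaml \
//	  --plugin-config=prow/plugins.yaml \
//	  --exclude-warning=needs-ok-to-test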
const (
	mismatchedTideWarning = "mismatched-tide"
	mismatchedTideLenientWarning = "mismatched-tide-lenient"
	tideStrictBranchWarning = "tide-strict-branch"
	tideContextPolicy = "tide-context-policy"
	nonDecoratedJobsWarning = "non-decorated-jobs"
	validDecorationConfigWarning = "valid-decoration-config"
	jobNameLengthWarning = "long-job-names"
	jobRefsDuplicationWarning = "duplicate-job-refs"
	needsOkToTestWarning = "needs-ok-to-test"
	managedWebhooksWarning = "managed-webhooks"
	validateOwnersWarning = "validate-owners"
	missingTriggerWarning = "missing-trigger"
	validateURLsWarning = "validate-urls"
	unknownFieldsWarning = "unknown-fields"
	unknownFieldsAllWarning = "unknown-fields-all" // Superset of "unknown-fields" that includes validating job config.
	verifyOwnersFilePresence = "verify-owners-presence"
	validateClusterFieldWarning = "validate-cluster-field"
	validateSupplementalProwConfigOrgRepoHirarchy = "validate-supplemental-prow-config-hirarchy"
	validateUnmanagedBranchConfigHasNoSubconfig = "validate-unmanaged-branchconfig-has-no-subconfig"
	validateGitHubAppInstallationWarning = "validate-github-app-installation"
	validateLabelWarning = "validate-label"
	requiredJobAnnotationsWarning = "required-job-annotations"
	periodicDefaultCloneWarning = "periodic-default-clone-config"

	defaultHourlyTokens = 3000
	defaultAllowedBurst = 100
)

var defaultWarnings = []string{
	mismatchedTideWarning,
	tideStrictBranchWarning,
	tideContextPolicy,
	mismatchedTideLenientWarning,
	nonDecoratedJobsWarning,
	jobNameLengthWarning,
	jobRefsDuplicationWarning,
	needsOkToTestWarning,
	managedWebhooksWarning,
	validateOwnersWarning,
	missingTriggerWarning,
	validateURLsWarning,
	unknownFieldsWarning,
	validateClusterFieldWarning,
	validateSupplementalProwConfigOrgRepoHirarchy,
	validateUnmanagedBranchConfigHasNoSubconfig,
	validateLabelWarning,
	requiredJobAnnotationsWarning,
	periodicDefaultCloneWarning,
}

var expensiveWarnings = []string{
	verifyOwnersFilePresence,
}

var optionalWarnings = []string{
	validDecorationConfigWarning,
	// It would be nice to make "unknown-fields-all" a default, but difficult to do due to K8s configs.
	// https://github.com/kubernetes/test-infra/pull/21075#issuecomment-862550510
	unknownFieldsAllWarning,
	validateGitHubAppInstallationWarning,
}

var throttlerDefaults = flagutil.ThrottlerDefaults(defaultHourlyTokens, defaultAllowedBurst)

func getAllWarnings() []string {
	var all []string
	all = append(all, defaultWarnings...)
	all = append(all, expensiveWarnings...)
	all = append(all, optionalWarnings...)

	return all
}

func (o *options) DefaultAndValidate() error {
	allWarnings := getAllWarnings()
	for _, validate := range []interface{ Validate(bool) error }{&o.config, &o.pluginsConfig, &o.storage} {
		if err := validate.Validate(false); err != nil {
			return err
		}
	}

	if o.prowYAMLPath != "" && o.prowYAMLRepoName == "" {
		return errors.New("--prow-yaml-path requires --prow-yaml-repo-name to be set")
	}
	for _, warning := range o.warnings.Strings() {
		found := false
		for _, registeredWarning := range allWarnings {
			if warning == registeredWarning {
				found = true
				break
			}
		}
		if !found {
			return fmt.Errorf("no such warning %q, valid warnings: %v", warning, allWarnings)
		}
	}
	return nil
}

func parseOptions() (options, error) {
	o := options{}

	if err := o.gatherOptions(flag.CommandLine, os.Args[1:]); err != nil {
		return options{}, err
	}
	return o, nil
}

func (o *options) gatherOptions(flag *flag.FlagSet, args []string) error {
	o.pluginsConfig.CheckUnknownPlugins = true
	flag.StringVar(&o.prowYAMLRepoName, "prow-yaml-repo-name", "", "Name of the repo whose .prow.yaml should be checked.")
	flag.StringVar(&o.prowYAMLPath, "prow-yaml-path", "", "Path to the .prow.yaml file to check. Requires --prow-yaml-repo-name to be set. Omit to look for either .prow.yaml or a .prow directory in the current working directory (recommended).")
	flag.Var(&o.warnings, "warnings", "Warnings to validate. Use repeatedly to provide a list of warnings.")
	flag.Var(&o.excludeWarnings, "exclude-warning", "Warnings to exclude. Use repeatedly to provide a list of warnings to exclude.")
	flag.Var(&o.requiredJobAnnotations, "required-job-annotations", "Annotation names that every job must include in its definition. Use repeatedly to provide a list of required annotations.")
	flag.BoolVar(&o.expensive, "expensive-checks", false, "If set, additional expensive warnings will be enabled.")
	flag.BoolVar(&o.strict, "strict", false, "If set, consider all warnings as errors.")
	flag.BoolVar(&o.includeDefaultWarnings, "include-default-warnings", false, "If set, force inclusion of the default warning set. Normally this is inferred from the lack of '--warnings' flags.")
	o.github.AddCustomizedFlags(flag, throttlerDefaults)
	o.github.AllowAnonymous = true
	o.config.AddFlags(flag)
	o.pluginsConfig.AddFlags(flag)
	o.storage.AddFlags(flag)
	if err := flag.Parse(args); err != nil {
		return fmt.Errorf("parse flags: %w", err)
	}
	if err := o.DefaultAndValidate(); err != nil {
		return fmt.Errorf("invalid options: %w", err)
	}
	return nil
}
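// A typical invocation points checkconfig at the same files the cluster uses
// and optionally at an in-repo config. The paths and repo name below are
// placeholders, not defaults:
//
//	checkconfig \
//	  --config-path=prow/config.yaml \
//	  --job-config-path=prow/jobs \
//	  --plugin-config=prow/plugins.yaml \
//	  --prow-yaml-repo-name=example-org/example-repo \
//	  --strict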
func main() {
	logrusutil.ComponentInit()

	o, err := parseOptions()
	if err != nil {
		logrus.Fatalf("Error parsing options - %v", err)
	}

	if err := validate(o); err != nil {
		switch e := err.(type) {
		case utilerrors.Aggregate:
			reportWarning(o.strict, e)
		default:
			logrus.WithError(err).Fatal("Validation failed")
		}
	} else {
		logrus.Info("checkconfig passes without any error!")
	}
}

func validate(o options) error {
	// use all warnings by default
	if len(o.warnings.Strings()) == 0 || o.includeDefaultWarnings {
		if o.expensive {
			o.warnings = flagutil.NewStrings(append(o.warnings.Strings(), getAllWarnings()...)...)
		} else {
			o.warnings = flagutil.NewStrings(append(o.warnings.Strings(), defaultWarnings...)...)
		}
	}
	if o.github.AppID != "" && o.github.AppPrivateKeyPath != "" {
		o.warnings.Add(validateGitHubAppInstallationWarning)
	}

	configAgent, err := o.config.ConfigAgent()
	if err != nil {
		return fmt.Errorf("error loading prow config: %w", err)
	}
	cfg := configAgent.Config()

	if o.prowYAMLRepoName != "" {
		if err := validateInRepoConfig(cfg, o.prowYAMLPath, o.prowYAMLRepoName, o.warningEnabled(unknownFieldsAllWarning)); err != nil {
			return fmt.Errorf("error validating .prow.yaml: %w", err)
		}
	}

	var pcfg *plugins.Configuration
	if o.pluginsConfig.PluginConfigPath != "" {
		pluginAgent, err := o.pluginsConfig.PluginAgent()
		if err != nil {
			return fmt.Errorf("error loading Prow plugin config: %w", err)
		}
		pcfg = pluginAgent.Config()
	}

	// The following checks are useful in finding user errors, but their
	// presence won't lead to strictly incorrect behavior, so we can
	// detect them here but don't necessarily want to stop config re-load
	// in all components on their failure.
	var errs []error
	if pcfg != nil && o.warningEnabled(verifyOwnersFilePresence) {
		if o.github.TokenPath == "" {
			return errors.New("cannot verify OWNERS file presence without a GitHub token")
		}

		githubClient, err := o.github.GitHubClient(false)
		if err != nil {
			return fmt.Errorf("error loading GitHub client: %w", err)
		}
		// 404s are expected to happen, no point in retrying
		githubClient.SetMax404Retries(0)

		if err := verifyOwnersPresence(pcfg, githubClient); err != nil {
			errs = append(errs, err)
		}
	}
	if pcfg != nil && o.warningEnabled(mismatchedTideWarning) {
		if err := validateTideRequirements(cfg, pcfg, true); err != nil {
			errs = append(errs, err)
		}
	} else if pcfg != nil && o.warningEnabled(mismatchedTideLenientWarning) {
		if err := validateTideRequirements(cfg, pcfg, false); err != nil {
			errs = append(errs, err)
		}
	}
	if o.warningEnabled(nonDecoratedJobsWarning) {
		if err := validateDecoratedJobs(cfg); err != nil {
			errs = append(errs, err)
		}
	}
	if o.warningEnabled(validDecorationConfigWarning) {
		if err := validateDecorationConfig(cfg); err != nil {
			errs = append(errs, err)
		}
	}
	if o.warningEnabled(jobNameLengthWarning) {
		if err := validateJobRequirements(cfg.JobConfig); err != nil {
			errs = append(errs, err)
		}
	}
	if o.warningEnabled(jobRefsDuplicationWarning) {
		if err := validateJobExtraRefs(cfg.JobConfig); err != nil {
			errs = append(errs, err)
		}
	}
	if o.warningEnabled(periodicDefaultCloneWarning) {
		if err := validatePeriodicDefaultCloneConfig(cfg.JobConfig); err != nil {
			errs = append(errs, err)
		}
	}
	if o.warningEnabled(needsOkToTestWarning) {
		if err := validateNeedsOkToTestLabel(cfg); err != nil {
			errs = append(errs, err)
		}
	}
	if o.warningEnabled(managedWebhooksWarning) {
		if err := validateManagedWebhooks(cfg); err != nil {
			errs = append(errs, err)
		}
	}
	if pcfg != nil && o.warningEnabled(validateOwnersWarning) {
		if err := verifyOwnersPlugin(pcfg); err != nil {
			errs = append(errs, err)
		}
	}
	if pcfg != nil && o.warningEnabled(missingTriggerWarning) {
		if err := validateTriggers(cfg, pcfg); err != nil {
			errs = append(errs, err)
		}
	}
	if pcfg != nil && o.warningEnabled(validateURLsWarning) {
		if err := validateURLs(cfg.ProwConfig); err != nil {
			errs = append(errs, err)
		}
	}
	// If both "unknown-fields" and "unknown-fields-all" are enabled, just run "unknown-fields-all" validation
	// since it is a superset. This will avoid duplicate warnings.
	unknownAllEnabled := o.warningEnabled(unknownFieldsAllWarning)
	unknownEnabled := o.warningEnabled(unknownFieldsWarning)
	if unknownAllEnabled {
		if _, err := config.LoadStrict(o.config.ConfigPath, o.config.JobConfigPath, nil, ""); err != nil {
			errs = append(errs, err)
		}
	} else if unknownEnabled {
		cfgBytes, err := os.ReadFile(o.config.ConfigPath)
		if err != nil {
			return fmt.Errorf("error reading Prow config for validation: %w", err)
		}
		if err := validateUnknownFields(&config.Config{}, cfgBytes, o.config.ConfigPath); err != nil {
			errs = append(errs, err)
		}
	}
	if pcfg != nil && (unknownEnabled || unknownAllEnabled) {
		pcfgBytes, err := os.ReadFile(o.pluginsConfig.PluginConfigPath)
		if err != nil {
			return fmt.Errorf("error reading Prow plugin config for validation: %w", err)
		}
		if err := validateUnknownFields(&plugins.Configuration{}, pcfgBytes, o.pluginsConfig.PluginConfigPath); err != nil {
			errs = append(errs, err)
		}
	}
	if o.warningEnabled(tideStrictBranchWarning) {
		if err := validateStrictBranches(cfg.ProwConfig); err != nil {
			errs = append(errs, err)
		}
	}
	if o.warningEnabled(tideContextPolicy) {
		if err := validateTideContextPolicy(cfg); err != nil {
			errs = append(errs, err)
		}
	}
	if o.warningEnabled(validateClusterFieldWarning) {
		opener, err := io.NewOpener(context.Background(), o.storage.GCSCredentialsFile, o.storage.S3CredentialsFile)
		if err != nil {
			logrus.WithError(err).Fatal("Error creating opener")
		}
		if err := validateCluster(cfg, opener); err != nil {
			errs = append(errs, err)
		}
	}

	if o.warningEnabled(validateSupplementalProwConfigOrgRepoHirarchy) {
		if err := validateAdditionalProwConfigIsInOrgRepoDirectoryStructure(os.DirFS("./"), o.config.SupplementalProwConfigDirs.Strings(), o.pluginsConfig.SupplementalPluginsConfigDirs.Strings(), o.config.SupplementalProwConfigsFileNameSuffix, o.pluginsConfig.SupplementalPluginsConfigsFileNameSuffix); err != nil {
			errs = append(errs, err)
		}
	}

	if o.warningEnabled(validateUnmanagedBranchConfigHasNoSubconfig) {
		if err := validateUnmanagedBranchprotectionConfigDoesntHaveSubconfig(cfg.BranchProtection); err != nil {
			errs = append(errs, err)
		}
	}

	if o.warningEnabled(validateGitHubAppInstallationWarning) {
		githubClient, err := o.github.GitHubClient(false)
		if err != nil {
			return fmt.Errorf("error loading GitHub client: %w", err)
		}

		if err := validateGitHubAppIsInstalled(githubClient, cfg.AllRepos); err != nil {
			errs = append(errs, err)
		}
	}

	if pcfg != nil && o.warningEnabled(validateLabelWarning) {
		if err := verifyLabelPlugin(pcfg.Label); err != nil {
			errs = append(errs, err)
		}
	}

	if o.warningEnabled(requiredJobAnnotationsWarning) {
		if err := validateRequiredJobAnnotations(o.requiredJobAnnotations.Strings(), cfg.JobConfig); err != nil {
			errs = append(errs, err)
		}
	}

	// validate rerun commands match presubmit job triggering regex
	for _, presubmits := range cfg.JobConfig.PresubmitsStatic {
		for _, p := range presubmits {
			if !cfg.Gerrit.IsAllowedPresubmitTrigger(p.RerunCommand) {
				errs = append(errs, fmt.Errorf("rerun command %s in job %s does not conform to test command requirements, "+
					"please make sure the trigger regex is a subset of %s and the rerun command matches the trigger regex",
					p.RerunCommand, p.Name, cfg.Gerrit.AllowedPresubmitTriggerReRawString))
			}
		}
	}

	return utilerrors.NewAggregate(errs)
}

func policyIsStrict(p config.Policy) bool {
	if p.Protect == nil || !*p.Protect {
		return false
	}
	if p.RequiredStatusChecks == nil || p.RequiredStatusChecks.Strict == nil {
		return false
	}
	return *p.RequiredStatusChecks.Strict
}

func strictBranchesConfig(c config.ProwConfig) (*orgRepoConfig, error) {
	strictOrgExceptions := make(map[string]sets.Set[string])
	strictRepos := sets.New[string]()
	for orgName := range c.BranchProtection.Orgs {
		org := c.BranchProtection.GetOrg(orgName)
		// First find explicitly configured repos and partition based on strictness.
		// If any branch in a repo is strict we assume that the whole repo is, to
		// simplify this validation.
		strictExplicitRepos, nonStrictExplicitRepos := sets.New[string](), sets.New[string]()
		for repoName := range org.Repos {
			repo := org.GetRepo(repoName)
			strict := policyIsStrict(repo.Policy)
			if !strict {
				for branchName := range repo.Branches {
					branch, err := repo.GetBranch(branchName)
					if err != nil {
						return nil, fmt.Errorf("error for repo=%s/%s and branch=%s: %w",
							orgName, repoName, branchName, err)
					}
					if policyIsStrict(branch.Policy) {
						strict = true
						break
					}
				}
			}
			fullRepoName := fmt.Sprintf("%s/%s", orgName, repoName)
			if strict {
				strictExplicitRepos.Insert(fullRepoName)
			} else {
				nonStrictExplicitRepos.Insert(fullRepoName)
			}
		}
		// Done partitioning the repos.

		if policyIsStrict(org.Policy) {
			// This org is strict, record with repo exceptions ("denylist")
			strictOrgExceptions[orgName] = nonStrictExplicitRepos
		} else {
			// The org is not strict, record member repos that are allowed
			strictRepos.Insert(strictExplicitRepos.UnsortedList()...)
		}
	}
	return newOrgRepoConfig(strictOrgExceptions, strictRepos), nil
}
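// As a sketch of the conflict validateStrictBranches reports, a config like
// the following (hypothetical org name) would fail, because Tide handles
// merges for the org while branch protection also requires PRs to be fully
// up to date with the base branch:
//
//	tide:
//	  queries:
//	  - orgs: ["example-org"]
//	branch-protection:
//	  protect: true
//	  required_status_checks:
//	    strict: true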
func validateStrictBranches(c config.ProwConfig) error {
	const explanation = "See #5: https://docs.prow.k8s.io/docs/components/core/tide/maintainers/#best-practices Also note that this validation is imperfect, see the check-config code for details"
	if len(c.Tide.Queries) == 0 {
		// Short circuit here so that we can allow global level branchprotector
		// 'strict: true' if Tide is not enabled.
		// Ignoring the case where Tide is enabled only on orgs/repos specifically
		// exempted from the global setting simplifies validation immensely.
		return nil
	}
	if policyIsStrict(c.BranchProtection.Policy) {
		return fmt.Errorf("strict branchprotection context requirements cannot be globally enabled when Tide is configured for use. %s", explanation)
	}
	// The two assumptions below are not necessarily true, but they hold for all
	// known instances and make this validation much simpler.

	// Assumes if any branch is managed by Tide, the whole repo is.
	overallTideConfig := newOrgRepoConfig(c.Tide.Queries.OrgExceptionsAndRepos())
	// Assumes if any branch is strict, the repo is strict.
	strictBranchConfig, err := strictBranchesConfig(c)
	if err != nil {
		return err
	}

	conflicts := overallTideConfig.intersection(strictBranchConfig).items()
	if len(conflicts) == 0 {
		return nil
	}
	return fmt.Errorf(
		"the following enable strict branchprotection context requirements even though Tide handles merges: [%s]. %s",
		strings.Join(conflicts, "; "),
		explanation,
	)
}

func validateURLs(c config.ProwConfig) error {
	var validationErrs []error

	if _, err := url.Parse(c.StatusErrorLink); err != nil {
		validationErrs = append(validationErrs, fmt.Errorf("status_error_link is not a valid url: %s", c.StatusErrorLink))
	}

	return utilerrors.NewAggregate(validationErrs)
}

func validateUnknownFields(cfg interface{}, cfgBytes []byte, filePath string) error {
	err := yaml.Unmarshal(cfgBytes, &cfg, yaml.DisallowUnknownFields)
	if err != nil {
		return fmt.Errorf("unknown fields or bad config in %s: %w", filePath, err)
	}
	return nil
}

func validateJobRequirements(c config.JobConfig) error {
	var validationErrs []error
	for repo, jobs := range c.PresubmitsStatic {
		for _, job := range jobs {
			validationErrs = append(validationErrs, validatePresubmitJob(repo, job))
		}
	}
	for repo, jobs := range c.PostsubmitsStatic {
		for _, job := range jobs {
			validationErrs = append(validationErrs, validatePostsubmitJob(repo, job))
		}
	}
	for _, job := range c.Periodics {
		validationErrs = append(validationErrs, validatePeriodicJob(job))
	}

	return utilerrors.NewAggregate(validationErrs)
}

func validatePresubmitJob(repo string, job config.Presubmit) error {
	var validationErrs []error
	// Prow labels k8s resources with job names. Labels are capped at 63 chars.
	if job.Agent == string(v1.KubernetesAgent) && len(job.Name) > validation.LabelValueMaxLength {
		validationErrs = append(validationErrs, fmt.Errorf("name of Presubmit job %q (for repo %q) too long (should be at most 63 characters)", job.Name, repo))
	}
	return utilerrors.NewAggregate(validationErrs)
}

func validatePostsubmitJob(repo string, job config.Postsubmit) error {
	var validationErrs []error
	// Prow labels k8s resources with job names. Labels are capped at 63 chars.
	if job.Agent == string(v1.KubernetesAgent) && len(job.Name) > validation.LabelValueMaxLength {
		validationErrs = append(validationErrs, fmt.Errorf("name of Postsubmit job %q (for repo %q) too long (should be at most 63 characters)", job.Name, repo))
	}
	return utilerrors.NewAggregate(validationErrs)
}

func validateJobExtraRefs(cfg config.JobConfig) error {
	var validationErrs []error
	for repo, presubmits := range cfg.PresubmitsStatic {
		for _, presubmit := range presubmits {
			if err := config.ValidateRefs(repo, presubmit.JobBase); err != nil {
				validationErrs = append(validationErrs, err)
			}
		}
	}
	return utilerrors.NewAggregate(validationErrs)
}

func validatePeriodicDefaultCloneConfig(cfg config.JobConfig) error {
	var validationErrs []error
	for _, job := range cfg.Periodics {
		// Top level clone configs don't make sense for periodic jobs.
		if job.CloneDepth != 0 || job.CloneURI != "" || job.PathAlias != "" {
			validationErrs = append(validationErrs, fmt.Errorf("periodic jobs might clone 0, 1, or more repos, top level `clone_depth`, `clone_uri`, and `path_alias` don't have any effect. Name: %q", job.Name))
		}
	}
	return utilerrors.NewAggregate(validationErrs)
}

func validatePeriodicJob(job config.Periodic) error {
	var validationErrs []error
	// Prow labels k8s resources with job names. Labels are capped at 63 chars.
	if job.Agent == string(v1.KubernetesAgent) && len(job.Name) > validation.LabelValueMaxLength {
		validationErrs = append(validationErrs, fmt.Errorf("name of Periodic job %q too long (should be at most 63 characters)", job.Name))
	}
	return utilerrors.NewAggregate(validationErrs)
}
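// To give a feel for what validateTideRequirements enforces, consider this
// hypothetical pair of snippets: the Tide query requires the lgtm and approved
// labels for example-org/example-repo, so the check expects the corresponding
// plugins to be enabled there as well (and, conversely, that repos Tide
// already handles require the labels of the plugins they enable):
//
//	tide:
//	  queries:
//	  - repos: ["example-org/example-repo"]
//	    labels: ["lgtm", "approved"]
//
//	plugins:
//	  example-org/example-repo:
//	    plugins: ["lgtm", "approve"]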
func validateTideRequirements(cfg *config.Config, pcfg *plugins.Configuration, includeForbidden bool) error {
	type matcher struct {
		// matches determines if the tide query appropriately honors the
		// label in question -- whether by requiring it or forbidding it
		matches func(label string, query config.TideQuery) bool
		// verb is used in forming error messages
		verb string
	}
	requires := matcher{
		matches: func(label string, query config.TideQuery) bool {
			return sets.New[string](query.Labels...).Has(label)
		},
		verb: "require",
	}
	forbids := matcher{
		matches: func(label string, query config.TideQuery) bool {
			return sets.New[string](query.MissingLabels...).Has(label)
		},
		verb: "forbid",
	}

	type plugin struct {
		// name and label identify the relationship we are validating
		name, label string
		// external indicates whether the plugin is external or not
		external bool
		// matcher determines if the tide query appropriately honors the
		// label in question -- whether by requiring it or forbidding it
		matcher matcher
		// config holds the orgs and repos for which tide does honor the
		// label; this container is populated conditionally from queries
		// using the matcher
		config *orgRepoConfig
	}
	// configs list relationships between tide config
	// and plugin enablement that we want to validate
	configs := []plugin{
		{name: lgtm.PluginName, label: labels.LGTM, matcher: requires},
		{name: approve.PluginName, label: labels.Approved, matcher: requires},
	}
	if includeForbidden {
		configs = append(configs,
			plugin{name: hold.PluginName, label: labels.Hold, matcher: forbids},
			plugin{name: wip.PluginName, label: labels.WorkInProgress, matcher: forbids},
			plugin{name: bugzilla.PluginName, label: labels.InvalidBug, matcher: forbids},
			plugin{name: verifyowners.PluginName, label: labels.InvalidOwners, matcher: forbids},
			plugin{name: releasenote.PluginName, label: labels.ReleaseNoteLabelNeeded, matcher: forbids},
			plugin{name: cherrypickunapproved.PluginName, label: labels.CpUnapproved, matcher: forbids},
			plugin{name: blockade.PluginName, label: labels.BlockedPaths, matcher: forbids},
			plugin{name: needsrebase.PluginName, label: labels.NeedsRebase, external: true, matcher: forbids},
		)
	}

	for i := range configs {
		// For each plugin determine the subset of tide queries that match and then
		// the orgs and repos that the subset matches.
		var matchingQueries config.TideQueries
		for _, query := range cfg.Tide.Queries {
			if configs[i].matcher.matches(configs[i].label, query) {
				matchingQueries = append(matchingQueries, query)
			}
		}
		configs[i].config = newOrgRepoConfig(matchingQueries.OrgExceptionsAndRepos())
	}

	overallTideConfig := newOrgRepoConfig(cfg.Tide.Queries.OrgExceptionsAndRepos())

	// Now actually execute the checks we just configured.
	var validationErrs []error
	for _, pluginConfig := range configs {
		err := ensureValidConfiguration(
			pluginConfig.name,
			pluginConfig.label,
			pluginConfig.matcher.verb,
			pluginConfig.config,
			overallTideConfig,
			enabledOrgReposForPlugin(pcfg, pluginConfig.name, pluginConfig.external),
		)
		validationErrs = append(validationErrs, err)
	}

	return utilerrors.NewAggregate(validationErrs)
}

func newOrgRepoConfig(orgExceptions map[string]sets.Set[string], repos sets.Set[string]) *orgRepoConfig {
	return &orgRepoConfig{
		orgExceptions: orgExceptions,
		repos: repos,
	}
}

// orgRepoConfig describes a set of repositories with an explicit
// allowlist and a mapping of denied repos for owning orgs
type orgRepoConfig struct {
	// orgExceptions holds explicit denylists of repos for owning orgs
	orgExceptions map[string]sets.Set[string]
	// repos is an allowed list of repos
	repos sets.Set[string]
}

func (c *orgRepoConfig) items() []string {
	items := make([]string, 0, len(c.orgExceptions)+len(c.repos))
	for org, excepts := range c.orgExceptions {
		item := fmt.Sprintf("org: %s", org)
		if excepts.Len() > 0 {
			item = fmt.Sprintf("%s without repo(s) %s", item, strings.Join(sets.List(excepts), ", "))
		}
		items = append(items, item)
	}
	for _, repo := range sets.List(c.repos) {
		items = append(items, fmt.Sprintf("repo: %s", repo))
	}
	return items
}

// difference returns a new orgRepoConfig that represents the set difference of
// the repos specified by the receiver and the parameter orgRepoConfigs.
func (c *orgRepoConfig) difference(c2 *orgRepoConfig) *orgRepoConfig {
	res := &orgRepoConfig{
		orgExceptions: make(map[string]sets.Set[string]),
		repos: sets.New[string]().Union(c.repos),
	}
	for org, excepts1 := range c.orgExceptions {
		if excepts2, ok := c2.orgExceptions[org]; ok {
			res.repos.Insert(excepts2.Difference(excepts1).UnsortedList()...)
		} else {
			excepts := sets.New[string]().Union(excepts1)
			// Add any applicable repos in c2.repos to excepts
			for _, repo := range c2.repos.UnsortedList() {
				if parts := strings.SplitN(repo, "/", 2); len(parts) == 2 && parts[0] == org {
					excepts.Insert(repo)
				}
			}
			res.orgExceptions[org] = excepts
		}
	}

	res.repos = res.repos.Difference(c2.repos)

	for _, repo := range res.repos.UnsortedList() {
		if parts := strings.SplitN(repo, "/", 2); len(parts) == 2 {
			if excepts2, ok := c2.orgExceptions[parts[0]]; ok && !excepts2.Has(repo) {
				res.repos.Delete(repo)
			}
		}
	}
	return res
}
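// A small worked example of the set operations on orgRepoConfig (names are
// made up): if c covers org "acme" except repo "acme/legacy", and c2 covers
// only the repo "acme/widgets", then c.intersection(c2) is just
// "repo: acme/widgets", while c.difference(c2) keeps the org entry and adds
// "acme/widgets" to its exception list.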
// intersection returns a new orgRepoConfig that represents the set intersection
// of the repos specified by the receiver and the parameter orgRepoConfigs.
func (c *orgRepoConfig) intersection(c2 *orgRepoConfig) *orgRepoConfig {
	res := &orgRepoConfig{
		orgExceptions: make(map[string]sets.Set[string]),
		repos: sets.New[string](),
	}
	for org, excepts1 := range c.orgExceptions {
		// Include common orgs, but union exceptions.
		if excepts2, ok := c2.orgExceptions[org]; ok {
			res.orgExceptions[org] = excepts1.Union(excepts2)
		} else {
			// Include right side repos that match left side org.
			for _, repo := range c2.repos.UnsortedList() {
				if parts := strings.SplitN(repo, "/", 2); len(parts) == 2 && parts[0] == org && !excepts1.Has(repo) {
					res.repos.Insert(repo)
				}
			}
		}
	}
	for _, repo := range c.repos.UnsortedList() {
		if c2.repos.Has(repo) {
			res.repos.Insert(repo)
		} else if parts := strings.SplitN(repo, "/", 2); len(parts) == 2 {
			// Include left side repos that match right side org.
			if excepts2, ok := c2.orgExceptions[parts[0]]; ok && !excepts2.Has(repo) {
				res.repos.Insert(repo)
			}
		}
	}
	return res
}

// union returns a new orgRepoConfig that represents the set union of the
// repos specified by the receiver and the parameter orgRepoConfigs.
func (c *orgRepoConfig) union(c2 *orgRepoConfig) *orgRepoConfig {
	res := &orgRepoConfig{
		orgExceptions: make(map[string]sets.Set[string]),
		repos: sets.New[string](),
	}

	for org, excepts1 := range c.orgExceptions {
		// Keep only items in both denylists that are not in the
		// explicit repo allowlist for the other configuration;
		// we know from how the orgRepoConfigs are constructed that
		// an org denylist won't intersect its own repo allowlist.
		pruned := excepts1.Difference(c2.repos)
		if excepts2, ok := c2.orgExceptions[org]; ok {
			res.orgExceptions[org] = pruned.Intersection(excepts2.Difference(c.repos))
		} else {
			res.orgExceptions[org] = pruned
		}
	}

	for org, excepts2 := range c2.orgExceptions {
		// Update any denylists not previously updated.
		if _, exists := res.orgExceptions[org]; !exists {
			res.orgExceptions[org] = excepts2.Difference(c.repos)
		}
	}

	// We need to prune out repos in the allowed lists which are
	// covered by an org already; we know from above that no
	// org denylist in the result will contain a repo allowlist.
	for _, repo := range c.repos.Union(c2.repos).UnsortedList() {
		parts := strings.SplitN(repo, "/", 2)
		if len(parts) != 2 {
			logrus.Warnf("org/repo %q is formatted incorrectly", repo)
			continue
		}
		if _, exists := res.orgExceptions[parts[0]]; !exists {
			res.repos.Insert(repo)
		}
	}
	return res
}

func enabledOrgReposForPlugin(c *plugins.Configuration, plugin string, external bool) *orgRepoConfig {
	var (
		orgs  []string
		repos []string
	)
	var orgMap map[string]sets.Set[string]
	if external {
		orgs, repos = c.EnabledReposForExternalPlugin(plugin)
		orgMap = make(map[string]sets.Set[string], len(orgs))
		for _, org := range orgs {
			orgMap[org] = nil
		}
	} else {
		_, repos, orgMap = c.EnabledReposForPlugin(plugin)
	}
	return newOrgRepoConfig(orgMap, sets.New[string](repos...))
}

// ensureValidConfiguration enforces rules about tide and plugin config.
// In this context, a subset is the set of repos or orgs for which a specific
// plugin is either enabled (for plugins) or required for merge (for tide). The
// tide superset is every org or repo that has any configuration at all in tide.
// Specifically:
//   - every item in the tide subset must also be in the plugins subset
//   - every item in the plugins subset that is in the tide superset must also be in the tide subset
//
// For example:
//   - if org/repo is configured in tide to require lgtm, it must have the lgtm plugin enabled
//   - if org/repo is configured in tide, the tide configuration must require the same set of
//     plugins as are configured. If the repository has LGTM and approve enabled, the tide query
//     must require both labels
func ensureValidConfiguration(plugin, label, verb string, tideSubSet, tideSuperSet, pluginsSubSet *orgRepoConfig) error {
	notEnabled := tideSubSet.difference(pluginsSubSet).items()
	notRequired := pluginsSubSet.intersection(tideSuperSet).difference(tideSubSet).items()

	var configErrors []error
	if len(notEnabled) > 0 {
		configErrors = append(configErrors, fmt.Errorf("the following orgs or repos %s the %s label for merging but do not enable the %s plugin: %v", verb, label, plugin, notEnabled))
	}
	if len(notRequired) > 0 {
		configErrors = append(configErrors, fmt.Errorf("the following orgs or repos enable the %s plugin but do not %s the %s label for merging: %v", plugin, verb, label, notRequired))
	}

	return utilerrors.NewAggregate(configErrors)
}

func validateDecoratedJobs(cfg *config.Config) error {
	var nonDecoratedJobs []string
	for _, presubmit := range cfg.AllStaticPresubmits([]string{}) {
		if presubmit.Agent == string(v1.KubernetesAgent) && !*presubmit.JobBase.UtilityConfig.Decorate {
			nonDecoratedJobs = append(nonDecoratedJobs, presubmit.Name)
		}
	}

	for _, postsubmit := range cfg.AllStaticPostsubmits([]string{}) {
		if postsubmit.Agent == string(v1.KubernetesAgent) && !*postsubmit.JobBase.UtilityConfig.Decorate {
			nonDecoratedJobs = append(nonDecoratedJobs, postsubmit.Name)
		}
	}

	for _, periodic := range cfg.AllPeriodics() {
		if periodic.Agent == string(v1.KubernetesAgent) && !*periodic.JobBase.UtilityConfig.Decorate {
			nonDecoratedJobs = append(nonDecoratedJobs, periodic.Name)
		}
	}

	if len(nonDecoratedJobs) > 0 {
		return fmt.Errorf("the following jobs use the kubernetes provider but do not use the pod utilities: %v", nonDecoratedJobs)
	}
	return nil
}

func validateDecorationConfig(cfg *config.Config) error {
	var configErrors []error
	for _, presubmit := range cfg.AllStaticPresubmits([]string{}) {
		if presubmit.Agent == string(v1.KubernetesAgent) && presubmit.Decorate != nil && *presubmit.Decorate && presubmit.DecorationConfig != nil {
			if err := presubmit.DecorationConfig.Validate(); err != nil {
				configErrors = append(configErrors, err)
			}
		}
	}

	for _, postsubmit := range cfg.AllStaticPostsubmits([]string{}) {
		if postsubmit.Agent == string(v1.KubernetesAgent) && postsubmit.Decorate != nil && *postsubmit.Decorate && postsubmit.DecorationConfig != nil {
			if err := postsubmit.DecorationConfig.Validate(); err != nil {
				configErrors = append(configErrors, err)
			}
		}
	}

	for _, periodic := range cfg.AllPeriodics() {
		if periodic.Agent == string(v1.KubernetesAgent) && periodic.Decorate != nil && *periodic.Decorate && periodic.DecorationConfig != nil {
			if err := periodic.DecorationConfig.Validate(); err != nil {
				configErrors = append(configErrors, err)
			}
		}
	}
	return utilerrors.NewAggregate(configErrors)
}
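// The discouraged pattern validateNeedsOkToTestLabel looks for is a query that
// simultaneously requires lgtm and forbids needs-ok-to-test, for example
// (hypothetical repo name):
//
//	tide:
//	  queries:
//	  - repos: ["example-org/example-repo"]
//	    labels: ["lgtm"]
//	    missingLabels: ["needs-ok-to-test"]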
func validateNeedsOkToTestLabel(cfg *config.Config) error {
	var queryErrors []error
	for i, query := range cfg.Tide.Queries {
		for _, label := range query.Labels {
			if label == lgtm.LGTMLabel {
				for _, label := range query.MissingLabels {
					if label == labels.NeedsOkToTest {
						queryErrors = append(queryErrors, fmt.Errorf(
							"the tide query at position %d "+
								"forbids the %q label and requires the %q label, "+
								"which is not recommended; "+
								"see https://docs.prow.k8s.io/docs/components/core/tide/maintainers/#best-practices "+
								"for more information",
							i, labels.NeedsOkToTest, lgtm.LGTMLabel),
						)
					}
				}
			}
		}
	}
	return utilerrors.NewAggregate(queryErrors)
}

func validateManagedWebhooks(cfg *config.Config) error {
	mw := cfg.ManagedWebhooks
	var errs []error
	orgs := sets.Set[string]{}
	for repo := range mw.OrgRepoConfig {
		if !strings.Contains(repo, "/") {
			org := repo
			orgs.Insert(org)
		}
	}
	for repo := range mw.OrgRepoConfig {
		if strings.Contains(repo, "/") {
			org := strings.SplitN(repo, "/", 2)[0]
			if orgs.Has(org) {
				errs = append(errs, fmt.Errorf(
					"org-level and repo-level webhooks are configured together for %q, "+
						"which is not allowed as there will be duplicated webhook events", repo))
			}
		}
	}
	return utilerrors.NewAggregate(errs)
}

func pluginsWithOwnersFile() string {
	return strings.Join([]string{approve.PluginName, blunderbuss.PluginName, ownerslabel.PluginName}, ", ")
}

func orgReposUsingOwnersFile(cfg *plugins.Configuration) *orgRepoConfig {
	// We do not know the set of repos that use OWNERS, but we
	// can get a reasonable proxy for this by looking at where
	// the `approve`, `blunderbuss` and `owners-label` plugins
	// are enabled.
	approveConfig := enabledOrgReposForPlugin(cfg, approve.PluginName, false)
	blunderbussConfig := enabledOrgReposForPlugin(cfg, blunderbuss.PluginName, false)
	ownersLabelConfig := enabledOrgReposForPlugin(cfg, ownerslabel.PluginName, false)
	return approveConfig.union(blunderbussConfig).union(ownersLabelConfig)
}

type FileInRepoExistsChecker interface {
	GetRepos(org string, isUser bool) ([]github.Repo, error)
	GetFile(org, repo, filepath, commit string) ([]byte, error)
}

func verifyOwnersPresence(cfg *plugins.Configuration, rc FileInRepoExistsChecker) error {
	ownersConfig := orgReposUsingOwnersFile(cfg)

	var missing []string
	for org, excluded := range ownersConfig.orgExceptions {
		repos, err := rc.GetRepos(org, false)
		if err != nil {
			return err
		}

		for _, repo := range repos {
			if excluded.Has(repo.FullName) || repo.Archived {
				continue
			}
			if _, err := rc.GetFile(repo.Owner.Login, repo.Name, "OWNERS", ""); err != nil {
				if _, nf := err.(*github.FileNotFound); nf {
					missing = append(missing, repo.FullName)
				} else {
					return fmt.Errorf("got error: %w", err)
				}
			}
		}
	}

	for repo := range ownersConfig.repos {
		items := strings.Split(repo, "/")
		if len(items) != 2 {
			return fmt.Errorf("bad repository '%s', expected org/repo format", repo)
		}
		if _, err := rc.GetFile(items[0], items[1], "OWNERS", ""); err != nil {
			if _, nf := err.(*github.FileNotFound); nf {
				missing = append(missing, repo)
			} else {
				return fmt.Errorf("got error: %w", err)
			}
		}
	}

	if len(missing) > 0 {
		return fmt.Errorf("the following orgs or repos enable at least one"+
			" plugin that uses OWNERS files (%s), but their master branch does not contain"+
			" a root level OWNERS file: %v", pluginsWithOwnersFile(), missing)
	}
	return nil
}

func verifyOwnersPlugin(cfg *plugins.Configuration) error {
	ownersConfig := orgReposUsingOwnersFile(cfg)
	validateOwnersConfig := enabledOrgReposForPlugin(cfg, verifyowners.PluginName, false)

	invalid := ownersConfig.difference(validateOwnersConfig).items()
	if len(invalid) > 0 {
		return fmt.Errorf("the following orgs or repos "+
			"enable at least one plugin that uses OWNERS files (%s) "+
			"but do not enable the %s plugin to ensure validity of OWNERS files: %v",
			pluginsWithOwnersFile(), verifyowners.PluginName, invalid,
		)
	}
	return nil
}

func verifyLabelPlugin(label plugins.Label) error {
	var orgReposWithEmptyLabelConfig []string
	var errs []error
	restrictedAndAdditionalLabels := make(map[string][]string)
	for orgRepo, restrictedLabels := range label.RestrictedLabels {
		for _, restrictedLabel := range restrictedLabels {
			if label.IsRestrictedLabelInAdditionalLables(restrictedLabel.Label) {
				restrictedAndAdditionalLabels[restrictedLabel.Label] = append(restrictedAndAdditionalLabels[restrictedLabel.Label], orgRepo)
			}
			if restrictedLabel.Label == "" {
				orgReposWithEmptyLabelConfig = append(orgReposWithEmptyLabelConfig, orgRepo)
			}
		}
	}

	for label, repos := range restrictedAndAdditionalLabels {
		sort.Strings(repos)
		errs = append(errs,
			fmt.Errorf("the following orgs or repos configure the label plugin with the restricted label %s which is also configured as an additional label: %s", label, strings.Join(repos, ", ")))
	}

	if len(orgReposWithEmptyLabelConfig) > 0 {
		sort.Strings(orgReposWithEmptyLabelConfig)
		errs = append(errs, fmt.Errorf("the following orgs or repos configure the %s plugin with the empty string as a label name in restricted labels: %s",
			labelplugin.PluginName, strings.Join(orgReposWithEmptyLabelConfig, ", "),
		))
	}
	return utilerrors.NewAggregate(errs)
}

func validateTriggers(cfg *config.Config, pcfg *plugins.Configuration) error {
	configuredRepos := sets.New[string]()
	for orgRepo := range cfg.JobConfig.PresubmitsStatic {
		configuredRepos.Insert(orgRepo)
	}
	for orgRepo := range cfg.JobConfig.PostsubmitsStatic {
		configuredRepos.Insert(orgRepo)
	}

	configured := newOrgRepoConfig(map[string]sets.Set[string]{}, configuredRepos)
	enabled := enabledOrgReposForPlugin(pcfg, trigger.PluginName, false)

	if missing := configured.difference(enabled).items(); len(missing) > 0 {
		return fmt.Errorf("the following repos have jobs configured but do not have the %s plugin enabled: %s", trigger.PluginName, strings.Join(missing, ", "))
	}
	return nil
}
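// For reference, the in-repo config checked below is a repository's .prow.yaml
// (or .prow/ directory); a minimal, made-up example would be:
//
//	presubmits:
//	- name: pull-example-unit
//	  always_run: true
//	  spec:
//	    containers:
//	    - image: golang:1.22
//	      command: ["go", "test", "./..."]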
func validateInRepoConfig(cfg *config.Config, filepath, repoIdentifier string, strict bool) error {
	var dir string
	var err error
	// Unfortunately we must continue to support the filepath arg for existing uses.
	if filepath != "" {
		dir = path.Dir(filepath)
	} else {
		if dir, err = os.Getwd(); err != nil {
			return fmt.Errorf("failed to get current working directory: %w", err)
		}
	}
	prowYAML, err := config.ReadProwYAML(logrus.WithField("repo", repoIdentifier), dir, strict)
	if err != nil {
		return fmt.Errorf("failed to read Prow YAML: %w", err)
	}

	if err := config.DefaultAndValidateProwYAML(cfg, prowYAML, repoIdentifier); err != nil {
		return fmt.Errorf("failed to validate Prow YAML: %w", err)
	}

	var errs []error
	for _, pre := range prowYAML.Presubmits {
		if !cfg.Gerrit.IsAllowedPresubmitTrigger(pre.RerunCommand) {
			errs = append(errs, fmt.Errorf("rerun command %s in job %s does not conform to test command requirements, "+
				"please make sure the trigger regex is a subset of %s and the rerun command matches the trigger regex",
				pre.RerunCommand, pre.Name, cfg.Gerrit.AllowedPresubmitTriggerReRawString))
		}
	}
	if len(errs) > 0 {
		return utilerrors.NewAggregate(errs)
	}

	return nil
}

func validateTideContextPolicy(cfg *config.Config) error {
	// We can not know all possible branches without asking GitHub, so instead we verify
	// all branches that are explicitly configured on any job. This will hopefully catch
	// most cases.
	allKnownOrgRepoBranches := map[string]sets.Set[string]{}
	for orgRepo, jobs := range cfg.PresubmitsStatic {
		if _, ok := allKnownOrgRepoBranches[orgRepo]; !ok {
			allKnownOrgRepoBranches[orgRepo] = sets.Set[string]{}
		}

		for _, job := range jobs {
			allKnownOrgRepoBranches[orgRepo].Insert(job.Branches...)
		}
	}

	// We have to disable InRepoConfig for this check, else we will
	// attempt to clone the repo if it is enabled.
	originalInRepoConfig := cfg.InRepoConfig
	cfg.InRepoConfig = config.InRepoConfig{}
	defer func() { cfg.InRepoConfig = originalInRepoConfig }()

	var errs []error
	for orgRepo, branches := range allKnownOrgRepoBranches {
		split := strings.Split(orgRepo, "/")
		if n := len(split); n != 2 {
			// May happen for gerrit
			continue
		}
		org, repo := split[0], split[1]

		if branches.Len() == 0 {
			// Make sure we always test at least one branch per repo
			// to catch cases where people only have jobs with empty
			// branch configs.
			branches.Insert("master")
		}
		for _, branch := range sets.List(branches) {
			if _, err := cfg.GetTideContextPolicy(nil, org, repo, branch, nil, ""); err != nil {
				errs = append(errs, fmt.Errorf("context policy for %s branch in %s/%s is invalid: %w", branch, org, repo, err))
			}
		}
	}

	return utilerrors.NewAggregate(errs)
}
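// validateJobCluster and validateCluster below check the `cluster` field of
// jobs: agents that do not support it (currently only jenkins) must leave it
// empty or use the default alias, and, when plank's build cluster status file
// is available through the configured opener, every referenced cluster must
// appear in that file; an unreachable cluster only logs a warning.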
var agentsNotSupportingCluster = sets.New[string]("jenkins")

func validateJobCluster(job config.JobBase, statuses map[string]plank.ClusterStatus) error {
	if job.Cluster != "" && job.Cluster != kube.DefaultClusterAlias && agentsNotSupportingCluster.Has(job.Agent) {
		return fmt.Errorf("%s: cannot set cluster field if agent is %s", job.Name, job.Agent)
	}
	if statuses != nil {
		status, ok := statuses[job.Cluster]
		if !ok {
			return fmt.Errorf("job configuration for %q specifies unknown 'cluster' value %q", job.Name, job.Cluster)
		}
		if status != plank.ClusterStatusReachable {
			logrus.Warnf("Job configuration for %q specifies cluster %q which cannot be reached from Plank. Status: %q", job.Name, job.Cluster, status)
		}
	}
	return nil
}

func validateCluster(cfg *config.Config, opener io.Opener) error {
	var statuses map[string]plank.ClusterStatus
	if location := cfg.Plank.BuildClusterStatusFile; location != "" {
		reader, err := opener.Reader(context.Background(), location)
		if err != nil {
			if !io.IsNotExist(err) {
				return fmt.Errorf("error opening build cluster status file for reading: %w", err)
			}
			logrus.Warnf("Build cluster status file location was specified, but could not be found: %v. This is expected when the location is first configured, before plank creates the file.", err)
		} else {
			defer reader.Close()
			b, err := stdio.ReadAll(reader)
			if err != nil {
				return fmt.Errorf("error reading build cluster status file: %w", err)
			}
			statuses = map[string]plank.ClusterStatus{}
			if err := json.Unmarshal(b, &statuses); err != nil {
				return fmt.Errorf("error unmarshaling build cluster status file: %w", err)
			}
		}
	}
	var errs []error
	for orgRepo, jobs := range cfg.PresubmitsStatic {
		for _, job := range jobs {
			if err := validateJobCluster(job.JobBase, statuses); err != nil {
				errs = append(errs, fmt.Errorf("%s: %w", orgRepo, err))
			}
		}
	}
	for _, job := range cfg.Periodics {
		if err := validateJobCluster(job.JobBase, statuses); err != nil {
			errs = append(errs, fmt.Errorf("%s: %w", "invalid periodic job", err))
		}
	}
	for orgRepo, jobs := range cfg.PostsubmitsStatic {
		for _, job := range jobs {
			if err := validateJobCluster(job.JobBase, statuses); err != nil {
				errs = append(errs, fmt.Errorf("%s: %w", orgRepo, err))
			}
		}
	}
	return utilerrors.NewAggregate(errs)
}

func validateAdditionalProwConfigIsInOrgRepoDirectoryStructure(filesystem fs.FS, supplementalProwConfigDirs, supplementalPluginsConfigDirs []string, supplementalProwConfigsFileNameSuffix, supplementalPluginsConfigFileNameSuffix string) error {
	var errs []error

	for _, supplementalProwConfigDir := range supplementalProwConfigDirs {
		if err := validateAdditionalConfigIsInOrgRepoDirectoryStructure(supplementalProwConfigDir, filesystem, func() hierarchicalConfig { return &config.Config{} }, supplementalProwConfigsFileNameSuffix); err != nil {
			errs = append(errs, err)
		}
	}
	for _, supplementalPluginsConfigDir := range supplementalPluginsConfigDirs {
		if err := validateAdditionalConfigIsInOrgRepoDirectoryStructure(supplementalPluginsConfigDir, filesystem, func() hierarchicalConfig { return &plugins.Configuration{} }, supplementalPluginsConfigFileNameSuffix); err != nil {
			errs = append(errs, err)
		}
	}

	return utilerrors.NewAggregate(errs)
}
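// The layout the walker below enforces looks roughly like this (directory and
// file names are illustrative; the actual file suffix comes from the
// supplemental config flags' companion suffix options):
//
//	<supplemental-config-dir>/
//	  example-org/
//	    _prowconfig.yaml              # may only contain config for example-org
//	    example-repo/
//	      _prowconfig.yaml            # may only contain config for example-org/example-repo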
func validateAdditionalConfigIsInOrgRepoDirectoryStructure(root string, filesystem fs.FS, target func() hierarchicalConfig, filesuffix string) error {
	var errs []error
	errs = append(errs, fs.WalkDir(filesystem, root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			errs = append(errs, fmt.Errorf("error when walking: %w", err))
			return nil
		}
		// Kubernetes configmap mounts create symlinks for the configmap keys that point to files prefixed with '..'.
		// This allows it to do atomic changes by changing the symlink to a new target when the configmap content changes.
		// This means that we should ignore the '..'-prefixed files, otherwise we might end up reading a half-written file and will
		// get duplicate data.
		if strings.HasPrefix(d.Name(), "..") {
			if d.IsDir() {
				return fs.SkipDir
			}
			return nil
		}

		if d.IsDir() || !strings.HasSuffix(path, filesuffix) {
			return nil
		}

		pathWithoutRoot := strings.TrimPrefix(path, root)
		pathWithoutRoot = strings.TrimPrefix(pathWithoutRoot, "/")

		pathElements := strings.Split(pathWithoutRoot, "/")
		nestingDepth := len(pathElements) - 1

		var isOrgConfig, isRepoConfig bool
		switch nestingDepth {
		case 0:
			// Global config, might contain anything or not even be a Prow config
			return nil
		case 1:
			isOrgConfig = true
		case 2:
			isRepoConfig = true
		default:
			errs = append(errs, fmt.Errorf("config %s is at an invalid location. All configs must be below %s. If they are org-specific, they must be in a folder named like the org. If they are repo-specific, they must be in a folder named like the repo below a folder named like the org.", path, root))
			return nil
		}

		cfg := target()
		isGlobal, targetedOrgs, targetedRepos, err := getSupplementalConfigScope(path, filesystem, cfg)
		if err != nil {
			errs = append(errs, err)
			return nil
		}

		if isOrgConfig {
			expectedTargetOrg := pathElements[0]
			if !isGlobal && len(targetedOrgs) == 1 && targetedOrgs.Has(expectedTargetOrg) && len(targetedRepos) == 0 {
				return nil
			}
			errMsg := fmt.Sprintf("config %s is invalid: Must contain only config for org %s, but", path, expectedTargetOrg)
			var needsAnd bool
			if isGlobal {
				errMsg += " contains global config"
				needsAnd = true
			}
			for _, org := range sets.List(targetedOrgs.Delete(expectedTargetOrg)) {
				errMsg += prefixWithAndIfNeeded(fmt.Sprintf(" contains config for org %s", org), needsAnd)
				needsAnd = true
			}
			for _, repo := range sets.List(targetedRepos) {
				errMsg += prefixWithAndIfNeeded(fmt.Sprintf(" contains config for repo %s", repo), needsAnd)
				needsAnd = true
			}
			errs = append(errs, errors.New(errMsg))
			return nil
		}

		if isRepoConfig {
			expectedTargetRepo := pathElements[0] + "/" + pathElements[1]
			if !isGlobal && len(targetedOrgs) == 0 && len(targetedRepos) == 1 && targetedRepos.Has(expectedTargetRepo) {
				return nil
			}

			errMsg := fmt.Sprintf("config %s is invalid: Must only contain config for repo %s, but", path, expectedTargetRepo)
			var needsAnd bool
			if isGlobal {
				errMsg += " contains global config"
				needsAnd = true
			}
			for _, org := range sets.List(targetedOrgs) {
				errMsg += prefixWithAndIfNeeded(fmt.Sprintf(" contains config for org %s", org), needsAnd)
				needsAnd = true
			}
			for _, repo := range sets.List(targetedRepos.Delete(expectedTargetRepo)) {
				errMsg += prefixWithAndIfNeeded(fmt.Sprintf(" contains config for repo %s", repo), needsAnd)
				needsAnd = true
			}
			errs = append(errs, errors.New(errMsg))
			return nil
		}

		// We should have left the function earlier. Error out so bugs in this code can not be abused.
		return fmt.Errorf("BUG: You should never see this. Path: %s, isGlobal: %t, targetedOrgs: %v, targetedRepos: %v", path, isGlobal, targetedOrgs, targetedRepos)
	}))

	return utilerrors.NewAggregate(errs)
}

func validateUnmanagedBranchprotectionConfigDoesntHaveSubconfig(bp config.BranchProtection) error {
	var errs []error
	if bp.Unmanaged != nil && *bp.Unmanaged {
		if doesUnmanagedBranchprotectionPolicyHaveSettings(bp.Policy) && !bp.HasManagedOrgs() && !bp.HasManagedRepos() && !bp.HasManagedBranches() {
			errs = append(errs, errors.New("branch protection is globally set to unmanaged, but has configuration"))
		}
		for orgName, org := range bp.Orgs {
			// The global level setting is overridden by a lower level, so skip
			// the org if any of its repos or branches manage branch protection.
			if org.HasManagedRepos() {
				continue
			}
			hasManagedBranches := false
			for _, repo := range org.Repos {
				if repo.HasManagedBranches() {
					hasManagedBranches = true
					break
				}
			}
			if hasManagedBranches {
				continue
			}
			errs = append(errs, fmt.Errorf("branch protection config is globally set to unmanaged but has configuration for org %s without setting the org to unmanaged: false", orgName))
		}
	}
	for orgName, orgConfig := range bp.Orgs {
		if orgConfig.Unmanaged != nil && *orgConfig.Unmanaged {
			if doesUnmanagedBranchprotectionPolicyHaveSettings(orgConfig.Policy) && !orgConfig.HasManagedRepos() && !orgConfig.HasManagedBranches() {
				errs = append(errs, fmt.Errorf("branch protection config for org %s is set to unmanaged, but it defines settings", orgName))
			}
			for repoName, repo := range orgConfig.Repos {
				// The org level setting is overridden by a lower level
				if repo.HasManagedBranches() {
					continue
				}
				errs = append(errs, fmt.Errorf("branch protection config for repo %s/%s is defined, but branch protection is unmanaged for org %s without setting the repo to unmanaged: false", orgName, repoName, orgName))
			}
		}

		for repoName, repoConfig := range orgConfig.Repos {
			if repoConfig.Unmanaged != nil && *repoConfig.Unmanaged {
				if doesUnmanagedBranchprotectionPolicyHaveSettings(repoConfig.Policy) && !repoConfig.HasManagedBranches() {
					errs = append(errs, fmt.Errorf("branch protection config for repo %s/%s is set to unmanaged, but it defines settings", orgName, repoName))
				}

				for branchName, branch := range repoConfig.Branches {
					// The repo level setting is overridden by a lower level
					if branch.Policy.Managed() {
						continue
					}
					errs = append(errs, fmt.Errorf("branch protection for repo %s/%s is set to unmanaged, but it defines settings for branch %s without setting the branch to unmanaged: false", orgName, repoName, branchName))
				}
			}

			for branchName, branchConfig := range repoConfig.Branches {
				if branchConfig.Unmanaged != nil && *branchConfig.Unmanaged && doesUnmanagedBranchprotectionPolicyHaveSettings(branchConfig.Policy) {
					errs = append(errs, fmt.Errorf("branch protection config for branch %s in repo %s/%s is set to unmanaged but defines settings", branchName, orgName, repoName))
				}
			}
		}
	}

	return utilerrors.NewAggregate(errs)
}

func doesUnmanagedBranchprotectionPolicyHaveSettings(p config.Policy) bool {
	emptyRef := config.Policy{Unmanaged: p.Unmanaged}
	return !reflect.DeepEqual(p, emptyRef)
}

func prefixWithAndIfNeeded(s string, needsAnd bool) string {
	if needsAnd {
		return " and" + s
	}
	return s
}

type hierarchicalConfig interface {
	HasConfigFor() (bool, sets.Set[string], sets.Set[string])
}
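// getSupplementalConfigScope reports what a supplemental config file targets.
// For instance (made-up content), a file that only sets branch protection for
// org "example-org" should yield isGlobal=false, orgs={"example-org"},
// repos={}, which is exactly what the org-level directory check above expects.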
func getSupplementalConfigScope(path string, filesystem fs.FS, cfg hierarchicalConfig) (global bool, orgs sets.Set[string], repos sets.Set[string], err error) {
	data, err := fs.ReadFile(filesystem, path)
	if err != nil {
		return false, nil, nil, fmt.Errorf("failed to read %s: %w", path, err)
	}
	if err := yaml.Unmarshal(data, cfg); err != nil {
		return false, nil, nil, fmt.Errorf("failed to unmarshal %s into %T: %w", path, cfg, err)
	}

	global, orgs, repos = cfg.HasConfigFor()
	return global, orgs, repos, nil
}

type ghAppListingClient interface {
	ListAppInstallations() ([]github.AppInstallation, error)
}

func validateGitHubAppIsInstalled(client ghAppListingClient, allRepos sets.Set[string]) error {
	installations, err := client.ListAppInstallations()
	if err != nil {
		return fmt.Errorf("failed to list app installations from GitHub: %w", err)
	}
	orgsWithInstalledApp := sets.Set[string]{}
	for _, installation := range installations {
		orgsWithInstalledApp.Insert(installation.Account.Login)
	}

	var errs []error
	for _, repo := range sets.List(allRepos) {
		if org := strings.Split(repo, "/")[0]; !orgsWithInstalledApp.Has(org) {
			errs = append(errs, fmt.Errorf("there is configuration for the GitHub org %q but the GitHub app is not installed there", org))
		}
	}

	return utilerrors.NewAggregate(errs)
}

func validateRequiredJobAnnotations(a []string, c config.JobConfig) error {
	validator := func(job config.JobBase, annotations []string) error {
		var errs []error
		for _, annotation := range annotations {
			if _, ok := job.Annotations[annotation]; !ok {
				errs = append(errs, errors.New(annotation))
			}
		}
		return utilerrors.NewAggregate(errs)
	}
	var errs []error

	for _, presubmits := range c.PresubmitsStatic {
		for _, presubmit := range presubmits {
			if err := validator(presubmit.JobBase, a); err != nil {
				errs = append(errs, fmt.Errorf("job '%s' is missing required annotations: %w", presubmit.Name, err))
			}
		}
	}
	for _, postsubmits := range c.PostsubmitsStatic {
		for _, postsubmit := range postsubmits {
			if err := validator(postsubmit.JobBase, a); err != nil {
				errs = append(errs, fmt.Errorf("job '%s' is missing required annotations: %w", postsubmit.Name, err))
			}
		}
	}
	for _, periodic := range c.Periodics {
		if err := validator(periodic.JobBase, a); err != nil {
			errs = append(errs, fmt.Errorf("job '%s' is missing required annotations: %w", periodic.Name, err))
		}
	}
	return utilerrors.NewAggregate(errs)
}