sigs.k8s.io/prow@v0.0.0-20240503223140-c5e374dc7eb1/pkg/config/config.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package config knows how to read and parse config.yaml.
package config

import (
    "bytes"
    "compress/gzip"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "net/url"
    "os"
    "path"
    "path/filepath"
    "regexp"
    "runtime/debug"
    "sort"
    "strconv"
    "strings"
    "sync"
    "text/template"
    "time"

    gitignore "github.com/denormal/go-gitignore"
    "github.com/google/go-cmp/cmp"
    "github.com/google/go-cmp/cmp/cmpopts"
    "github.com/sirupsen/logrus"
    pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
    "gopkg.in/robfig/cron.v2"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    utilerrors "k8s.io/apimachinery/pkg/util/errors"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/validation"
    gerritsource "sigs.k8s.io/prow/pkg/gerrit/source"
    "sigs.k8s.io/yaml"

    prowapi "sigs.k8s.io/prow/pkg/apis/prowjobs/v1"
    "sigs.k8s.io/prow/pkg/git/types"
    "sigs.k8s.io/prow/pkg/git/v2"
    "sigs.k8s.io/prow/pkg/github"
    "sigs.k8s.io/prow/pkg/kube"
    "sigs.k8s.io/prow/pkg/pod-utils/decorate"
    "sigs.k8s.io/prow/pkg/pod-utils/downwardapi"
)

const (
    // DefaultJobTimeout represents the default deadline for a prow job.
    DefaultJobTimeout = 24 * time.Hour

    // DefaultMoonrakerClientTimeout is the default timeout for all Moonraker
    // clients. Note that this is a client-side timeout, and does not affect
    // whether Moonraker itself will finish doing the Git fetch/parsing in the
    // background (esp. for new repos that need the extra cloning time).
    DefaultMoonrakerClientTimeout = 10 * time.Minute

    ProwImplicitGitResource = "PROW_IMPLICIT_GIT_REF"

    // ConfigVersionFileName is the name of a file that will be added to
    // all configmaps by the config updater and contains the git SHA that
    // triggered said config update. The config loading in turn will pick
    // it up if present. This allows components to include the config version
    // in their logs, which can be useful for debugging.
    ConfigVersionFileName = "VERSION"

    DefaultTenantID = "GlobalDefaultID"

    ProwIgnoreFileName = ".prowignore"
)

var (
    DefaultDiffOpts []cmp.Option = []cmp.Option{cmpopts.IgnoreFields(TideBranchMergeType{}, "Regexpr"),
        cmpopts.IgnoreUnexported(Gerrit{})}
)

// Config is a read-only snapshot of the config.
type Config struct {
    JobConfig
    ProwConfig
}
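// The embedded JobConfig and ProwConfig fields are promoted onto Config, so a
// fully parsed config can be consumed through a single value. An illustrative
// sketch (not part of the upstream file) of reading a config file directly;
// the package's loading helpers are the real entry points, and the
// "config.yaml" path is hypothetical:
//
//	raw, err := os.ReadFile("config.yaml")
//	if err != nil {
//		logrus.WithError(err).Fatal("could not read config")
//	}
//	var c Config
//	if err := yaml.Unmarshal(raw, &c); err != nil {
//		logrus.WithError(err).Fatal("could not parse config")
//	}
//	// Promoted fields from both embedded structs are directly accessible.
//	logrus.Infof("prowjob namespace: %s", c.ProwJobNamespace)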
// JobConfig is config for all prow jobs.
type JobConfig struct {
    // Presets apply to all job types.
    Presets []Preset `json:"presets,omitempty"`
    // .PresubmitsStatic contains the presubmits in Prow's main config.
    // **Warning:** This does not return dynamic presubmits configured
    // inside the code repo, hence giving an incomplete view. Use
    // `GetPresubmits` instead if possible.
    PresubmitsStatic map[string][]Presubmit `json:"presubmits,omitempty"`
    // .PostsubmitsStatic contains the postsubmits in Prow's main config.
    // **Warning:** This does not return dynamic postsubmits configured
    // inside the code repo, hence giving an incomplete view. Use
    // `GetPostsubmits` instead if possible.
    PostsubmitsStatic map[string][]Postsubmit `json:"postsubmits,omitempty"`

    // Periodics are not associated with any repo.
    Periodics []Periodic `json:"periodics,omitempty"`

    // AllRepos contains all Repos that have one or more jobs configured or
    // for which a tide query is configured.
    AllRepos sets.Set[string] `json:"-"`

    // ProwYAMLGetterWithDefaults is the function to get a ProwYAML with
    // defaults based on the rest of the Config. Tests should provide their own
    // implementation.
    ProwYAMLGetterWithDefaults ProwYAMLGetter `json:"-"`

    // ProwYAMLGetter is like ProwYAMLGetterWithDefaults, but does not default
    // the retrieved ProwYAML with defaulted values. It is mocked by
    // TestGetPresubmitsAndPostubmitsCached (and in production, prowYAMLGetter()
    // is used).
    ProwYAMLGetter ProwYAMLGetter `json:"-"`

    // DecorateAllJobs determines whether all jobs are decorated by default.
    DecorateAllJobs bool `json:"decorate_all_jobs,omitempty"`

    // ProwIgnored is a well known, unparsed field where non-Prow fields can
    // be defined without conflicting with unknown field validation.
    ProwIgnored *json.RawMessage `json:"prow_ignored,omitempty"`
}

// ProwConfig is config for all prow controllers.
type ProwConfig struct {
    // The git SHA from which this config was generated.
    ConfigVersionSHA     string               `json:"config_version_sha,omitempty"`
    Tide                 Tide                 `json:"tide,omitempty"`
    Plank                Plank                `json:"plank,omitempty"`
    Sinker               Sinker               `json:"sinker,omitempty"`
    Deck                 Deck                 `json:"deck,omitempty"`
    BranchProtection     BranchProtection     `json:"branch-protection"`
    Gerrit               Gerrit               `json:"gerrit"`
    GitHubReporter       GitHubReporter       `json:"github_reporter"`
    Horologium           Horologium           `json:"horologium"`
    SlackReporterConfigs SlackReporterConfigs `json:"slack_reporter_configs,omitempty"`
    InRepoConfig         InRepoConfig         `json:"in_repo_config"`

    // Gangway contains configurations needed by the Prow API server of the
    // same name. It encodes an allowlist of API clients and what kinds of Prow
    // Jobs they are authorized to trigger.
    Gangway Gangway `json:"gangway,omitempty"`

    // Moonraker contains configurations for Moonraker, such as the client
    // timeout to use for all Prow services that need to send requests to
    // Moonraker.
    Moonraker Moonraker `json:"moonraker,omitempty"`

    // Scheduler contains configuration for the additional scheduler.
    // It has to be explicitly enabled.
    Scheduler Scheduler `json:"scheduler,omitempty"`

    // TODO: Move this out of the main config.
    JenkinsOperators []JenkinsOperator `json:"jenkins_operators,omitempty"`

    // ProwJobNamespace is the namespace in the cluster that prow
    // components will use for looking up ProwJobs. The namespace
    // needs to exist and will not be created by prow.
    // Defaults to "default".
    ProwJobNamespace string `json:"prowjob_namespace,omitempty"`
    // PodNamespace is the namespace in the cluster that prow
    // components will use for looking up Pods owned by ProwJobs.
    // The namespace needs to exist and will not be created by prow.
    // Defaults to "default".
    PodNamespace string `json:"pod_namespace,omitempty"`

    // LogLevel enables dynamically updating the log level of the
    // standard logger that is used by all prow components.
    //
    // Valid values:
    //
    // "debug", "info", "warn", "warning", "error", "fatal", "panic"
    //
    // Defaults to "info".
    LogLevel string `json:"log_level,omitempty"`

    // PushGateway is a prometheus push gateway.
    PushGateway PushGateway `json:"push_gateway,omitempty"`

    // OwnersDirDenylist is used to configure regular expressions matching directories
    // to ignore when searching for OWNERS{,_ALIAS} files in a repo.
    OwnersDirDenylist *OwnersDirDenylist `json:"owners_dir_denylist,omitempty"`

    // Pub/Sub subscriptions that we want to listen to.
    PubSubSubscriptions PubsubSubscriptions `json:"pubsub_subscriptions,omitempty"`

    // PubSubTriggers defines Pub/Sub subscriptions that we want to listen to,
    // and can be used to restrict the build cluster on a topic.
    PubSubTriggers PubSubTriggers `json:"pubsub_triggers,omitempty"`

    // GitHubOptions allows users to control how prow applications display GitHub website links.
    GitHubOptions GitHubOptions `json:"github,omitempty"`

    // StatusErrorLink is the URL that will be used for jenkins prowJobs that can't be
    // found, or have another generic issue. The default that will be used if this is not set
    // is: https://github.com/kubernetes/test-infra/issues.
    StatusErrorLink string `json:"status_error_link,omitempty"`

    // DefaultJobTimeout is the default deadline for prow jobs. This value is used when
    // no timeout is configured at the job level. This value is set to 24 hours.
    DefaultJobTimeout *metav1.Duration `json:"default_job_timeout,omitempty"`

    // ManagedWebhooks contains information about all GitHub repositories and organizations
    // which are using a non-global HMAC token.
    ManagedWebhooks ManagedWebhooks `json:"managed_webhooks,omitempty"`

    // ProwJobDefaultEntries holds a list of defaults for specific values.
    // Each entry in the slice specifies Repo and Cluster regexp filter fields to
    // match against the jobs and a corresponding ProwJobDefault. All entries that
    // match a job are used. Later matching entries override the fields of earlier
    // matching entries.
    ProwJobDefaultEntries []*ProwJobDefaultEntry `json:"prowjob_default_entries,omitempty"`

    // DisabledClusters holds a list of disabled build cluster names. The same context names will be ignored while
    // Prow components load the kubeconfig files.
    DisabledClusters []string `json:"disabled_clusters,omitempty"`
}

type InRepoConfig struct {
    // Enabled describes whether InRepoConfig is enabled for a given repository. This can
    // be set globally, per org or per repo using '*', 'org' or 'org/repo' as key. The
    // narrowest match always takes precedence.
    Enabled map[string]*bool `json:"enabled,omitempty"`
    // AllowedClusters is a list of allowed cluster names that can be used for jobs on
    // a given repo. All clusters that are allowed for the specific repo, its org or
    // globally can be used.
    AllowedClusters map[string][]string `json:"allowed_clusters,omitempty"`
}

func SplitRepoName(fullRepoName string) (string, string, error) {
    // A Gerrit org/repo contains "https://", so it must be handled differently.
    if gerritsource.IsGerritOrg(fullRepoName) {
        return gerritsource.OrgRepoFromCloneURI(fullRepoName)
    }

    s := strings.SplitN(fullRepoName, "/", 2)
    if len(s) != 2 {
        return "", "", fmt.Errorf("repo %s cannot be split into org/repo", fullRepoName)
    }
    return s[0], s[1], nil
}

// InRepoConfigEnabled returns whether InRepoConfig is enabled for a given repository.
// No assumption is made about whether the identifier includes an http:// or https:// prefix.
func (c *Config) InRepoConfigEnabled(identifier string) bool {
    for _, key := range keysForIdentifier(identifier) {
        if c.InRepoConfig.Enabled[key] != nil {
            return *c.InRepoConfig.Enabled[key]
        }
    }
    return false
}

// InRepoConfigAllowsCluster determines if a given cluster may be used for a given repository.
// Assumes that the config will not include http:// or https:// prefixes.
func (c *Config) InRepoConfigAllowsCluster(clusterName, identifier string) bool {
    for _, key := range keysForIdentifier(identifier) {
        for _, allowedCluster := range c.InRepoConfig.AllowedClusters[key] {
            if allowedCluster == clusterName {
                return true
            }
        }
    }
    return false
}

// keysForIdentifier returns all possible config keys for the given identifier.
// To accommodate Gerrit identifiers that carry an `https://` prefix, it
// returns keys containing both `https://foo/bar` and `foo/bar` for the
// identifier `https://foo/bar`. The returned keys also include `https://foo`,
// `foo`, and `*`.
func keysForIdentifier(identifier string) []string {
    var candidates []string

    normalizedIdentifier := identifier
    if gerritsource.IsGerritOrg(identifier) {
        normalizedIdentifier = gerritsource.NormalizeCloneURI(identifier)
    }

    candidates = append(candidates, normalizedIdentifier)
    // gerritsource.TrimHTTPSPrefix trims the https:// prefix; it is a no-op
    // for identifiers without one.
    candidates = append(candidates, gerritsource.TrimHTTPSPrefix(identifier))

    org, _, _ := SplitRepoName(normalizedIdentifier)
    // SplitRepoName errors if it fails to split; we ignore the error and just
    // check org != "" instead.
    if org != "" {
        candidates = append(candidates, org)
        // As above, trimming is a no-op for orgs without an https:// prefix.
        candidates = append(candidates, gerritsource.TrimHTTPSPrefix(org))
    }

    candidates = append(candidates, "*")

    var res []string
    visited := sets.New[string]()
    for _, cand := range candidates {
        if visited.Has(cand) {
            continue
        }
        res = append(res, cand)
        visited.Insert(cand)
    }

    return res
}

// RefGetter is used to retrieve a Git reference. Its purpose is
// to be able to defer calling out to GitHub in the context of
// inrepoconfig, to make sure it's only done when we actually need
// to have that info.
type RefGetter = func() (string, error)

type refGetterForGitHubPullRequestClient interface {
    GetPullRequest(org, repo string, number int) (*github.PullRequest, error)
    GetRef(org, repo, ref string) (string, error)
}
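// An illustrative sketch (not part of the upstream file) of how the key order
// produced by keysForIdentifier gives InRepoConfig.Enabled its
// narrowest-match-wins semantics; the org and repo names are hypothetical:
//
//	yes, no := true, false
//	c := &Config{ProwConfig: ProwConfig{InRepoConfig: InRepoConfig{
//		Enabled: map[string]*bool{
//			"*":                     &yes,
//			"kubernetes":            &yes,
//			"kubernetes/test-infra": &no,
//		},
//	}}}
//	// keysForIdentifier yields "kubernetes/test-infra", then "kubernetes",
//	// then "*", so the repo-level override is consulted first.
//	c.InRepoConfigEnabled("kubernetes/test-infra") // false
//	c.InRepoConfigEnabled("kubernetes/kubernetes") // true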
// NewRefGetterForGitHubPullRequest returns a brand new RefGetterForGitHubPullRequest.
func NewRefGetterForGitHubPullRequest(ghc refGetterForGitHubPullRequestClient, org, repo string, number int) *RefGetterForGitHubPullRequest {
    return &RefGetterForGitHubPullRequest{
        ghc:    ghc,
        org:    org,
        repo:   repo,
        number: number,
        lock:   &sync.Mutex{},
    }
}

// RefGetterForGitHubPullRequest is used to get the Presubmits for a GitHub PullRequest
// when that PullRequest hasn't been fetched yet. It will only fetch it if someone calls
// its .PullRequest() func. It is thread-safe.
type RefGetterForGitHubPullRequest struct {
    ghc     refGetterForGitHubPullRequestClient
    org     string
    repo    string
    number  int
    lock    *sync.Mutex
    pr      *github.PullRequest
    baseSHA string
}

func (rg *RefGetterForGitHubPullRequest) PullRequest() (*github.PullRequest, error) {
    rg.lock.Lock()
    defer rg.lock.Unlock()
    if rg.pr != nil {
        return rg.pr, nil
    }

    pr, err := rg.ghc.GetPullRequest(rg.org, rg.repo, rg.number)
    if err != nil {
        return nil, err
    }

    rg.pr = pr
    return rg.pr, nil
}

// HeadSHA is a RefGetter that returns the headSHA for the PullRequest.
func (rg *RefGetterForGitHubPullRequest) HeadSHA() (string, error) {
    if rg.pr == nil {
        if _, err := rg.PullRequest(); err != nil {
            return "", err
        }
    }
    return rg.pr.Head.SHA, nil
}

// BaseSHA is a RefGetter that returns the SHA of the base ref for the PullRequest.
func (rg *RefGetterForGitHubPullRequest) BaseSHA() (string, error) {
    if rg.pr == nil {
        if _, err := rg.PullRequest(); err != nil {
            return "", err
        }
    }

    // rg.PullRequest also wants the lock, so we must not acquire it before
    // calling that.
    rg.lock.Lock()
    defer rg.lock.Unlock()

    if rg.baseSHA != "" {
        return rg.baseSHA, nil
    }

    baseSHA, err := rg.ghc.GetRef(rg.org, rg.repo, "heads/"+rg.pr.Base.Ref)
    if err != nil {
        return "", err
    }
    rg.baseSHA = baseSHA

    return rg.baseSHA, nil
}

// GetAndCheckRefs resolves all uniquely-identifying information related to the
// retrieval of a *ProwYAML.
func GetAndCheckRefs(
    baseSHAGetter RefGetter,
    headSHAGetters ...RefGetter) (string, []string, error) {

    // Parse "baseSHAGetter".
    baseSHA, err := baseSHAGetter()
    if err != nil {
        return "", nil, fmt.Errorf("failed to get baseSHA: %v", err)
    }

    // Parse "headSHAGetters".
    var headSHAs []string
    for _, headSHAGetter := range headSHAGetters {
        headSHA, err := headSHAGetter()
        if err != nil {
            return "", nil, fmt.Errorf("failed to get headRef: %v", err)
        }
        if headSHA != "" {
            headSHAs = append(headSHAs, headSHA)
        }
    }

    return baseSHA, headSHAs, nil
}
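// An illustrative sketch (not part of the upstream file): the method values
// HeadSHA and BaseSHA satisfy RefGetter, so a pull request's refs can be
// resolved lazily and passed straight to GetAndCheckRefs. The client value
// ghc and the PR coordinates are hypothetical:
//
//	rg := NewRefGetterForGitHubPullRequest(ghc, "some-org", "some-repo", 1234)
//	baseSHA, headSHAs, err := GetAndCheckRefs(rg.BaseSHA, rg.HeadSHA)
//	if err != nil {
//		return err
//	}
//	// The underlying PR is fetched at most once thanks to the cached field;
//	// BaseSHA additionally resolves the base ref via GetRef.
//	_ = baseSHA
//	_ = headSHAs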
// getProwYAMLWithDefaults will load presubmits and postsubmits for the given
// identifier that are versioned inside the tested repo, if the inrepoconfig
// feature is enabled. Consumers that pass in a RefGetter implementation that
// calls out to GitHub, and that also need the result of that GitHub call, can
// keep a pointer to its result, but must nil-check that pointer before
// accessing it.
func (c *Config) getProwYAMLWithDefaults(gc git.ClientFactory, identifier, baseBranch string, baseSHAGetter RefGetter, headSHAGetters ...RefGetter) (*ProwYAML, error) {
    if identifier == "" {
        return nil, errors.New("no identifier for repo given")
    }
    if !c.InRepoConfigEnabled(identifier) {
        return &ProwYAML{}, nil
    }

    baseSHA, headSHAs, err := GetAndCheckRefs(baseSHAGetter, headSHAGetters...)
    if err != nil {
        return nil, err
    }

    prowYAML, err := c.ProwYAMLGetterWithDefaults(c, gc, identifier, baseBranch, baseSHA, headSHAs...)
    if err != nil {
        return nil, err
    }

    return prowYAML, nil
}

// getProwYAML is like getProwYAMLWithDefaults, minus the defaulting logic.
func (c *Config) getProwYAML(gc git.ClientFactory, identifier, baseBranch string, baseSHAGetter RefGetter, headSHAGetters ...RefGetter) (*ProwYAML, error) {
    if identifier == "" {
        return nil, errors.New("no identifier for repo given")
    }
    if !c.InRepoConfigEnabled(identifier) {
        return &ProwYAML{}, nil
    }

    baseSHA, headSHAs, err := GetAndCheckRefs(baseSHAGetter, headSHAGetters...)
    if err != nil {
        return nil, err
    }

    prowYAML, err := c.ProwYAMLGetter(c, gc, identifier, baseBranch, baseSHA, headSHAs...)
    if err != nil {
        return nil, err
    }

    return prowYAML, nil
}

// GetPresubmits will return all presubmits for the given identifier. This includes
// presubmits that are versioned inside the tested repo, if the inrepoconfig feature
// is enabled.
// Consumers that pass in a RefGetter implementation that calls out to GitHub, and
// that also need the result of that GitHub call, can keep a pointer to its result,
// but must nil-check that pointer before accessing it.
func (c *Config) GetPresubmits(gc git.ClientFactory, identifier, baseBranch string, baseSHAGetter RefGetter, headSHAGetters ...RefGetter) ([]Presubmit, error) {
    prowYAML, err := c.getProwYAMLWithDefaults(gc, identifier, baseBranch, baseSHAGetter, headSHAGetters...)
    if err != nil {
        return nil, err
    }

    return append(c.GetPresubmitsStatic(identifier), prowYAML.Presubmits...), nil
}

// GetPresubmitsStatic will return presubmits for the given identifier that are
// defined in Prow's main config; it does not include presubmits versioned inside
// the tested repo. Use GetPresubmits to include those as well.
func (c *Config) GetPresubmitsStatic(identifier string) []Presubmit {
    keys := []string{identifier}
    if gerritsource.IsGerritOrg(identifier) {
        // For Gerrit, allow users to define jobs without the https:// prefix,
        // which is what's supported right now.
        keys = append(keys, gerritsource.TrimHTTPSPrefix(identifier))
    }
    var res []Presubmit
    for _, key := range keys {
        res = append(res, c.PresubmitsStatic[key]...)
    }
    return res
}
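// An illustrative sketch (not part of the upstream file) of the difference
// between the static and the full lookup; gitClientFactory and rg are
// hypothetical values built elsewhere:
//
//	// Only jobs from Prow's main config:
//	static := c.GetPresubmitsStatic("some-org/some-repo")
//
//	// Main-config jobs plus any in-repo jobs defined via the inrepoconfig
//	// feature, resolved at the PR's base and head SHAs:
//	all, err := c.GetPresubmits(gitClientFactory, "some-org/some-repo",
//		"main", rg.BaseSHA, rg.HeadSHA)
//	if err != nil {
//		return err
//	}
//	_ = all // in-repo jobs are appended after the static set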
// GetPostsubmits will return all postsubmits for the given identifier. This includes
// postsubmits that are versioned inside the tested repo, if the inrepoconfig feature
// is enabled.
// Consumers that pass in a RefGetter implementation that calls out to GitHub, and
// that also need the result of that GitHub call, can keep a pointer to its result,
// but must nil-check that pointer before accessing it.
func (c *Config) GetPostsubmits(gc git.ClientFactory, identifier, baseBranch string, baseSHAGetter RefGetter, headSHAGetters ...RefGetter) ([]Postsubmit, error) {
    prowYAML, err := c.getProwYAMLWithDefaults(gc, identifier, baseBranch, baseSHAGetter, headSHAGetters...)
    if err != nil {
        return nil, err
    }

    return append(c.GetPostsubmitsStatic(identifier), prowYAML.Postsubmits...), nil
}

// GetPostsubmitsStatic will return postsubmits for the given identifier that are
// defined in Prow's main config; it does not include postsubmits versioned inside
// the tested repo. Use GetPostsubmits to include those as well.
func (c *Config) GetPostsubmitsStatic(identifier string) []Postsubmit {
    keys := []string{identifier}
    if gerritsource.IsGerritOrg(identifier) {
        // For Gerrit, allow users to define jobs without the https:// prefix,
        // which is what's supported right now.
        keys = append(keys, gerritsource.TrimHTTPSPrefix(identifier))
    }
    var res []Postsubmit
    for _, key := range keys {
        res = append(res, c.PostsubmitsStatic[key]...)
    }
    return res
}

// OwnersDirDenylist is used to configure regular expressions matching directories
// to ignore when searching for OWNERS{,_ALIAS} files in a repo.
type OwnersDirDenylist struct {
    // Repos configures a directory denylist per repo (or org).
    Repos map[string][]string `json:"repos,omitempty"`
    // Default configures a default denylist for all repos (or orgs).
    // Some directories like ".git", "_output" and "vendor/.*/OWNERS"
    // are already preconfigured to be denylisted, and need not be included here.
    Default []string `json:"default,omitempty"`
    // By default, some directories like ".git", "_output" and "vendor/.*/OWNERS"
    // are preconfigured to be denylisted.
    // If set, IgnorePreconfiguredDefaults will not add these preconfigured directories
    // to the denylist.
    IgnorePreconfiguredDefaults bool `json:"ignore_preconfigured_defaults,omitempty"`
}

// ListIgnoredDirs returns regular expressions matching directories to ignore when
// searching for OWNERS{,_ALIAS} files in a repo.
func (o OwnersDirDenylist) ListIgnoredDirs(org, repo string) (ignorelist []string) {
    ignorelist = append(ignorelist, o.Default...)
    if bl, ok := o.Repos[org]; ok {
        ignorelist = append(ignorelist, bl...)
    }
    if bl, ok := o.Repos[org+"/"+repo]; ok {
        ignorelist = append(ignorelist, bl...)
    }

    preconfiguredDefaults := []string{"\\.git$", "_output$", "vendor/.*/.*"}
    if !o.IgnorePreconfiguredDefaults {
        ignorelist = append(ignorelist, preconfiguredDefaults...)
    }
    return
}

// PushGateway is a prometheus push gateway.
type PushGateway struct {
    // Endpoint is the location of the prometheus pushgateway
    // where prow will push metrics to.
    Endpoint string `json:"endpoint,omitempty"`
    // Interval specifies how often prow will push metrics
    // to the pushgateway. Defaults to 1m.
    Interval *metav1.Duration `json:"interval,omitempty"`
    // ServeMetrics tells whether or not the components serve metrics.
    ServeMetrics bool `json:"serve_metrics"`
}
// Controller holds configuration applicable to all agent-specific
// prow controllers.
type Controller struct {
    // JobURLTemplateString compiles into JobURLTemplate at load time.
    JobURLTemplateString string `json:"job_url_template,omitempty"`
    // JobURLTemplate is compiled at load time from JobURLTemplateString. It
    // will be passed a prowapi.ProwJob and is used to set the URL for the
    // "Details" link on GitHub as well as the link from deck.
    JobURLTemplate *template.Template `json:"-"`

    // ReportTemplateString compiles into ReportTemplate at load time.
    ReportTemplateString string `json:"report_template,omitempty"`

    // ReportTemplateStrings is a mapping of report template strings.
    // Use `org/repo`, `org` or `*` as a key.
    ReportTemplateStrings map[string]string `json:"report_templates,omitempty"`

    // ReportTemplates is a mapping of templates that is compiled at load
    // time from ReportTemplateStrings.
    ReportTemplates map[string]*template.Template `json:"-"`

    // MaxConcurrency is the maximum number of tests running concurrently that
    // will be allowed by the controller. 0 implies no limit.
    MaxConcurrency int `json:"max_concurrency,omitempty"`

    // MaxGoroutines is the maximum number of goroutines spawned inside the
    // controller to handle tests. Defaults to 20. Needs to be a positive
    // number.
    MaxGoroutines int `json:"max_goroutines,omitempty"`
}

// ReportTemplateForRepo returns the template that belongs to a specific repository.
// If the repository doesn't exist in the report_templates configuration, it inherits
// the template from its organization; if the organization has no entry either, the
// default ('*') template is used.
func (c *Controller) ReportTemplateForRepo(refs *prowapi.Refs) *template.Template {
    def := c.ReportTemplates["*"]

    if refs == nil {
        return def
    }

    orgRepo := fmt.Sprintf("%s/%s", refs.Org, refs.Repo)
    if tmplByRepo, ok := c.ReportTemplates[orgRepo]; ok {
        return tmplByRepo
    }
    if tmplByOrg, ok := c.ReportTemplates[refs.Org]; ok {
        return tmplByOrg
    }
    return def
}
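// An illustrative sketch (not part of the upstream file) of the lookup
// precedence above; the template values are hypothetical:
//
//	c.ReportTemplates = map[string]*template.Template{
//		"*":             defaultTmpl,
//		"some-org":      orgTmpl,
//		"some-org/repo": repoTmpl,
//	}
//	c.ReportTemplateForRepo(&prowapi.Refs{Org: "some-org", Repo: "repo"})  // repoTmpl
//	c.ReportTemplateForRepo(&prowapi.Refs{Org: "some-org", Repo: "other"}) // orgTmpl
//	c.ReportTemplateForRepo(nil)                                           // defaultTmpl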
// Plank is config for the plank controller.
type Plank struct {
    Controller `json:",inline"`
    // PodPendingTimeout defines how long the controller will wait to perform a garbage
    // collection on pending pods. Defaults to 10 minutes.
    PodPendingTimeout *metav1.Duration `json:"pod_pending_timeout,omitempty"`
    // PodRunningTimeout defines how long the controller will wait to abort a prowjob pod
    // stuck in running state. Defaults to two days.
    PodRunningTimeout *metav1.Duration `json:"pod_running_timeout,omitempty"`
    // PodUnscheduledTimeout defines how long the controller will wait to abort a prowjob
    // stuck in an unscheduled state. Defaults to 5 minutes.
    PodUnscheduledTimeout *metav1.Duration `json:"pod_unscheduled_timeout,omitempty"`

    // DefaultDecorationConfigs holds the default decoration config for specific values.
    //
    // Each entry in the slice specifies Repo and Cluster regexp filter fields to
    // match against jobs and a corresponding DecorationConfig. All entries that
    // match a job are used. Later matching entries override the fields of earlier
    // matching entries.
    //
    // In FinalizeDefaultDecorationConfigs(), this field is populated either directly from
    // DefaultDecorationConfigEntries, or from DefaultDecorationConfigsMap after
    // it is converted to a slice. These fields are mutually exclusive, and
    // defining both is an error.
    DefaultDecorationConfigs []*DefaultDecorationConfigEntry `json:"-"`
    // DefaultDecorationConfigsMap is a mapping from 'org', 'org/repo', or the
    // literal string '*', to the default decoration config to use for that key.
    // The '*' key matches all jobs. (Periodics use extra_refs[0] for matching
    // if present.)
    //
    // This field is mutually exclusive with the DefaultDecorationConfigEntries field.
    DefaultDecorationConfigsMap map[string]*prowapi.DecorationConfig `json:"default_decoration_configs,omitempty"`
    // DefaultDecorationConfigEntries is used to populate DefaultDecorationConfigs.
    //
    // Each entry in the slice specifies Repo and Cluster regexp filter fields to
    // match against jobs and a corresponding DecorationConfig. All entries that
    // match a job are used. Later matching entries override the fields of earlier
    // matching entries.
    //
    // This field is smarter than the DefaultDecorationConfigsMap, because each
    // entry includes additional Cluster regexp information that the old format
    // does not consider.
    //
    // This field is mutually exclusive with the DefaultDecorationConfigsMap field.
    DefaultDecorationConfigEntries []*DefaultDecorationConfigEntry `json:"default_decoration_config_entries,omitempty"`

    // JobURLPrefixConfig is the host and path prefix under which job details
    // will be viewable. Use `org/repo`, `org` or `*` as key and a URL as value.
    JobURLPrefixConfig map[string]string `json:"job_url_prefix_config,omitempty"`

    // JobURLPrefixDisableAppendStorageProvider disables automatically appending
    // the storageProvider to the JobURLPrefix.
    JobURLPrefixDisableAppendStorageProvider bool `json:"jobURLPrefixDisableAppendStorageProvider,omitempty"`

    // BuildClusterStatusFile is an optional field used to specify the blob storage location
    // to publish cluster status information,
    // e.g. gs://my-bucket/cluster-status.json.
    BuildClusterStatusFile string `json:"build_cluster_status_file,omitempty"`

    // JobQueueCapacities is an optional field used to define job queue max concurrency.
    // Each job can be assigned to a specific queue which has its own max concurrency,
    // independent from the job's name. Setting the concurrency to 0 will block any job
    // from being triggered. Setting the concurrency to a negative value will remove the
    // limit. An example use case would be easier scheduling of jobs using boskos resources.
    // This mechanism is separate from ProwJob's MaxConcurrency setting.
    JobQueueCapacities map[string]int `json:"job_queue_capacities,omitempty"`
}

type ProwJobDefaultEntry struct {
    // Matching/filtering fields. All filters must match for an entry to match.

    // OrgRepo matches against the "org" or "org/repo" that the presubmit or postsubmit
    // is associated with. If the job is a periodic, extra_refs[0] is used. If the
    // job is a periodic without extra_refs, the empty string will be used.
    // If this field is omitted all jobs will match.
    OrgRepo string `json:"repo,omitempty"`
    // Cluster matches against the cluster alias of the build cluster that the
    // ProwJob is configured to run on. Recall that ProwJobs default to running on
    // the "default" build cluster if they omit the "cluster" field in config.
    Cluster string `json:"cluster,omitempty"`

    // Config is the ProwJobDefault to apply if the filter fields all match the
    // ProwJob. Note that when multiple entries match a ProwJob they are all used
    // by sequentially merging with later entries overriding fields from earlier
    // entries.
    Config *prowapi.ProwJobDefault `json:"config,omitempty"`
}
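// An illustrative sketch (not part of the upstream file) of how matching
// entries are merged in order, assuming prowapi.ProwJobDefault.ApplyDefault
// merges as documented above; names and tenant IDs are hypothetical:
//
//	pc := &ProwConfig{ProwJobDefaultEntries: []*ProwJobDefaultEntry{
//		{OrgRepo: "*", Config: &prowapi.ProwJobDefault{TenantID: "shared"}},
//		{OrgRepo: "some-org/some-repo", Cluster: "build01",
//			Config: &prowapi.ProwJobDefault{TenantID: "team-a"}},
//	}}
//	// Both entries match, and the later, more specific one wins:
//	pc.mergeProwJobDefault("some-org/some-repo", "build01", nil).TenantID // "team-a"
//	// Only the wildcard entry matches here:
//	pc.mergeProwJobDefault("other-org/repo", "default", nil).TenantID // "shared"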
// DefaultDecorationConfigEntry contains a DecorationConfig and a set of
// regexps. If the regexps here match a ProwJob, then that ProwJob uses the
// DecorationConfig defined here in this entry as its defaults.
//
// If multiple entries match a single ProwJob, the multiple entries'
// DecorationConfigs are merged, with later entries overriding values from
// earlier entries. Then finally that merged DecorationConfig is used by the
// matching ProwJob.
type DefaultDecorationConfigEntry struct {
    // Matching/filtering fields. All filters must match for an entry to match.

    // OrgRepo matches against the "org" or "org/repo" that the presubmit or postsubmit
    // is associated with. If the job is a periodic, extra_refs[0] is used. If the
    // job is a periodic without extra_refs, the empty string will be used.
    // If this field is omitted all jobs will match.
    OrgRepo string `json:"repo,omitempty"`
    // Cluster matches against the cluster alias of the build cluster that the
    // ProwJob is configured to run on. Recall that ProwJobs default to running on
    // the "default" build cluster if they omit the "cluster" field in config.
    Cluster string `json:"cluster,omitempty"`

    // Config is the DecorationConfig to apply if the filter fields all match the
    // ProwJob. Note that when multiple entries match a ProwJob they are all used
    // by sequentially merging with later entries overriding fields from earlier
    // entries.
    Config *prowapi.DecorationConfig `json:"config,omitempty"`
}

// TODO(mpherman): Make a Matcher struct embedded in both ProwJobDefaultEntry and
// DefaultDecorationConfigEntry and DefaultRerunAuthConfigEntry.
func matches(givenOrgRepo, givenCluster, orgRepo, cluster string) bool {
    if givenCluster != "" && givenCluster != "*" && givenCluster != cluster {
        return false
    }
    if givenOrgRepo == "" || givenOrgRepo == "*" || givenOrgRepo == orgRepo {
        return true
    }
    // For Gerrit use, the repo from the adapter probably contains the
    // extra "-review" hostname; trim that away.
    orgRepo = gerritsource.EnsureCodeURL(orgRepo)
    // Ensure a bare given repo name matches the http-prefixed repo
    // that arises in pre/postsubmit jobs.
    orgRepo = strings.TrimPrefix(orgRepo, "https://")
    orgRepo = strings.TrimPrefix(orgRepo, "http://")
    return givenOrgRepo == orgRepo || givenOrgRepo == strings.Split(orgRepo, "/")[0]
}

// matches returns true iff all the filters for the entry match a job.
func (d *ProwJobDefaultEntry) matches(repo, cluster string) bool {
    return matches(d.OrgRepo, d.Cluster, repo, cluster)
}

// matches returns true iff all the filters for the entry match a job.
func (d *DefaultDecorationConfigEntry) matches(repo, cluster string) bool {
    return matches(d.OrgRepo, d.Cluster, repo, cluster)
}

// matches returns true iff all the filters for the entry match a job.
func (d *DefaultRerunAuthConfigEntry) matches(repo, cluster string) bool {
    return matches(d.OrgRepo, d.Cluster, repo, cluster)
}
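// A few illustrative cases (not part of the upstream file) for the matches
// helper above, using hypothetical names:
//
//	matches("", "", "some-org/repo", "default")                      // true: empty filters match everything
//	matches("some-org", "", "some-org/repo", "default")              // true: an org filter matches its repos
//	matches("some-org/repo", "build01", "some-org/repo", "default")  // false: cluster filter mismatch
//	matches("*", "*", "anything/at-all", "any-cluster")              // true: wildcards match everything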
// mergeProwJobDefault finds all matching ProwJobDefaultEntry entries
// for a job and merges them sequentially before merging into the job's own
// ProwJobDefault. Configs merged later override values from earlier configs.
func (pc *ProwConfig) mergeProwJobDefault(repo, cluster string, jobDefault *prowapi.ProwJobDefault) *prowapi.ProwJobDefault {
    var merged *prowapi.ProwJobDefault
    for _, entry := range pc.ProwJobDefaultEntries {
        if entry.matches(repo, cluster) {
            merged = entry.Config.ApplyDefault(merged)
        }
    }
    merged = jobDefault.ApplyDefault(merged)
    if merged == nil {
        merged = &prowapi.ProwJobDefault{}
    }
    if merged.TenantID == "" {
        merged.TenantID = DefaultTenantID
    }
    return merged
}

// mergeDefaultDecorationConfig finds all matching DefaultDecorationConfigEntry entries
// for a job and merges them sequentially before merging into the job's own
// DecorationConfig. Configs merged later override values from earlier configs.
func (p *Plank) mergeDefaultDecorationConfig(repo, cluster string, jobDC *prowapi.DecorationConfig) *prowapi.DecorationConfig {
    var merged *prowapi.DecorationConfig
    for _, entry := range p.DefaultDecorationConfigs {
        if entry.matches(repo, cluster) {
            merged = entry.Config.ApplyDefault(merged)
        }
    }
    merged = jobDC.ApplyDefault(merged)
    if merged == nil {
        merged = &prowapi.DecorationConfig{}
    }
    return merged
}

// GetProwJobDefault finds the resolved prowJobDefault config for a given repo and
// cluster.
func (c *Config) GetProwJobDefault(repo, cluster string) *prowapi.ProwJobDefault {
    return c.mergeProwJobDefault(repo, cluster, nil)
}

// GuessDefaultDecorationConfig attempts to find the resolved default decoration
// config for a given repo and cluster. It is primarily used for best-effort
// guesses about GCS configuration for undecorated jobs.
func (p *Plank) GuessDefaultDecorationConfig(repo, cluster string) *prowapi.DecorationConfig {
    return p.mergeDefaultDecorationConfig(repo, cluster, nil)
}

// GuessDefaultDecorationConfigWithJobDC attempts to find the resolved default decoration
// config for a given repo, cluster and job DecorationConfig. It is primarily used for
// best-effort guesses about GCS configuration for undecorated jobs.
func (p *Plank) GuessDefaultDecorationConfigWithJobDC(repo, cluster string, jobDC *prowapi.DecorationConfig) *prowapi.DecorationConfig {
    return p.mergeDefaultDecorationConfig(repo, cluster, jobDC)
}

// defaultDecorationMapToSlice converts the old format
// (map[string]*prowapi.DecorationConfig) to the new format
// ([]*DefaultDecorationConfigEntry).
func defaultDecorationMapToSlice(m map[string]*prowapi.DecorationConfig) []*DefaultDecorationConfigEntry {
    var entries []*DefaultDecorationConfigEntry
    add := func(repo string, dc *prowapi.DecorationConfig) {
        entries = append(entries, &DefaultDecorationConfigEntry{
            OrgRepo: repo,
            Cluster: "",
            Config:  dc,
        })
    }
    // Ensure "*" comes first...
    if dc, ok := m["*"]; ok {
        add("*", dc)
    }
    // then orgs...
    for key, dc := range m {
        if key == "*" || strings.Contains(key, "/") {
            continue
        }
        add(key, dc)
    }
    // then repos.
    for key, dc := range m {
        if key == "*" || !strings.Contains(key, "/") {
            continue
        }
        add(key, dc)
    }
    return entries
}
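// An illustrative sketch (not part of the upstream file) of why the
// conversion above emits "*" first, then orgs, then repos: later slice
// entries override earlier ones during merging, so this ordering preserves
// the old map's specificity rules. The keys and configs are hypothetical:
//
//	m := map[string]*prowapi.DecorationConfig{
//		"some-org/some-repo": repoDC,
//		"*":                  globalDC,
//		"some-org":           orgDC,
//	}
//	// Resulting entry order: "*", "some-org", "some-org/some-repo" —
//	// so for jobs in some-org/some-repo, repoDC's fields win the merge.
//	entries := defaultDecorationMapToSlice(m)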
// DefaultDecorationMapToSliceTesting is a convenience function that is exposed
// to allow unit tests to convert the old map format to the new slice format.
// It should only be used in testing.
func DefaultDecorationMapToSliceTesting(m map[string]*prowapi.DecorationConfig) []*DefaultDecorationConfigEntry {
    return defaultDecorationMapToSlice(m)
}

// FinalizeDefaultDecorationConfigs prepares the entries of
// Plank.DefaultDecorationConfigs for use in finalizing the job config.
// It sets p.DefaultDecorationConfigs from either the old map format or the
// new slice format:
//
// Old format: map[string]*prowapi.DecorationConfig, where the key is org,
// org/repo, or "*".
//
// New format: []*DefaultDecorationConfigEntry.
//
// If the old format is parsed, it is converted to the new format, then all
// filter regexps are compiled.
func (p *Plank) FinalizeDefaultDecorationConfigs() error {
    mapped, sliced := len(p.DefaultDecorationConfigsMap) > 0, len(p.DefaultDecorationConfigEntries) > 0
    if mapped && sliced {
        return fmt.Errorf("plank.default_decoration_configs and plank.default_decoration_config_entries are mutually exclusive, please use one or the other")
    }
    if mapped {
        p.DefaultDecorationConfigs = defaultDecorationMapToSlice(p.DefaultDecorationConfigsMap)
    } else {
        p.DefaultDecorationConfigs = p.DefaultDecorationConfigEntries
    }
    return nil
}

// GetJobURLPrefix gets the job URL prefix from the config
// for the given refs.
func (p Plank) GetJobURLPrefix(pj *prowapi.ProwJob) string {
    if pj.Spec.DecorationConfig != nil && pj.Spec.DecorationConfig.GCSConfiguration != nil && pj.Spec.DecorationConfig.GCSConfiguration.JobURLPrefix != "" {
        return pj.Spec.DecorationConfig.GCSConfiguration.JobURLPrefix
    }

    var org, repo string
    if pj.Spec.Refs != nil {
        org = pj.Spec.Refs.Org
        repo = pj.Spec.Refs.Repo
    } else if len(pj.Spec.ExtraRefs) > 0 {
        org = pj.Spec.ExtraRefs[0].Org
        repo = pj.Spec.ExtraRefs[0].Repo
    }

    if org == "" {
        return p.JobURLPrefixConfig["*"]
    }
    if p.JobURLPrefixConfig[fmt.Sprintf("%s/%s", org, repo)] != "" {
        return p.JobURLPrefixConfig[fmt.Sprintf("%s/%s", org, repo)]
    }
    if p.JobURLPrefixConfig[org] != "" {
        return p.JobURLPrefixConfig[org]
    }
    return p.JobURLPrefixConfig["*"]
}
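// An illustrative sketch (not part of the upstream file) of the precedence
// implemented above: an explicit per-job GCS JobURLPrefix wins, then
// org/repo, then org, then "*". The prefixes are hypothetical:
//
//	p := Plank{JobURLPrefixConfig: map[string]string{
//		"*":                  "https://deck.example.com/view/",
//		"some-org":           "https://deck.example.com/org-view/",
//		"some-org/some-repo": "https://deck.example.com/repo-view/",
//	}}
//	pj := &prowapi.ProwJob{}
//	pj.Spec.Refs = &prowapi.Refs{Org: "some-org", Repo: "some-repo"}
//	p.GetJobURLPrefix(pj) // "https://deck.example.com/repo-view/"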
// Gerrit is config for the gerrit controller.
type Gerrit struct {
    // TickInterval is how often we do a sync with the bound gerrit instance.
    TickInterval *metav1.Duration `json:"tick_interval,omitempty"`
    // RateLimit defines how many changes to query per gerrit API call.
    // Defaults to 5.
    RateLimit int `json:"ratelimit,omitempty"`
    // DeckURL is the root URL of Deck. This is used to construct links to
    // job runs for a given CL.
    DeckURL        string                `json:"deck_url,omitempty"`
    OrgReposConfig *GerritOrgRepoConfigs `json:"org_repos_config,omitempty"`
    // AllowedPresubmitTriggerRe is used to match presubmit test-related commands in comments.
    AllowedPresubmitTriggerRe          *CopyableRegexp `json:"-"`
    AllowedPresubmitTriggerReRawString string          `json:"allowed_presubmit_trigger_re,omitempty"`
}

func (g *Gerrit) DefaultAndValidate() error {
    if g.TickInterval == nil {
        g.TickInterval = &metav1.Duration{Duration: time.Minute}
    }

    if g.RateLimit == 0 {
        g.RateLimit = 5
    }

    re, err := regexp.Compile(g.AllowedPresubmitTriggerReRawString)
    if err != nil {
        return fmt.Errorf("failed to compile regex for allowed presubmit triggers: %s", err.Error())
    }
    g.AllowedPresubmitTriggerRe = &CopyableRegexp{re}
    return nil
}

func (g *Gerrit) IsAllowedPresubmitTrigger(message string) bool {
    return g.AllowedPresubmitTriggerRe.MatchString(message)
}

// GerritOrgRepoConfigs is config for repos.
type GerritOrgRepoConfigs []GerritOrgRepoConfig

// GerritOrgRepoConfig is config for repos.
type GerritOrgRepoConfig struct {
    // Org is the name of the Gerrit instance/host. The https:// or http://
    // prefix must be kept.
    Org string `json:"org,omitempty"`
    // Repos are a slice of repos under the `Org`.
    Repos []string `json:"repos,omitempty"`
    // OptOutHelp is the flag determining whether the repos defined here opt
    // out of help. If true, Prow will not respond with the help message to
    // comments like `/test ?`, `/retest ?`, `/test job-not-exist`, or `/test
    // job-only-available-from-another-prow`.
    OptOutHelp bool `json:"opt_out_help,omitempty"`
    // Filters are used for limiting the scope of querying the Gerrit server.
    // Currently supports branches and excluded branches.
    Filters *GerritQueryFilter `json:"filters,omitempty"`
}

type GerritQueryFilter struct {
    Branches         []string `json:"branches,omitempty"`
    ExcludedBranches []string `json:"excluded_branches,omitempty"`
    // OptInByDefault indicates that all of the PRs from these repos are
    // considered by Tide, unless the `Prow-Auto-Submit` label is voted -1.
    OptInByDefault bool `json:"opt_in_by_default,omitempty"`
}

func (goc *GerritOrgRepoConfigs) AllRepos() map[string]map[string]*GerritQueryFilter {
    var res map[string]map[string]*GerritQueryFilter
    for _, orgConfig := range *goc {
        if res == nil {
            res = make(map[string]map[string]*GerritQueryFilter)
        }
        for _, repo := range orgConfig.Repos {
            if _, ok := res[orgConfig.Org]; !ok {
                res[orgConfig.Org] = make(map[string]*GerritQueryFilter)
            }
            res[orgConfig.Org][repo] = orgConfig.Filters
        }
    }
    return res
}

func (goc *GerritOrgRepoConfigs) OptOutHelpRepos() map[string]sets.Set[string] {
    var res map[string]sets.Set[string]
    for _, orgConfig := range *goc {
        if !orgConfig.OptOutHelp {
            continue
        }
        if res == nil {
            res = make(map[string]sets.Set[string])
        }
        res[orgConfig.Org] = res[orgConfig.Org].Union(sets.New[string](orgConfig.Repos...))
    }
    return res
}
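// An illustrative sketch (not part of the upstream file) of defaulting and
// the trigger allowlist; the regular expression is hypothetical:
//
//	g := &Gerrit{AllowedPresubmitTriggerReRawString: `(?mi)^/test\s.*$`}
//	if err := g.DefaultAndValidate(); err != nil {
//		return err
//	}
//	_ = g.TickInterval.Duration                // one minute, filled in by defaulting
//	_ = g.RateLimit                            // 5, filled in by defaulting
//	g.IsAllowedPresubmitTrigger("/test all")   // true
//	g.IsAllowedPresubmitTrigger("/shrug")      // false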
// Horologium is config for the Horologium.
type Horologium struct {
    // TickInterval is the interval in which we check if new jobs need to be
    // created. Defaults to one minute.
    TickInterval *metav1.Duration `json:"tick_interval,omitempty"`
}

// JenkinsOperator is config for the jenkins-operator controller.
type JenkinsOperator struct {
    Controller `json:",inline"`
    // LabelSelectorString compiles into LabelSelector at load time.
    // If set, this option needs to match --label-selector used by
    // the desired jenkins-operator. This option is considered
    // invalid when provided with a single jenkins-operator config.
    //
    // For label selector syntax, see below:
    // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
    LabelSelectorString string `json:"label_selector,omitempty"`
    // LabelSelector is used so different jenkins-operator replicas
    // can use their own configuration.
    LabelSelector labels.Selector `json:"-"`
}

// GitHubReporter holds the config for report behavior in github.
type GitHubReporter struct {
    // JobTypesToReport is used to determine which type of prowjob
    // should be reported to github.
    //
    // Defaults to both presubmit and postsubmit jobs.
    JobTypesToReport []prowapi.ProwJobType `json:"job_types_to_report,omitempty"`
    // NoCommentRepos is a list of orgs and org/repos for which failure report
    // comments should not be maintained. Status contexts will still be written.
    NoCommentRepos []string `json:"no_comment_repos,omitempty"`
    // SummaryCommentRepos is a list of orgs and org/repos for which a failure report
    // comment is only sent when all jobs from the current SHA are finished. Status
    // contexts will still be written.
    SummaryCommentRepos []string `json:"summary_comment_repos,omitempty"`
}

// Sinker is config for the sinker controller.
type Sinker struct {
    // ResyncPeriod is how often the controller will perform a garbage
    // collection. Defaults to one hour.
    ResyncPeriod *metav1.Duration `json:"resync_period,omitempty"`
    // MaxProwJobAge is how old a ProwJob can be before it is garbage-collected.
    // Defaults to one week.
    MaxProwJobAge *metav1.Duration `json:"max_prowjob_age,omitempty"`
    // MaxPodAge is how old a Pod can be before it is garbage-collected.
    // Defaults to one day.
    MaxPodAge *metav1.Duration `json:"max_pod_age,omitempty"`
    // TerminatedPodTTL is how long a Pod can live after termination before it is
    // garbage collected.
    // Defaults to matching MaxPodAge.
    TerminatedPodTTL *metav1.Duration `json:"terminated_pod_ttl,omitempty"`
    // ExcludeClusters are build clusters that don't want to be managed by sinker.
    ExcludeClusters []string `json:"exclude_clusters,omitempty"`
}

// LensConfig names a specific lens, and optionally provides some configuration for it.
type LensConfig struct {
    // Name is the name of the lens.
    Name string `json:"name"`
    // Config is some lens-specific configuration. Interpreting it is the responsibility of the
    // lens in question.
    Config json.RawMessage `json:"config,omitempty"`
}
// LensFileConfig is a single entry under Lenses, describing how to configure a lens
// to read a given set of files.
type LensFileConfig struct {
    // RequiredFiles is a list of regexes of file paths that must all be present for a lens to appear.
    // The list entries are ANDed together, i.e. all of them are required. You can achieve an OR
    // by using a pipe in a regex.
    RequiredFiles []string `json:"required_files"`
    // OptionalFiles is a list of regexes of file paths that will be provided to the lens if they are
    // present, but will not preclude the lens being rendered by their absence.
    // The list entries are ORed together, so if only one of them is present it will be provided to
    // the lens even if the others are not.
    OptionalFiles []string `json:"optional_files,omitempty"`
    // Lens is the lens to use, alongside any lens-specific configuration.
    Lens LensConfig `json:"lens"`
    // RemoteConfig specifies how to access remote lenses.
    RemoteConfig *LensRemoteConfig `json:"remote_config,omitempty"`
}

// LensRemoteConfig is the configuration for a remote lens.
type LensRemoteConfig struct {
    // The endpoint for the lens.
    Endpoint string `json:"endpoint"`
    // The parsed endpoint.
    ParsedEndpoint *url.URL `json:"-"`
    // The endpoint for static resources.
    StaticRoot string `json:"static_root"`
    // The human-readable title for the lens.
    Title string `json:"title"`
    // Priority for lens ordering, lowest priority first.
    Priority *uint `json:"priority"`
    // HideTitle defines if we will keep showing the title after the lens loads.
    HideTitle *bool `json:"hide_title"`
}

// Spyglass holds config for Spyglass.
type Spyglass struct {
    // Lenses is a list of lens configurations.
    Lenses []LensFileConfig `json:"lenses,omitempty"`
    // Viewers is deprecated; prefer Lenses instead.
    // Viewers was a map of regexp strings to viewer names that defines which sets
    // of artifacts need to be consumed by which viewers. It is copied into Lenses at load time.
    Viewers map[string][]string `json:"viewers,omitempty"`
    // RegexCache is a map of lens regexp strings to their compiled equivalents.
    RegexCache map[string]*regexp.Regexp `json:"-"`
    // SizeLimit is the max size artifact in bytes that Spyglass will attempt to
    // read in entirety. This will only affect viewers attempting to use
    // artifact.ReadAll(). To exclude outlier artifacts, set this limit to
    // expected file size + variance. To include all artifacts with high
    // probability, use 2*maximum observed artifact size.
    SizeLimit int64 `json:"size_limit,omitempty"`
    // GCSBrowserPrefix is used to generate a link to a human-usable GCS browser.
    // If left empty, the link will not be shown. Otherwise, a GCS path (with no
    // prefix or scheme) will be appended to GCSBrowserPrefix and shown to the user.
    GCSBrowserPrefix string `json:"gcs_browser_prefix,omitempty"`
    // GCSBrowserPrefixesByRepo are used to generate a link to a human-usable GCS browser.
    // They are mapped by org, org/repo or '*', which is the default value.
    // These are the most specific and will override GCSBrowserPrefixesByBucket if both are resolved.
    GCSBrowserPrefixesByRepo GCSBrowserPrefixes `json:"gcs_browser_prefixes,omitempty"`
    // GCSBrowserPrefixesByBucket are used to generate a link to a human-usable GCS browser.
    // They are mapped by bucket name or '*', which is the default value.
    // They will only be utilized if there is not a GCSBrowserPrefixesByRepo for the org/repo.
    GCSBrowserPrefixesByBucket GCSBrowserPrefixes `json:"gcs_browser_prefixes_by_bucket,omitempty"`
    // If set, Announcement is used as a Go HTML template string to be displayed at the top of
    // each spyglass page.
    // Using HTML in the template is acceptable.
    // Currently the only variable available is .ArtifactPath, which contains the GCS path for the job artifacts.
    Announcement string `json:"announcement,omitempty"`
    // TestGridConfig is the path to the TestGrid config proto. If the path begins with
    // "gs://" it is assumed to be a GCS reference, otherwise it is read from the local filesystem.
    // If left blank, TestGrid links will not appear.
    TestGridConfig string `json:"testgrid_config,omitempty"`
    // TestGridRoot is the root URL to the TestGrid frontend, e.g. "https://testgrid.k8s.io/".
    // If left blank, TestGrid links will not appear.
    TestGridRoot string `json:"testgrid_root,omitempty"`
    // HidePRHistLink allows Prow to hide the PR History link from Deck; this is handy
    // especially for Prow instances that only serve Gerrit.
    // This might become obsolete once https://github.com/kubernetes/test-infra/issues/24130 is fixed.
    HidePRHistLink bool `json:"hide_pr_history_link,omitempty"`
    // PRHistLinkTemplate is the template for constructing the href of the `PR History` button;
    // by default it is "/pr-history?org={{.Org}}&repo={{.Repo}}&pr={{.Number}}".
    PRHistLinkTemplate string `json:"pr_history_link_template,omitempty"`
    // BucketAliases permits a naive URL rewriting functionality.
    // Keys represent aliases and their values are the authoritative
    // bucket names they will be substituted with.
    BucketAliases map[string]string `json:"bucket_aliases,omitempty"`
}

type GCSBrowserPrefixes map[string]string

// GetGCSBrowserPrefix determines the GCS browser prefix by checking, in order:
//  1. If org (and optionally repo) is provided, the GCSBrowserPrefixesByRepo config.
//  2. If bucket is provided, the GCSBrowserPrefixesByBucket config.
//  3. The default ('*') entry of GCSBrowserPrefixesByRepo, falling back to the
//     default entry of GCSBrowserPrefixesByBucket.
func (s Spyglass) GetGCSBrowserPrefix(org, repo, bucket string) string {
    if org != "" {
        if prefix, ok := s.GCSBrowserPrefixesByRepo[fmt.Sprintf("%s/%s", org, repo)]; ok {
            return prefix
        }
        if prefix, ok := s.GCSBrowserPrefixesByRepo[org]; ok {
            return prefix
        }
    }
    if bucket != "" {
        if prefix, ok := s.GCSBrowserPrefixesByBucket[bucket]; ok {
            return prefix
        }
    }

    // If we don't find anything specific, use the default by repo; if that
    // isn't present, use the default by bucket.
    if prefix, ok := s.GCSBrowserPrefixesByRepo["*"]; ok {
        return prefix
    }

    return s.GCSBrowserPrefixesByBucket["*"]
}
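// An illustrative sketch (not part of the upstream file) of the resolution
// order above; all names are hypothetical:
//
//	s := Spyglass{
//		GCSBrowserPrefixesByRepo:   GCSBrowserPrefixes{"some-org": "https://gcs.example.com/org/"},
//		GCSBrowserPrefixesByBucket: GCSBrowserPrefixes{"some-bucket": "https://gcs.example.com/bucket/", "*": "https://gcs.example.com/"},
//	}
//	s.GetGCSBrowserPrefix("some-org", "repo", "some-bucket") // org entry wins
//	s.GetGCSBrowserPrefix("", "", "some-bucket")             // bucket entry
//	s.GetGCSBrowserPrefix("", "", "unknown")                 // falls back to the "*" default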
// Deck holds config for deck.
type Deck struct {
    // Spyglass specifies which viewers will be used for which artifacts when viewing a job in Deck.
    Spyglass Spyglass `json:"spyglass,omitempty"`
    // TideUpdatePeriod specifies how often Deck will fetch status from Tide. Defaults to 10s.
    TideUpdatePeriod *metav1.Duration `json:"tide_update_period,omitempty"`
    // HiddenRepos is a list of orgs and/or repos that should not be displayed by Deck.
    HiddenRepos []string `json:"hidden_repos,omitempty"`
    // ExternalAgentLogs ensures external agents can expose
    // their logs in prow.
    ExternalAgentLogs []ExternalAgentLog `json:"external_agent_logs,omitempty"`
    // Branding of the frontend.
    Branding *Branding `json:"branding,omitempty"`
    // GoogleAnalytics, if specified, includes a Google Analytics tracking code on each page.
    GoogleAnalytics string `json:"google_analytics,omitempty"`
    // RerunAuthConfigs is not deprecated, but DefaultRerunAuthConfigs should be preferred.
    // It remains a part of Deck for the purposes of backwards compatibility.
    // RerunAuthConfigs is a map of configs that specify who is able to trigger job reruns. The field
    // accepts a key of: `org/repo`, `org` or `*` (wildcard) to define what GitHub org (or repo) a particular
    // config applies to and a value of: `RerunAuthConfig` struct to define the users/groups authorized to rerun jobs.
    RerunAuthConfigs RerunAuthConfigs `json:"rerun_auth_configs,omitempty"`
    // DefaultRerunAuthConfigs is a list of DefaultRerunAuthConfigEntry structures that specify who can
    // trigger job reruns. Reruns are based on whether the entry's org/repo or cluster matches with the
    // expected fields in the given configuration.
    //
    // Each entry in the slice specifies Repo and Cluster regexp filter fields to
    // match against jobs and a corresponding RerunAuthConfig. The most specific entry
    // that matches the job is used for authentication purposes.
    //
    // This field is smarter than the RerunAuthConfigs, because each
    // entry includes additional Cluster regexp information that the old format
    // does not consider.
    //
    // This field is mutually exclusive with the RerunAuthConfigs field.
    DefaultRerunAuthConfigs []*DefaultRerunAuthConfigEntry `json:"default_rerun_auth_configs,omitempty"`
    // SkipStoragePathValidation skips validation that restricts artifact requests to specific buckets.
    // By default, buckets listed in the GCSConfiguration are automatically allowed.
    // Additional locations can be allowed via the `AdditionalAllowedBuckets` field.
    // When unspecified (nil), it defaults to false.
    SkipStoragePathValidation *bool `json:"skip_storage_path_validation,omitempty"`
    // AdditionalAllowedBuckets is a list of storage buckets to allow in artifact requests
    // (in addition to those listed in the GCSConfiguration).
    // Setting this field requires "SkipStoragePathValidation" also to be set to `false`.
    AdditionalAllowedBuckets []string `json:"additional_allowed_buckets,omitempty"`
    // AllKnownStorageBuckets contains all storage buckets configured in all of the
    // job configs.
    AllKnownStorageBuckets sets.Set[string] `json:"-"`
}
1271 func (d *Deck) Validate() error {
1272 if len(d.AdditionalAllowedBuckets) > 0 && !d.shouldValidateStorageBuckets() {
1273 return fmt.Errorf("deck.skip_storage_path_validation is enabled despite deck.additional_allowed_buckets being configured: %v", d.AdditionalAllowedBuckets)
1274 }
1275
1276 for k, config := range d.DefaultRerunAuthConfigs {
1277 if err := config.Config.Validate(); err != nil {
1278 return fmt.Errorf("default_rerun_auth_configs[%d]: %w", k, err)
1279 }
1280 }
1281
1282 return nil
1283 }
1284
1285 type notAllowedBucketError struct {
1286 err error
1287 }
1288
1289 func (ne notAllowedBucketError) Error() string {
1290 return fmt.Sprintf("bucket not in allowed list; you may allow it by including it in `deck.additional_allowed_buckets`: %s", ne.err.Error())
1291 }
1292
1293 func (notAllowedBucketError) Is(err error) bool {
1294 _, ok := err.(notAllowedBucketError)
1295 return ok
1296 }
1297
1298 // NotAllowedBucketError wraps an error and returns a notAllowedBucketError error.
1299 func NotAllowedBucketError(err error) error {
1300 return &notAllowedBucketError{err: err}
1301 }
1302
1303 func IsNotAllowedBucketError(err error) bool {
1304 return errors.Is(err, notAllowedBucketError{})
1305 }
1306
1307 // ValidateStorageBucket validates a storage bucket (unless the `Deck.SkipStoragePathValidation` field is true).
1308 // The bucket name must be included in any of the following:
1309 // 1. Any job's `.DecorationConfig.GCSConfiguration.Bucket` (except jobs defined externally via InRepoConfig).
1310 // 2. `Plank.DefaultDecorationConfigs.GCSConfiguration.Bucket`.
1311 // 3. `Deck.AdditionalAllowedBuckets`.
1312 func (c *Config) ValidateStorageBucket(bucketName string) error {
1313 if !c.Deck.shouldValidateStorageBuckets() {
1314 return nil
1315 }
1316 if alias, exists := c.Deck.Spyglass.BucketAliases[bucketName]; exists {
1317 bucketName = alias
1318 }
1319 if !c.Deck.AllKnownStorageBuckets.Has(bucketName) {
1320 return NotAllowedBucketError(fmt.Errorf("bucket %q not in allowed list (%v)", bucketName, sets.List(c.Deck.AllKnownStorageBuckets)))
1321 }
1322 return nil
1323 }
1324
1325 // shouldValidateStorageBuckets returns whether or not Deck's storage paths should be validated.
1326 // Validation is skipped unless SkipStoragePathValidation is explicitly set to false.
1327 func (d *Deck) shouldValidateStorageBuckets() bool {
1328 if d.SkipStoragePathValidation == nil {
1329 return false
1330 }
1331 return !*d.SkipStoragePathValidation
1332 }
1333
1334 func calculateStorageBuckets(c *Config) sets.Set[string] {
1335 knownBuckets := sets.New[string](c.Deck.AdditionalAllowedBuckets...)
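// The loops below walk every place a bucket can be configured (Plank's default
// decoration configs, periodics, presubmits, and postsubmits) and record each
// bucket name. Bucket names may carry a storage-provider prefix, which
// stripProviderPrefixFromBucket normalizes away, for example (illustrative values):
//
//	stripProviderPrefixFromBucket("gs://my-bucket") // "my-bucket"
//	stripProviderPrefixFromBucket("s3://logs")      // "logs"
//	stripProviderPrefixFromBucket("my-bucket")      // "my-bucket"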
1336 for _, dc := range c.Plank.DefaultDecorationConfigs { 1337 if dc.Config != nil && dc.Config.GCSConfiguration != nil && dc.Config.GCSConfiguration.Bucket != "" { 1338 knownBuckets.Insert(stripProviderPrefixFromBucket(dc.Config.GCSConfiguration.Bucket)) 1339 } 1340 } 1341 for _, j := range c.Periodics { 1342 if j.DecorationConfig != nil && j.DecorationConfig.GCSConfiguration != nil { 1343 knownBuckets.Insert(stripProviderPrefixFromBucket(j.DecorationConfig.GCSConfiguration.Bucket)) 1344 } 1345 } 1346 for _, jobs := range c.PresubmitsStatic { 1347 for _, j := range jobs { 1348 if j.DecorationConfig != nil && j.DecorationConfig.GCSConfiguration != nil { 1349 knownBuckets.Insert(stripProviderPrefixFromBucket(j.DecorationConfig.GCSConfiguration.Bucket)) 1350 } 1351 } 1352 } 1353 for _, jobs := range c.PostsubmitsStatic { 1354 for _, j := range jobs { 1355 if j.DecorationConfig != nil && j.DecorationConfig.GCSConfiguration != nil { 1356 knownBuckets.Insert(stripProviderPrefixFromBucket(j.DecorationConfig.GCSConfiguration.Bucket)) 1357 } 1358 } 1359 } 1360 return knownBuckets 1361 } 1362 1363 func stripProviderPrefixFromBucket(bucket string) string { 1364 if split := strings.Split(bucket, "://"); len(split) == 2 { 1365 return split[1] 1366 } 1367 return bucket 1368 } 1369 1370 // ExternalAgentLog ensures an external agent like Jenkins can expose 1371 // its logs in prow. 1372 type ExternalAgentLog struct { 1373 // Agent is an external prow agent that supports exposing 1374 // logs via deck. 1375 Agent string `json:"agent,omitempty"` 1376 // SelectorString compiles into Selector at load time. 1377 SelectorString string `json:"selector,omitempty"` 1378 // Selector can be used in prow deployments where the workload has 1379 // been sharded between controllers of the same agent. For more info 1380 // see https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors. 1381 Selector labels.Selector `json:"-"` 1382 // URLTemplateString compiles into URLTemplate at load time. 1383 URLTemplateString string `json:"url_template,omitempty"` 1384 // URLTemplate is compiled at load time from URLTemplateString. It 1385 // will be passed a prowapi.ProwJob and the generated URL should provide 1386 // logs for the ProwJob. 1387 URLTemplate *template.Template `json:"-"` 1388 } 1389 1390 // Branding holds branding configuration for deck. 1391 type Branding struct { 1392 // Logo is the location of the logo that will be loaded in deck. 1393 Logo string `json:"logo,omitempty"` 1394 // Favicon is the location of the favicon that will be loaded in deck. 1395 Favicon string `json:"favicon,omitempty"` 1396 // BackgroundColor is the color of the background. 1397 BackgroundColor string `json:"background_color,omitempty"` 1398 // HeaderColor is the color of the header. 1399 HeaderColor string `json:"header_color,omitempty"` 1400 } 1401 1402 // RerunAuthConfigs represents the configs for rerun authorization in Deck. 1403 // Use `org/repo`, `org` or `*` as key and a `RerunAuthConfig` struct as value. 1404 type RerunAuthConfigs map[string]prowapi.RerunAuthConfig 1405 1406 type DefaultRerunAuthConfigEntry struct { 1407 // Matching/filtering fields. All filters must match for an entry to match. 1408 1409 // OrgRepo matches against the "org" or "org/repo" that the presubmit or postsubmit 1410 // is associated with. If the job is a periodic, extra_refs[0] is used. If the 1411 // job is a periodic without extra_refs, the empty string will be used. 1412 // If this field is omitted all jobs will match. 
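// For example (illustrative): "kubernetes" matches jobs for every repo in the
// kubernetes org, while "kubernetes/test-infra" matches only jobs for that
// repo.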
1413 OrgRepo string `json:"repo,omitempty"`
1414 // Cluster matches against the cluster alias of the build cluster that the
1415 // ProwJob is configured to run on. Recall that ProwJobs default to running on
1416 // the "default" build cluster if they omit the "cluster" field in config.
1417 Cluster string `json:"cluster,omitempty"`
1418
1419 // Config is the RerunAuthConfig to apply if the filter fields all match the
1420 // ProwJob. Note that when multiple entries match a ProwJob, the entry with the
1421 // highest specificity is used.
1422 Config *prowapi.RerunAuthConfig `json:"rerun_auth_configs,omitempty"`
1423 }
1424
1425 func (d *Deck) GetRerunAuthConfig(jobSpec *prowapi.ProwJobSpec) *prowapi.RerunAuthConfig {
1426 var config *prowapi.RerunAuthConfig
1427
1428 var orgRepo string
1429 if jobSpec.Refs != nil {
1430 orgRepo = jobSpec.Refs.OrgRepoString()
1431 } else if len(jobSpec.ExtraRefs) > 0 {
1432 orgRepo = jobSpec.ExtraRefs[0].OrgRepoString()
1433 }
1434
1435 for _, drac := range d.DefaultRerunAuthConfigs {
1436 if drac.matches(orgRepo, jobSpec.Cluster) {
1437 config = drac.Config
1438 }
1439 }
1440
1441 return config
1442 }
1443
1444 // defaultRerunAuthMapToSlice converts the old format
1445 // (map[string]prowapi.RerunAuthConfig) to the new format
1446 // ([]*DefaultRerunAuthConfigEntry), i.e. DefaultRerunAuthConfigs.
1447 func defaultRerunAuthMapToSlice(m map[string]prowapi.RerunAuthConfig) ([]*DefaultRerunAuthConfigEntry, error) {
1448 mLength := len(m)
1449 var entries []*DefaultRerunAuthConfigEntry
1450 add := func(repo string, rac prowapi.RerunAuthConfig) {
1451 entries = append(entries, &DefaultRerunAuthConfigEntry{
1452 OrgRepo: repo,
1453 Cluster: "",
1454 Config: &rac,
1455 })
1456 }
1457
1458 // Ensure "" comes first...
1459 if rac, ok := m[""]; ok {
1460 add("", rac)
1461 delete(m, "")
1462 }
1463 // ...then "*"...
1464 if rac, ok := m["*"]; ok {
1465 add("*", rac)
1466 delete(m, "*")
1467 }
1468 // ...then orgs...
1469 for key, rac := range m {
1470 if strings.Contains(key, "/") {
1471 continue
1472 }
1473 add(key, rac)
1474 delete(m, key)
1475 }
1476 // ...and finally repos.
1477 for key, rac := range m {
1478 add(key, rac)
1479 }
1480
1481 if mLength != len(entries) {
1482 return nil, fmt.Errorf("deck.rerun_auth_configs and deck.default_rerun_auth_configs are mutually exclusive, please use one or the other")
1483 }
1484
1485 return entries, nil
1486 }
1487
1488 // FinalizeDefaultRerunAuthConfigs prepares the entries of
1489 // Deck.DefaultRerunAuthConfigs for use in finalizing the job config.
1490 // It parses either d.RerunAuthConfigs or d.DefaultRerunAuthConfigs, not both.
1491 //
1492 // Old format: map[string]prowapi.RerunAuthConfig where the key is org,
1493 // org/repo, or "*".
1494 //
1495 // New format: []*DefaultRerunAuthConfigEntry
1496 //
1497 // If the old format is parsed, it is converted to the new format and all filter regexps are compiled.
1498 func (d *Deck) FinalizeDefaultRerunAuthConfigs() error {
1499 mapped, sliced := len(d.RerunAuthConfigs) > 0, len(d.DefaultRerunAuthConfigs) > 0
1500
1501 // This case should be guarded against by prow config test. Checking here is
1502 // for cases where prow config test didn't run.
1503 if mapped && sliced {
1504 return fmt.Errorf("deck.rerun_auth_configs and deck.default_rerun_auth_configs are mutually exclusive, please use one or the other")
1505 }
1506
1507 // Set up DefaultRerunAuthConfigs.
1508 if mapped {
1509 var err error
1510 d.DefaultRerunAuthConfigs, err = defaultRerunAuthMapToSlice(d.RerunAuthConfigs)
1511 if err != nil {
1512 return err
1513 }
1514 }
1515
1516 return nil
1517 }
1518
1519 const (
1520 defaultMaxOutstandingMessages = 10
1521 )
1522
1523 // PubsubSubscriptions maps GCP project IDs to a list of subscription IDs.
1524 type PubsubSubscriptions map[string][]string
1525
1526 // PubSubTriggers contains pubsub configurations.
1527 type PubSubTriggers []PubSubTrigger
1528
1529 // PubSubTrigger contains the pubsub configuration for a single project.
1530 type PubSubTrigger struct {
1531 Project string `json:"project"`
1532 Topics []string `json:"topics"`
1533 AllowedClusters []string `json:"allowed_clusters"`
1534 // MaxOutstandingMessages is the max number of messages being processed; the default is 10.
1535 MaxOutstandingMessages int `json:"max_outstanding_messages"`
1536 }
1537
1538 // GitHubOptions allows users to control how prow applications display GitHub website links.
1539 type GitHubOptions struct {
1540 // LinkURLFromConfig is the string representation of the link_url config parameter.
1541 // This config parameter allows users to override the default GitHub link url for all plugins.
1542 // If this option is not set, we assume "https://github.com".
1543 LinkURLFromConfig string `json:"link_url,omitempty"`
1544
1545 // LinkURL is the URL representation of LinkURLFromConfig. This variable should be used
1546 // in all places internally.
1547 LinkURL *url.URL `json:"-"`
1548 }
1549
1550 // ManagedWebhookInfo contains metadata about the repo/org which is onboarded.
1551 type ManagedWebhookInfo struct {
1552 TokenCreatedAfter time.Time `json:"token_created_after"`
1553 }
1554
1555 // ManagedWebhooks contains information about all the repos/orgs which are onboarded with auto-generated tokens.
1556 type ManagedWebhooks struct {
1557 RespectLegacyGlobalToken bool `json:"respect_legacy_global_token"`
1558 // Controls whether org/repo invitations for the prow bot should be automatically
1559 // accepted or not. Only admin-level invitations related to orgs and repos
1560 // in the managed_webhooks config will be accepted and all other invitations
1561 // will be left pending.
1562 AutoAcceptInvitation bool `json:"auto_accept_invitation"`
1563 OrgRepoConfig map[string]ManagedWebhookInfo `json:"org_repo_config,omitempty"`
1564 }
1565
1566 // SlackReporter represents the config for the Slack reporter. The channel can be overridden
1567 // on the job via the .reporter_config.slack.channel property.
1568 type SlackReporter struct {
1569 JobTypesToReport []prowapi.ProwJobType `json:"job_types_to_report,omitempty"`
1570 prowapi.SlackReporterConfig `json:",inline"`
1571 }
1572
1573 // SlackReporterConfigs represents the config for the Slack reporter(s).
1574 // Use `org/repo`, `org` or `*` as key and a `SlackReporter` struct as value.
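// For example (a minimal sketch with placeholder values):
//
//	cfg := SlackReporterConfigs{
//		"*":                     {},
//		"kubernetes":            {},
//		"kubernetes/test-infra": {},
//	}
//
// GetSlackReporter prefers the "kubernetes/test-infra" entry for jobs on that
// repo, falls back to the "kubernetes" entry for other repos in the org, and
// uses "*" for everything else, including jobs without refs.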
1575 type SlackReporterConfigs map[string]SlackReporter 1576 1577 func (cfg SlackReporterConfigs) mergeFrom(additional *SlackReporterConfigs) error { 1578 if additional == nil { 1579 return nil 1580 } 1581 1582 var errs []error 1583 for orgOrRepo, slackReporter := range *additional { 1584 if _, alreadyConfigured := cfg[orgOrRepo]; alreadyConfigured { 1585 errs = append(errs, fmt.Errorf("config for org or repo %s passed more than once", orgOrRepo)) 1586 continue 1587 } 1588 cfg[orgOrRepo] = slackReporter 1589 } 1590 1591 return utilerrors.NewAggregate(errs) 1592 } 1593 1594 func (cfg SlackReporterConfigs) GetSlackReporter(refs *prowapi.Refs) SlackReporter { 1595 if refs == nil { 1596 return cfg["*"] 1597 } 1598 1599 if slack, ok := cfg[fmt.Sprintf("%s/%s", refs.Org, refs.Repo)]; ok { 1600 return slack 1601 } 1602 1603 if slack, ok := cfg[refs.Org]; ok { 1604 return slack 1605 } 1606 1607 return cfg["*"] 1608 } 1609 1610 func (cfg SlackReporterConfigs) HasGlobalConfig() bool { 1611 _, exists := cfg["*"] 1612 return exists 1613 } 1614 1615 func (cfg *SlackReporter) DefaultAndValidate() error { 1616 // Default ReportTemplate. 1617 if cfg.ReportTemplate == "" { 1618 cfg.ReportTemplate = `Job {{.Spec.Job}} of type {{.Spec.Type}} ended with state {{.Status.State}}. <{{.Status.URL}}|View logs>` 1619 } 1620 1621 if cfg.Channel == "" { 1622 return errors.New("channel must be set") 1623 } 1624 1625 // Validate ReportTemplate. 1626 tmpl, err := template.New("").Parse(cfg.ReportTemplate) 1627 if err != nil { 1628 return fmt.Errorf("failed to parse template: %w", err) 1629 } 1630 if err := tmpl.Execute(&bytes.Buffer{}, &prowapi.ProwJob{}); err != nil { 1631 return fmt.Errorf("failed to execute report_template: %w", err) 1632 } 1633 1634 return nil 1635 } 1636 1637 // Load loads and parses the config at path. 1638 func Load(prowConfig, jobConfig string, supplementalProwConfigDirs []string, supplementalProwConfigsFileNameSuffix string, additionals ...func(*Config) error) (c *Config, err error) { 1639 return loadWithYamlOpts(nil, prowConfig, jobConfig, supplementalProwConfigDirs, supplementalProwConfigsFileNameSuffix, additionals...) 1640 } 1641 1642 // LoadStrict loads and parses the config at path. 1643 // Unlike Load it unmarshalls yaml with strict parsing. 1644 func LoadStrict(prowConfig, jobConfig string, supplementalProwConfigDirs []string, supplementalProwConfigsFileNameSuffix string, additionals ...func(*Config) error) (c *Config, err error) { 1645 return loadWithYamlOpts([]yaml.JSONOpt{yaml.DisallowUnknownFields}, prowConfig, jobConfig, supplementalProwConfigDirs, supplementalProwConfigsFileNameSuffix, additionals...) 1646 } 1647 1648 func loadWithYamlOpts(yamlOpts []yaml.JSONOpt, prowConfig, jobConfig string, supplementalProwConfigDirs []string, supplementalProwConfigsFileNameSuffix string, additionals ...func(*Config) error) (c *Config, err error) { 1649 // we never want config loading to take down the prow components. 1650 defer func() { 1651 if r := recover(); r != nil { 1652 c, err = nil, fmt.Errorf("panic loading config: %v\n%s", r, string(debug.Stack())) 1653 } 1654 }() 1655 c, err = loadConfig(prowConfig, jobConfig, supplementalProwConfigDirs, supplementalProwConfigsFileNameSuffix, yamlOpts...) 
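// Past this point the order matters: finalizeJobConfig applies defaults
// before validateComponentConfig and ValidateJobConfig run, and the
// caller-supplied additionals only ever see the fully defaulted Config.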
1656 if err != nil {
1657 return nil, err
1658 }
1659 if err := c.finalizeJobConfig(); err != nil {
1660 return nil, err
1661 }
1662 if err := c.validateComponentConfig(); err != nil {
1663 return nil, err
1664 }
1665 if err := c.ValidateJobConfig(); err != nil {
1666 return nil, err
1667 }
1668
1669 for _, additional := range additionals {
1670 if err := additional(c); err != nil {
1671 return nil, err
1672 }
1673 }
1674 return c, nil
1675 }
1676
1677 // ReadJobConfig reads the JobConfig yaml, but does not expand or validate it.
1678 func ReadJobConfig(jobConfig string, yamlOpts ...yaml.JSONOpt) (JobConfig, error) {
1679 stat, err := os.Stat(jobConfig)
1680 if err != nil {
1681 return JobConfig{}, err
1682 }
1683
1684 if !stat.IsDir() {
1685 // Still support a single file.
1686 var jc JobConfig
1687 if err := yamlToConfig(jobConfig, &jc, yamlOpts...); err != nil {
1688 return JobConfig{}, err
1689 }
1690 return jc, nil
1691 }
1692
1693 prowIgnore, err := gitignore.NewRepositoryWithFile(jobConfig, ProwIgnoreFileName)
1694 if err != nil {
1695 return JobConfig{}, fmt.Errorf("failed to create `%s` parser: %w", ProwIgnoreFileName, err)
1696 }
1697 // We need to ensure all config files have unique basenames,
1698 // since the updateconfig plugin will use the basename as a key in the configmap.
1699 uniqueBasenames := sets.Set[string]{}
1700
1701 jobConfigCount := 0
1702 allStart := time.Now()
1703 jc := JobConfig{}
1704 var errs []error
1705 err = filepath.Walk(jobConfig, func(path string, info os.FileInfo, err error) error {
1706 if err != nil {
1707 logrus.WithError(err).Errorf("walking path %q.", path)
1708 // A bad file should not stop us from parsing the directory.
1709 return nil
1710 }
1711
1712 if strings.HasPrefix(info.Name(), "..") {
1713 // Kubernetes volumes also include files we
1714 // should not be looking into for keys.
1715 if info.IsDir() {
1716 return filepath.SkipDir
1717 }
1718 return nil
1719 }
1720 if filepath.Ext(path) != ".yaml" && filepath.Ext(path) != ".yml" {
1721 return nil
1722 }
1723 // Use 'Match' directly because 'Ignore' and 'Include' don't work properly for repositories.
1724 match := prowIgnore.Match(path)
1725 if match != nil && match.Ignore() {
1726 return nil
1727 }
1728
1729 if info.IsDir() {
1730 return nil
1731 }
1732
1733 base := filepath.Base(path)
1734 if uniqueBasenames.Has(base) {
1735 errs = append(errs, fmt.Errorf("duplicated basename is not allowed: %s", base))
1736 return nil
1737 }
1738 uniqueBasenames.Insert(base)
1739
1740 fileStart := time.Now()
1741 var subConfig JobConfig
1742 if err := yamlToConfig(path, &subConfig, yamlOpts...); err != nil {
1743 errs = append(errs, err)
1744 return nil
1745 }
1746 jc, err = mergeJobConfigs(jc, subConfig)
1747 if err == nil {
1748 logrus.WithField("jobConfig", path).WithField("duration", time.Since(fileStart)).Traceln("config loaded")
1749 jobConfigCount++
1750 } else {
1751 errs = append(errs, err)
1752 }
1753 return nil
1754 })
1755 err = utilerrors.NewAggregate(append(errs, err))
1756 if err != nil {
1757 return JobConfig{}, err
1758 }
1759 logrus.WithField("count", jobConfigCount).WithField("duration", time.Since(allStart)).Traceln("jobConfigs loaded successfully")
1760
1761 return jc, nil
1762 }
1763
1764 // loadConfig loads one or multiple config files and returns a config object.
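// Supplemental files are only considered when their name ends in
// supplementalProwConfigsFileNameSuffix; for example (illustrative), with a
// suffix of "_prowconfig.yaml", a file named "acme_prowconfig.yaml" under one
// of the additional dirs is merged into the main ProwConfig, while "acme.yaml"
// is skipped.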
1765 func loadConfig(prowConfig, jobConfig string, additionalProwConfigDirs []string, supplementalProwConfigsFileNameSuffix string, yamlOpts ...yaml.JSONOpt) (*Config, error) { 1766 stat, err := os.Stat(prowConfig) 1767 if err != nil { 1768 return nil, err 1769 } 1770 1771 if stat.IsDir() { 1772 return nil, fmt.Errorf("prowConfig cannot be a dir - %s", prowConfig) 1773 } 1774 1775 var nc Config 1776 if err := yamlToConfig(prowConfig, &nc, yamlOpts...); err != nil { 1777 return nil, err 1778 } 1779 1780 prowConfigCount := 0 1781 allStart := time.Now() 1782 for _, additionalProwConfigDir := range additionalProwConfigDirs { 1783 var errs []error 1784 errs = append(errs, filepath.Walk(additionalProwConfigDir, func(path string, info os.FileInfo, err error) error { 1785 if err != nil { 1786 // Finish walking and handle all errors in bulk at the end, otherwise this is annoying as a user. 1787 errs = append(errs, err) 1788 return nil 1789 } 1790 // Kubernetes configmap mounts create symlinks for the configmap keys that point to files prefixed with '..'. 1791 // This allows it to do atomic changes by changing the symlink to a new target when the configmap content changes. 1792 // This means that we should ignore the '..'-prefixed files, otherwise we might end up reading a half-written file and will 1793 // get duplicate data. 1794 if strings.HasPrefix(info.Name(), "..") { 1795 if info.IsDir() { 1796 return filepath.SkipDir 1797 } 1798 return nil 1799 } 1800 1801 if info.IsDir() || !strings.HasSuffix(path, supplementalProwConfigsFileNameSuffix) { 1802 return nil 1803 } 1804 1805 fileStart := time.Now() 1806 var cfg ProwConfig 1807 if err := yamlToConfig(path, &cfg); err != nil { 1808 errs = append(errs, err) 1809 return nil 1810 } 1811 1812 if err := nc.ProwConfig.mergeFrom(&cfg); err != nil { 1813 errs = append(errs, fmt.Errorf("failed to merge in config from %s: %w", path, err)) 1814 } else { 1815 logrus.WithField("prowConfig", path).WithField("duration", time.Since(fileStart)).Traceln("config loaded") 1816 prowConfigCount++ 1817 } 1818 1819 return nil 1820 })) 1821 1822 if err := utilerrors.NewAggregate(errs); err != nil { 1823 return nil, err 1824 } 1825 } 1826 logrus.WithField("count", prowConfigCount).WithField("duration", time.Since(allStart)).Traceln("prowConfigs loaded successfully") 1827 1828 if err := parseProwConfig(&nc); err != nil { 1829 return nil, err 1830 } 1831 1832 versionFilePath := filepath.Join(path.Dir(prowConfig), ConfigVersionFileName) 1833 if _, errAccess := os.Stat(versionFilePath); errAccess == nil { 1834 content, err := os.ReadFile(versionFilePath) 1835 if err != nil { 1836 return nil, fmt.Errorf("failed to read versionfile %s: %w", versionFilePath, err) 1837 } 1838 nc.ConfigVersionSHA = string(content) 1839 } 1840 1841 nc.AllRepos = sets.Set[string]{} 1842 for _, query := range nc.Tide.Queries { 1843 for _, repo := range query.Repos { 1844 nc.AllRepos.Insert(repo) 1845 } 1846 } 1847 1848 // For production, use these functions for getting ProwYAML values. In 1849 // tests, we can override these fields with mocked versions. 
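// (A test would typically overwrite these two fields with its own
// ProwYAMLGetter implementation right after loading, so no real Git
// fetching happens.)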
1850 nc.ProwYAMLGetter = prowYAMLGetter
1851 nc.ProwYAMLGetterWithDefaults = prowYAMLGetterWithDefaults
1852
1853 if deduplicatedTideQueries, err := nc.deduplicateTideQueries(nc.Tide.Queries); err != nil {
1854 logrus.WithError(err).Error("failed to deduplicate tide queries")
1855 } else {
1856 nc.Tide.Queries = deduplicatedTideQueries
1857 }
1858
1859 if nc.InRepoConfig.AllowedClusters == nil {
1860 nc.InRepoConfig.AllowedClusters = map[string][]string{}
1861 }
1862
1863 // Respect `"*": []`, which disables the default global cluster.
1864 if nc.InRepoConfig.AllowedClusters["*"] == nil {
1865 nc.InRepoConfig.AllowedClusters["*"] = []string{kube.DefaultClusterAlias}
1866 }
1867
1868 // Merge pubsub configs.
1869 if nc.PubSubSubscriptions != nil {
1870 if nc.PubSubTriggers != nil {
1871 return nil, errors.New("pubsub_subscriptions and pubsub_triggers are mutually exclusive")
1872 }
1873 for proj, topics := range nc.PubSubSubscriptions {
1874 nc.PubSubTriggers = append(nc.PubSubTriggers, PubSubTrigger{
1875 Project: proj,
1876 Topics: topics,
1877 AllowedClusters: []string{"*"},
1878 })
1879 }
1880 }
1881 for i, trigger := range nc.PubSubTriggers {
1882 if trigger.MaxOutstandingMessages == 0 {
1883 nc.PubSubTriggers[i].MaxOutstandingMessages = defaultMaxOutstandingMessages
1884 }
1885 }
1886
1887 // TODO(krzyzacy): temporarily allow an empty jobconfig,
1888 // and also temporarily allow job config in the prow config.
1889 if jobConfig == "" {
1890 return &nc, nil
1891 }
1892
1893 jc, err := ReadJobConfig(jobConfig, yamlOpts...)
1894 if err != nil {
1895 return nil, err
1896 }
1897 if err := nc.mergeJobConfig(jc); err != nil {
1898 return nil, err
1899 }
1900
1901 return &nc, nil
1902 }
1903
1904 // yamlToConfig converts a yaml file into a Config object.
1905 func yamlToConfig(path string, nc interface{}, opts ...yaml.JSONOpt) error {
1906 b, err := ReadFileMaybeGZIP(path)
1907 if err != nil {
1908 return fmt.Errorf("error reading %s: %w", path, err)
1909 }
1910 if err := yaml.Unmarshal(b, nc, opts...); err != nil {
1911 return fmt.Errorf("error unmarshaling %s: %w", path, err)
1912 }
1913 var jc *JobConfig
1914 switch v := nc.(type) {
1915 case *JobConfig:
1916 jc = v
1917 case *Config:
1918 jc = &v.JobConfig
1919 default:
1920 // No job config, skip inserting filepaths into the jobs.
1921 return nil
1922 }
1923
1924 for rep := range jc.PresubmitsStatic {
1925 fix := func(job *Presubmit) {
1926 job.SourcePath = path
1927 }
1928 for i := range jc.PresubmitsStatic[rep] {
1929 fix(&jc.PresubmitsStatic[rep][i])
1930 }
1931 }
1932 for rep := range jc.PostsubmitsStatic {
1933 fix := func(job *Postsubmit) {
1934 job.SourcePath = path
1935 }
1936 for i := range jc.PostsubmitsStatic[rep] {
1937 fix(&jc.PostsubmitsStatic[rep][i])
1938 }
1939 }
1940
1941 fix := func(job *Periodic) {
1942 job.SourcePath = path
1943 }
1944 for i := range jc.Periodics {
1945 fix(&jc.Periodics[i])
1946 }
1947 return nil
1948 }
1949
1950 // ReadFileMaybeGZIP wraps os.ReadFile, returning the decompressed contents
1951 // if the file is gzipped, or otherwise the raw contents.
1952 func ReadFileMaybeGZIP(path string) ([]byte, error) {
1953 b, err := os.ReadFile(path)
1954 if err != nil {
1955 return nil, err
1956 }
1957 // Check if the file contains a gzip header: http://www.zlib.org/rfc-gzip.html.
1958 if !bytes.HasPrefix(b, []byte("\x1F\x8B")) {
1959 // Go ahead and return the contents if not gzipped.
1960 return b, nil
1961 }
1962 // otherwise decode.
1963 gzipReader, err := gzip.NewReader(bytes.NewBuffer(b)) 1964 if err != nil { 1965 return nil, err 1966 } 1967 return io.ReadAll(gzipReader) 1968 } 1969 1970 func (c *Config) mergeJobConfig(jc JobConfig) error { 1971 m, err := mergeJobConfigs(JobConfig{ 1972 Presets: c.Presets, 1973 PresubmitsStatic: c.PresubmitsStatic, 1974 Periodics: c.Periodics, 1975 PostsubmitsStatic: c.PostsubmitsStatic, 1976 }, jc) 1977 if err != nil { 1978 return err 1979 } 1980 c.Presets = m.Presets 1981 c.PresubmitsStatic = m.PresubmitsStatic 1982 c.Periodics = m.Periodics 1983 c.PostsubmitsStatic = m.PostsubmitsStatic 1984 return nil 1985 } 1986 1987 // mergeJobConfigs merges two JobConfig together. 1988 // It will try to merge: 1989 // - Presubmits 1990 // - Postsubmits 1991 // - Periodics 1992 // - Presets 1993 func mergeJobConfigs(a, b JobConfig) (JobConfig, error) { 1994 // Merge everything. 1995 // *** Presets *** 1996 c := JobConfig{} 1997 c.Presets = append(a.Presets, b.Presets...) 1998 1999 // validate no duplicated preset key-value pairs. 2000 validLabels := map[string]bool{} 2001 for _, preset := range c.Presets { 2002 for label, val := range preset.Labels { 2003 pair := label + ":" + val 2004 if _, ok := validLabels[pair]; ok { 2005 return JobConfig{}, fmt.Errorf("duplicated preset 'label:value' pair : %s", pair) 2006 } 2007 validLabels[pair] = true 2008 } 2009 } 2010 2011 // *** Periodics *** 2012 c.Periodics = append(a.Periodics, b.Periodics...) 2013 2014 // *** Presubmits *** 2015 c.PresubmitsStatic = make(map[string][]Presubmit) 2016 for repo, jobs := range a.PresubmitsStatic { 2017 c.PresubmitsStatic[repo] = jobs 2018 } 2019 for repo, jobs := range b.PresubmitsStatic { 2020 c.PresubmitsStatic[repo] = append(c.PresubmitsStatic[repo], jobs...) 2021 } 2022 2023 // *** Postsubmits *** 2024 c.PostsubmitsStatic = make(map[string][]Postsubmit) 2025 for repo, jobs := range a.PostsubmitsStatic { 2026 c.PostsubmitsStatic[repo] = jobs 2027 } 2028 for repo, jobs := range b.PostsubmitsStatic { 2029 c.PostsubmitsStatic[repo] = append(c.PostsubmitsStatic[repo], jobs...) 
2030 } 2031 return c, nil 2032 } 2033 2034 func shouldDecorate(c *JobConfig, util *UtilityConfig) bool { 2035 if util.Decorate != nil { 2036 return *util.Decorate 2037 } else { 2038 b := c.DecorateAllJobs 2039 util.Decorate = &b 2040 } 2041 return c.DecorateAllJobs 2042 } 2043 2044 func setPresubmitProwJobDefaults(c *Config, ps *Presubmit, repo string) { 2045 ps.ProwJobDefault = c.mergeProwJobDefault(repo, ps.Cluster, ps.ProwJobDefault) 2046 } 2047 2048 func setPostsubmitProwJobDefaults(c *Config, ps *Postsubmit, repo string) { 2049 ps.ProwJobDefault = c.mergeProwJobDefault(repo, ps.Cluster, ps.ProwJobDefault) 2050 } 2051 2052 func setPeriodicProwJobDefaults(c *Config, ps *Periodic) { 2053 var repo string 2054 if len(ps.UtilityConfig.ExtraRefs) > 0 { 2055 repo = fmt.Sprintf("%s/%s", ps.UtilityConfig.ExtraRefs[0].Org, ps.UtilityConfig.ExtraRefs[0].Repo) 2056 } 2057 2058 ps.ProwJobDefault = c.mergeProwJobDefault(repo, ps.Cluster, ps.ProwJobDefault) 2059 } 2060 func setPresubmitDecorationDefaults(c *Config, ps *Presubmit, repo string) { 2061 if shouldDecorate(&c.JobConfig, &ps.JobBase.UtilityConfig) { 2062 ps.DecorationConfig = c.Plank.mergeDefaultDecorationConfig(repo, ps.Cluster, ps.DecorationConfig) 2063 } 2064 } 2065 2066 func setPostsubmitDecorationDefaults(c *Config, ps *Postsubmit, repo string) { 2067 if shouldDecorate(&c.JobConfig, &ps.JobBase.UtilityConfig) { 2068 ps.DecorationConfig = c.Plank.mergeDefaultDecorationConfig(repo, ps.Cluster, ps.DecorationConfig) 2069 } 2070 } 2071 2072 func setPeriodicDecorationDefaults(c *Config, ps *Periodic) { 2073 if shouldDecorate(&c.JobConfig, &ps.JobBase.UtilityConfig) { 2074 var repo string 2075 if len(ps.UtilityConfig.ExtraRefs) > 0 { 2076 repo = fmt.Sprintf("%s/%s", ps.UtilityConfig.ExtraRefs[0].Org, ps.UtilityConfig.ExtraRefs[0].Repo) 2077 } 2078 2079 ps.DecorationConfig = c.Plank.mergeDefaultDecorationConfig(repo, ps.Cluster, ps.DecorationConfig) 2080 } 2081 } 2082 2083 // defaultPresubmits defaults the presubmits for one repo. 2084 func defaultPresubmits(presubmits []Presubmit, additionalPresets []Preset, c *Config, repo string) error { 2085 c.defaultPresubmitFields(presubmits) 2086 var errs []error 2087 for idx, ps := range presubmits { 2088 setPresubmitDecorationDefaults(c, &presubmits[idx], repo) 2089 setPresubmitProwJobDefaults(c, &presubmits[idx], repo) 2090 if err := resolvePresets(ps.Name, ps.Labels, ps.Spec, append(c.Presets, additionalPresets...)); err != nil { 2091 errs = append(errs, err) 2092 } 2093 } 2094 if err := SetPresubmitRegexes(presubmits); err != nil { 2095 errs = append(errs, fmt.Errorf("could not set regex: %w", err)) 2096 } 2097 2098 return utilerrors.NewAggregate(errs) 2099 } 2100 2101 // defaultPostsubmits defaults the postsubmits for one repo. 2102 func defaultPostsubmits(postsubmits []Postsubmit, additionalPresets []Preset, c *Config, repo string) error { 2103 c.defaultPostsubmitFields(postsubmits) 2104 var errs []error 2105 for idx, ps := range postsubmits { 2106 setPostsubmitDecorationDefaults(c, &postsubmits[idx], repo) 2107 setPostsubmitProwJobDefaults(c, &postsubmits[idx], repo) 2108 if err := resolvePresets(ps.Name, ps.Labels, ps.Spec, append(c.Presets, additionalPresets...)); err != nil { 2109 errs = append(errs, err) 2110 } 2111 } 2112 if err := SetPostsubmitRegexes(postsubmits); err != nil { 2113 errs = append(errs, fmt.Errorf("could not set regex: %w", err)) 2114 } 2115 return utilerrors.NewAggregate(errs) 2116 } 2117 2118 // DefaultPeriodic defaults (mutates) a single Periodic. 
2119 func (c *Config) DefaultPeriodic(periodic *Periodic) error { 2120 c.defaultPeriodicFields(periodic) 2121 setPeriodicDecorationDefaults(c, periodic) 2122 setPeriodicProwJobDefaults(c, periodic) 2123 return resolvePresets(periodic.Name, periodic.Labels, periodic.Spec, c.Presets) 2124 } 2125 2126 // defaultPeriodics defaults c.Periodics. 2127 func defaultPeriodics(c *Config) error { 2128 var errs []error 2129 for i := range c.Periodics { 2130 errs = append(errs, c.DefaultPeriodic(&c.Periodics[i])) 2131 } 2132 return utilerrors.NewAggregate(errs) 2133 } 2134 2135 // finalizeJobConfig mutates and fixes entries for jobspecs. 2136 func (c *Config) finalizeJobConfig() error { 2137 if err := c.Plank.FinalizeDefaultDecorationConfigs(); err != nil { 2138 return err 2139 } 2140 2141 for repo, jobs := range c.PresubmitsStatic { 2142 if err := defaultPresubmits(jobs, nil, c, repo); err != nil { 2143 return err 2144 } 2145 c.AllRepos.Insert(repo) 2146 } 2147 2148 for repo, jobs := range c.PostsubmitsStatic { 2149 if err := defaultPostsubmits(jobs, nil, c, repo); err != nil { 2150 return err 2151 } 2152 c.AllRepos.Insert(repo) 2153 } 2154 2155 if err := defaultPeriodics(c); err != nil { 2156 return err 2157 } 2158 2159 return nil 2160 } 2161 2162 // validateComponentConfig validates the various infrastructure components' configurations. 2163 func (c *Config) validateComponentConfig() error { 2164 for k, v := range c.Plank.JobURLPrefixConfig { 2165 if _, err := url.Parse(v); err != nil { 2166 return fmt.Errorf(`invalid value for Planks job_url_prefix_config["%s"]: %v`, k, err) 2167 } 2168 } 2169 if c.Gerrit.DeckURL != "" { 2170 if _, err := url.Parse(c.Gerrit.DeckURL); err != nil { 2171 return fmt.Errorf("invalid value for gerrit.deck_url: %v", err) 2172 } 2173 } 2174 2175 var validationErrs []error 2176 if c.ManagedWebhooks.OrgRepoConfig != nil { 2177 for repoName, repoValue := range c.ManagedWebhooks.OrgRepoConfig { 2178 if repoValue.TokenCreatedAfter.After(time.Now()) { 2179 validationErrs = append(validationErrs, fmt.Errorf("token_created_after %s can be no later than current time for repo/org %s", repoValue.TokenCreatedAfter, repoName)) 2180 } 2181 } 2182 if len(validationErrs) > 0 { 2183 return utilerrors.NewAggregate(validationErrs) 2184 } 2185 } 2186 2187 if c.SlackReporterConfigs != nil { 2188 for k, config := range c.SlackReporterConfigs { 2189 if err := config.DefaultAndValidate(); err != nil { 2190 return fmt.Errorf("failed to validate slackreporter config: %w", err) 2191 } 2192 c.SlackReporterConfigs[k] = config 2193 } 2194 } 2195 2196 if err := c.Deck.FinalizeDefaultRerunAuthConfigs(); err != nil { 2197 return err 2198 } 2199 2200 if err := c.Deck.Validate(); err != nil { 2201 return err 2202 } 2203 2204 if err := c.Gangway.Validate(); err != nil { 2205 return err 2206 } 2207 2208 if err := c.Moonraker.Validate(); err != nil { 2209 return err 2210 } 2211 2212 return nil 2213 } 2214 2215 var ( 2216 jobNameRegex = regexp.MustCompile(`^[A-Za-z0-9-._]+$`) 2217 jobNameRegexJenkins = regexp.MustCompile(`^[A-Za-z0-9-._]([A-Za-z0-9-._/]*[A-Za-z0-9-_])?$`) 2218 ) 2219 2220 func validateJobName(v JobBase) error { 2221 nameRegex := jobNameRegex 2222 if v.Agent == string(prowapi.JenkinsAgent) { 2223 nameRegex = jobNameRegexJenkins 2224 } 2225 2226 if !nameRegex.MatchString(v.Name) { 2227 return fmt.Errorf("name: must match regex %q", nameRegex.String()) 2228 } 2229 2230 return nil 2231 } 2232 2233 func (c Config) validateJobBase(v JobBase, jobType prowapi.ProwJobType) error { 2234 if err := 
validateJobName(v); err != nil { 2235 return err 2236 } 2237 2238 // Ensure max_concurrency is non-negative. 2239 if v.MaxConcurrency < 0 { 2240 return fmt.Errorf("max_concurrency: %d must be a non-negative number", v.MaxConcurrency) 2241 } 2242 if err := validateAgent(v, c.PodNamespace); err != nil { 2243 return err 2244 } 2245 if err := validatePodSpec(jobType, v.Spec, v.DecorationConfig); err != nil { 2246 return err 2247 } 2248 if v.Agent == prowapi.TektonAgent { 2249 pipelineRunSpec, err := v.GetPipelineRunSpec() 2250 if err != nil { 2251 return err 2252 } 2253 if err := ValidatePipelineRunSpec(jobType, v.ExtraRefs, pipelineRunSpec); err != nil { 2254 return err 2255 } 2256 } 2257 if err := validateLabels(v.Labels); err != nil { 2258 return err 2259 } 2260 if err := validateAnnotation(v.Annotations); err != nil { 2261 return err 2262 } 2263 validJobQueueNames := sets.KeySet[string](c.Plank.JobQueueCapacities) 2264 if err := validateJobQueueName(v.JobQueueName, validJobQueueNames); err != nil { 2265 return err 2266 } 2267 if v.Spec == nil || len(v.Spec.Containers) == 0 { 2268 return nil // jenkins jobs have no spec. 2269 } 2270 if err := v.RerunAuthConfig.Validate(); err != nil { 2271 return err 2272 } 2273 if err := v.UtilityConfig.Validate(); err != nil { 2274 return err 2275 } 2276 for i := range v.Spec.Containers { 2277 if err := validateDecoration(v.Spec.Containers[i], v.DecorationConfig); err != nil { 2278 return err 2279 } 2280 } 2281 return nil 2282 } 2283 2284 // validatePresubmits validates the presubmits for one repo. 2285 func (c Config) validatePresubmits(presubmits []Presubmit) error { 2286 validPresubmits := map[string][]Presubmit{} 2287 duplicatePresubmits := sets.New[string]() 2288 var errs []error 2289 for _, ps := range presubmits { 2290 // Checking that no duplicate job in prow config exists on the same branch. 2291 for _, existingJob := range validPresubmits[ps.Name] { 2292 if existingJob.Brancher.Intersects(ps.Brancher) { 2293 duplicatePresubmits.Insert(ps.Name) 2294 } 2295 } 2296 for _, otherPS := range presubmits { 2297 if otherPS.Name == ps.Name || !otherPS.Brancher.Intersects(ps.Brancher) { 2298 continue 2299 } 2300 if otherPS.Context == ps.Context { 2301 errs = append(errs, fmt.Errorf("jobs %s and %s report to the same GitHub context %q", otherPS.Name, ps.Name, otherPS.Context)) 2302 } 2303 } 2304 2305 if err := c.validateJobBase(ps.JobBase, prowapi.PresubmitJob); err != nil { 2306 errs = append(errs, fmt.Errorf("invalid presubmit job %s: %w", ps.Name, err)) 2307 } 2308 if err := validateTriggering(ps); err != nil { 2309 errs = append(errs, err) 2310 } 2311 if err := validateReporting(ps.JobBase, ps.Reporter); err != nil { 2312 errs = append(errs, fmt.Errorf("invalid presubmit job %s: %w", ps.Name, err)) 2313 } 2314 validPresubmits[ps.Name] = append(validPresubmits[ps.Name], ps) 2315 } 2316 if duplicatePresubmits.Len() > 0 { 2317 errs = append(errs, fmt.Errorf("duplicated presubmit jobs (consider both inrepo and central config): %v", sortStringSlice(duplicatePresubmits.UnsortedList()))) 2318 } 2319 2320 return utilerrors.NewAggregate(errs) 2321 } 2322 2323 // ValidateRefs validates the extra refs on a presubmit for one repo. 
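// The repo under test is implicitly counted once, so, for example
// (illustrative), a job on "org/repo" that also lists "org/repo" in its
// extra_refs is rejected as a duplicate, as is any org/repo pair appearing
// twice within extra_refs itself.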
2324 func ValidateRefs(repo string, jobBase JobBase) error { 2325 gitRefs := map[string]int{ 2326 repo: 1, 2327 } 2328 for _, ref := range jobBase.UtilityConfig.ExtraRefs { 2329 gitRefs[fmt.Sprintf("%s/%s", ref.Org, ref.Repo)]++ 2330 } 2331 2332 dupes := sets.New[string]() 2333 for gitRef, count := range gitRefs { 2334 if count > 1 { 2335 dupes.Insert(gitRef) 2336 } 2337 } 2338 2339 if dupes.Len() > 0 { 2340 return fmt.Errorf("invalid job %s on repo %s: the following refs specified more than once: %s", 2341 jobBase.Name, repo, strings.Join(sets.List(dupes), ",")) 2342 } 2343 return nil 2344 } 2345 2346 // validatePostsubmits validates the postsubmits for one repo. 2347 func (c Config) validatePostsubmits(postsubmits []Postsubmit) error { 2348 validPostsubmits := map[string][]Postsubmit{} 2349 duplicatePostsubmits := sets.New[string]() 2350 2351 var errs []error 2352 for _, ps := range postsubmits { 2353 // Checking that no duplicate job in prow config exists on the same repo / branch. 2354 for _, existingJob := range validPostsubmits[ps.Name] { 2355 if existingJob.Brancher.Intersects(ps.Brancher) { 2356 duplicatePostsubmits.Insert(ps.Name) 2357 } 2358 } 2359 for _, otherPS := range postsubmits { 2360 if otherPS.Name == ps.Name || !otherPS.Brancher.Intersects(ps.Brancher) { 2361 continue 2362 } 2363 if otherPS.Context == ps.Context { 2364 errs = append(errs, fmt.Errorf("jobs %s and %s report to the same GitHub context %q", otherPS.Name, ps.Name, otherPS.Context)) 2365 } 2366 } 2367 2368 if err := c.validateJobBase(ps.JobBase, prowapi.PostsubmitJob); err != nil { 2369 errs = append(errs, fmt.Errorf("invalid postsubmit job %s: %w", ps.Name, err)) 2370 } 2371 if err := validateAlwaysRun(ps); err != nil { 2372 errs = append(errs, err) 2373 } 2374 if err := validateReporting(ps.JobBase, ps.Reporter); err != nil { 2375 errs = append(errs, fmt.Errorf("invalid postsubmit job %s: %w", ps.Name, err)) 2376 } 2377 validPostsubmits[ps.Name] = append(validPostsubmits[ps.Name], ps) 2378 } 2379 if duplicatePostsubmits.Len() > 0 { 2380 errs = append(errs, fmt.Errorf("duplicated postsubmit jobs (consider both inrepo and central config): %v", sortStringSlice(duplicatePostsubmits.UnsortedList()))) 2381 } 2382 2383 return utilerrors.NewAggregate(errs) 2384 } 2385 2386 // validatePeriodics validates a set of periodics. 2387 func (c Config) validatePeriodics(periodics []Periodic) error { 2388 var errs []error 2389 2390 // validate no duplicated periodics. 2391 validPeriodics := sets.New[string]() 2392 // Ensure that the periodic durations are valid and specs exist. 
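// Exactly one of cron, interval, or minimum_interval must be set on each
// periodic: the loop below rejects both the none-set and the multiple-set
// cases before parsing whichever schedule field was chosen.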
2393 for j, p := range periodics {
2394 if validPeriodics.Has(p.Name) {
2395 errs = append(errs, fmt.Errorf("duplicated periodic job: %s", p.Name))
2396 }
2397 validPeriodics.Insert(p.Name)
2398 if err := c.validateJobBase(p.JobBase, prowapi.PeriodicJob); err != nil {
2399 errs = append(errs, fmt.Errorf("invalid periodic job %s: %w", p.Name, err))
2400 }
2401
2402 // Validate mutually exclusive properties.
2403 seen := 0
2404 if p.Cron != "" {
2405 seen += 1
2406 }
2407 if p.Interval != "" {
2408 seen += 1
2409 }
2410 if p.MinimumInterval != "" {
2411 seen += 1
2412 }
2413 if seen > 1 {
2414 errs = append(errs, fmt.Errorf("cron, interval, and minimum_interval are mutually exclusive in periodic %s", p.Name))
2415 continue
2416 }
2417 if seen == 0 {
2418 errs = append(errs, fmt.Errorf("at least one of cron, interval, or minimum_interval must be set in periodic %s", p.Name))
2419 continue
2420 }
2421
2422 if p.Cron != "" {
2423 if _, err := cron.Parse(p.Cron); err != nil {
2424 errs = append(errs, fmt.Errorf("invalid cron string %s in periodic %s: %w", p.Cron, p.Name, err))
2425 }
2426 }
2427
2428 // Set the interval on the periodic jobs. It doesn't make sense to do this
2429 // for child jobs.
2430 if p.Interval != "" {
2431 d, err := time.ParseDuration(periodics[j].Interval)
2432 if err != nil {
2433 errs = append(errs, fmt.Errorf("cannot parse duration for %s: %w", periodics[j].Name, err))
2434 }
2435 periodics[j].interval = d
2436 }
2437
2438 if p.MinimumInterval != "" {
2439 d, err := time.ParseDuration(periodics[j].MinimumInterval)
2440 if err != nil {
2441 errs = append(errs, fmt.Errorf("cannot parse duration for %s: %w", periodics[j].Name, err))
2442 }
2443 periodics[j].minimum_interval = d
2444 }
2445
2446 }
2447
2448 return utilerrors.NewAggregate(errs)
2449 }
2450
2451 // ValidateJobConfig validates that all the jobspecs/presets are valid.
2452 // If you are mutating the jobs, please add it to finalizeJobConfig above.
2453 func (c *Config) ValidateJobConfig() error {
2454
2455 var errs []error
2456
2457 // Validate presubmits.
2458 for _, jobs := range c.PresubmitsStatic {
2459 if err := c.validatePresubmits(jobs); err != nil {
2460 errs = append(errs, err)
2461 }
2462 }
2463
2464 // Validate postsubmits.
2465 for _, jobs := range c.PostsubmitsStatic {
2466 if err := c.validatePostsubmits(jobs); err != nil {
2467 errs = append(errs, err)
2468 }
2469 }
2470
2471 // Validate periodics.
2472 if err := c.validatePeriodics(c.Periodics); err != nil {
2473 errs = append(errs, err)
2474 }
2475
2476 c.Deck.AllKnownStorageBuckets = calculateStorageBuckets(c)
2477
2478 return utilerrors.NewAggregate(errs)
2479 }
2480
2481 func parseProwConfig(c *Config) error {
2482 if err := ValidateController(&c.Plank.Controller); err != nil {
2483 return fmt.Errorf("validating plank config: %w", err)
2484 }
2485
2486 if c.Plank.PodPendingTimeout == nil {
2487 c.Plank.PodPendingTimeout = &metav1.Duration{Duration: 10 * time.Minute}
2488 }
2489
2490 if c.Plank.PodRunningTimeout == nil {
2491 c.Plank.PodRunningTimeout = &metav1.Duration{Duration: 48 * time.Hour}
2492 }
2493
2494 if c.Plank.PodUnscheduledTimeout == nil {
2495 c.Plank.PodUnscheduledTimeout = &metav1.Duration{Duration: 5 * time.Minute}
2496 }
2497
2498 if err := c.Gerrit.DefaultAndValidate(); err != nil {
2499 return fmt.Errorf("validating gerrit config: %w", err)
2500 }
2501
2502 if c.Tide.Gerrit != nil {
2503 if c.Tide.Gerrit.RateLimit == 0 {
2504 c.Tide.Gerrit.RateLimit = 5
2505 }
2506 }
2507
2508 if len(c.GitHubReporter.JobTypesToReport) == 0 {
2509 c.GitHubReporter.JobTypesToReport = append(c.GitHubReporter.JobTypesToReport, prowapi.PresubmitJob, prowapi.PostsubmitJob)
2510 }
2511
2512 // Validate that the entries are valid job types.
2513 // Currently only presubmit and postsubmit can be reported to github.
2514 for _, t := range c.GitHubReporter.JobTypesToReport {
2515 if t != prowapi.PresubmitJob && t != prowapi.PostsubmitJob {
2516 return fmt.Errorf("invalid job_types_to_report: %v", t)
2517 }
2518 }
2519
2520 // jenkins operator controller template functions.
2521 // reference:
2522 // - https://helm.sh/docs/chart_template_guide/function_list/#string-functions
2523 // - https://github.com/Masterminds/sprig
2524 //
2525 // We could use sprig.FuncMap() instead in the future.
2526 jenkinsFuncMap := template.FuncMap{
2527 "replace": func(old, new, src string) string {
2528 return strings.Replace(src, old, new, -1)
2529 },
2530 }
2531
2532 for i := range c.JenkinsOperators {
2533 if err := ValidateController(&c.JenkinsOperators[i].Controller, jenkinsFuncMap); err != nil {
2534 return fmt.Errorf("validating jenkins_operators config: %w", err)
2535 }
2536 sel, err := labels.Parse(c.JenkinsOperators[i].LabelSelectorString)
2537 if err != nil {
2538 return fmt.Errorf("invalid jenkins_operators.label_selector option: %w", err)
2539 }
2540 c.JenkinsOperators[i].LabelSelector = sel
2541 // TODO: Invalidate overlapping selectors more.
2542 if len(c.JenkinsOperators) > 1 && c.JenkinsOperators[i].LabelSelectorString == "" {
2543 return errors.New("selector overlap: cannot use an empty label_selector with multiple selectors")
2544 }
2545 if len(c.JenkinsOperators) == 1 && c.JenkinsOperators[0].LabelSelectorString != "" {
2546 return errors.New("label_selector is invalid when used for a single jenkins-operator")
2547 }
2548 }
2549
2550 for i, agentToTmpl := range c.Deck.ExternalAgentLogs {
2551 urlTemplate, err := template.New(agentToTmpl.Agent).Parse(agentToTmpl.URLTemplateString)
2552 if err != nil {
2553 return fmt.Errorf("parsing template for agent %q: %w", agentToTmpl.Agent, err)
2554 }
2555 c.Deck.ExternalAgentLogs[i].URLTemplate = urlTemplate
2556 // We need to validate selectors used by deck since these are not
2557 // sent to the api server.
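// For example (illustrative): a selector string like "app=jenkins-agent"
// restricts this entry to the controller shard whose ProwJobs carry that
// label; an empty selector parses successfully and matches everything.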
2558 s, err := labels.Parse(c.Deck.ExternalAgentLogs[i].SelectorString) 2559 if err != nil { 2560 return fmt.Errorf("error parsing selector %q: %w", c.Deck.ExternalAgentLogs[i].SelectorString, err) 2561 } 2562 c.Deck.ExternalAgentLogs[i].Selector = s 2563 } 2564 2565 if c.Deck.TideUpdatePeriod == nil { 2566 c.Deck.TideUpdatePeriod = &metav1.Duration{Duration: time.Second * 10} 2567 } 2568 2569 if c.Deck.Spyglass.SizeLimit == 0 { 2570 c.Deck.Spyglass.SizeLimit = 100e6 2571 } else if c.Deck.Spyglass.SizeLimit <= 0 { 2572 return fmt.Errorf("invalid value for deck.spyglass.size_limit, must be >=0") 2573 } 2574 2575 // Migrate the old `viewers` format to the new `lenses` format. 2576 var oldLenses []LensFileConfig 2577 for regex, viewers := range c.Deck.Spyglass.Viewers { 2578 for _, viewer := range viewers { 2579 lfc := LensFileConfig{ 2580 RequiredFiles: []string{regex}, 2581 Lens: LensConfig{ 2582 Name: viewer, 2583 }, 2584 } 2585 oldLenses = append(oldLenses, lfc) 2586 } 2587 } 2588 // Ensure the ordering is stable, because these are referenced by index elsewhere. 2589 sort.Slice(oldLenses, func(i, j int) bool { return oldLenses[i].Lens.Name < oldLenses[j].Lens.Name }) 2590 c.Deck.Spyglass.Lenses = append(c.Deck.Spyglass.Lenses, oldLenses...) 2591 2592 // Parse and cache all our regexes upfront. 2593 c.Deck.Spyglass.RegexCache = make(map[string]*regexp.Regexp) 2594 for _, lens := range c.Deck.Spyglass.Lenses { 2595 toCompile := append(lens.OptionalFiles, lens.RequiredFiles...) 2596 for _, v := range toCompile { 2597 if _, ok := c.Deck.Spyglass.RegexCache[v]; ok { 2598 continue 2599 } 2600 r, err := regexp.Compile(v) 2601 if err != nil { 2602 return fmt.Errorf("cannot compile regexp %q, err: %w", v, err) 2603 } 2604 c.Deck.Spyglass.RegexCache[v] = r 2605 } 2606 } 2607 2608 // Map old viewer names to the new ones for backwards compatibility. 2609 // TODO(Katharine, #10274): remove this, eventually. 
2610 oldViewers := map[string]string{ 2611 "build-log-viewer": "buildlog", 2612 "metadata-viewer": "metadata", 2613 "junit-viewer": "junit", 2614 } 2615 2616 for re, viewers := range c.Deck.Spyglass.Viewers { 2617 for i, v := range viewers { 2618 if rename, ok := oldViewers[v]; ok { 2619 c.Deck.Spyglass.Viewers[re][i] = rename 2620 } 2621 } 2622 } 2623 2624 if c.Deck.Spyglass.GCSBrowserPrefixesByRepo == nil { 2625 c.Deck.Spyglass.GCSBrowserPrefixesByRepo = make(map[string]string) 2626 } 2627 2628 _, defaultByRepoExists := c.Deck.Spyglass.GCSBrowserPrefixesByRepo["*"] 2629 if defaultByRepoExists && c.Deck.Spyglass.GCSBrowserPrefix != "" { 2630 return fmt.Errorf("both gcs_browser_prefix and gcs_browser_prefixes['*'] are specified.") 2631 } 2632 if !defaultByRepoExists { 2633 c.Deck.Spyglass.GCSBrowserPrefixesByRepo["*"] = c.Deck.Spyglass.GCSBrowserPrefix 2634 } 2635 2636 if c.Deck.Spyglass.GCSBrowserPrefixesByBucket == nil { 2637 c.Deck.Spyglass.GCSBrowserPrefixesByBucket = make(map[string]string) 2638 } 2639 2640 _, defaultByBucketExists := c.Deck.Spyglass.GCSBrowserPrefixesByBucket["*"] 2641 if defaultByBucketExists && c.Deck.Spyglass.GCSBrowserPrefix != "" { 2642 return fmt.Errorf("both gcs_browser_prefix and gcs_browser_prefixes_by_bucket['*'] are specified.") 2643 } 2644 if !defaultByBucketExists { 2645 c.Deck.Spyglass.GCSBrowserPrefixesByBucket["*"] = c.Deck.Spyglass.GCSBrowserPrefix 2646 } 2647 2648 if c.Deck.Spyglass.BucketAliases == nil { 2649 c.Deck.Spyglass.BucketAliases = make(map[string]string) 2650 } 2651 2652 if c.PushGateway.Interval == nil { 2653 c.PushGateway.Interval = &metav1.Duration{Duration: time.Minute} 2654 } 2655 2656 if c.Sinker.ResyncPeriod == nil { 2657 c.Sinker.ResyncPeriod = &metav1.Duration{Duration: time.Hour} 2658 } 2659 2660 if c.Sinker.MaxProwJobAge == nil { 2661 c.Sinker.MaxProwJobAge = &metav1.Duration{Duration: 7 * 24 * time.Hour} 2662 } 2663 2664 if c.Sinker.MaxPodAge == nil { 2665 c.Sinker.MaxPodAge = &metav1.Duration{Duration: 24 * time.Hour} 2666 } 2667 2668 if c.Sinker.TerminatedPodTTL == nil { 2669 c.Sinker.TerminatedPodTTL = &metav1.Duration{Duration: c.Sinker.MaxPodAge.Duration} 2670 } 2671 2672 if c.Tide.SyncPeriod == nil { 2673 c.Tide.SyncPeriod = &metav1.Duration{Duration: time.Minute} 2674 } 2675 2676 if c.Tide.StatusUpdatePeriod == nil { 2677 c.Tide.StatusUpdatePeriod = c.Tide.SyncPeriod 2678 } 2679 2680 if c.Tide.MaxGoroutines == 0 { 2681 c.Tide.MaxGoroutines = 20 2682 } 2683 if c.Tide.MaxGoroutines <= 0 { 2684 return fmt.Errorf("tide has invalid max_goroutines (%d), it needs to be a positive number", c.Tide.MaxGoroutines) 2685 } 2686 2687 if len(c.Tide.TargetURLs) > 0 && c.Tide.TargetURL != "" { 2688 return fmt.Errorf("tide.target_url and tide.target_urls are mutually exclusive") 2689 } 2690 2691 if c.Tide.TargetURLs == nil { 2692 c.Tide.TargetURLs = map[string]string{} 2693 } 2694 if c.Tide.TargetURL != "" { 2695 c.Tide.TargetURLs["*"] = c.Tide.TargetURL 2696 } 2697 2698 if c.Tide.PRStatusBaseURLs == nil { 2699 c.Tide.PRStatusBaseURLs = map[string]string{} 2700 } 2701 2702 if len(c.Tide.PRStatusBaseURL) > 0 { 2703 if len(c.Tide.PRStatusBaseURLs) > 0 { 2704 return fmt.Errorf("both pr_status_base_url and pr_status_base_urls are defined") 2705 } else { 2706 logrus.Warning("The `pr_status_base_url` setting is deprecated and it has been replaced by `pr_status_base_urls`. 
It will be removed in June 2020") 2707 c.Tide.PRStatusBaseURLs["*"] = c.Tide.PRStatusBaseURL 2708 } 2709 } 2710 2711 if len(c.Tide.PRStatusBaseURLs) > 0 { 2712 if _, ok := c.Tide.PRStatusBaseURLs["*"]; !ok { 2713 return fmt.Errorf("pr_status_base_urls is defined but the default value ('*') is missing") 2714 } 2715 } 2716 2717 if err := parseTideMergeType(c.Tide.MergeType); err != nil { 2718 return fmt.Errorf("tide merge type: %w", err) 2719 } 2720 2721 for name, templates := range c.Tide.MergeTemplate { 2722 if templates.TitleTemplate != "" { 2723 titleTemplate, err := template.New("CommitTitle").Parse(templates.TitleTemplate) 2724 2725 if err != nil { 2726 return fmt.Errorf("parsing template for commit title: %w", err) 2727 } 2728 2729 templates.Title = titleTemplate 2730 } 2731 2732 if templates.BodyTemplate != "" { 2733 bodyTemplate, err := template.New("CommitBody").Parse(templates.BodyTemplate) 2734 2735 if err != nil { 2736 return fmt.Errorf("parsing template for commit body: %w", err) 2737 } 2738 2739 templates.Body = bodyTemplate 2740 } 2741 2742 c.Tide.MergeTemplate[name] = templates 2743 } 2744 2745 for i, tq := range c.Tide.Queries { 2746 if err := tq.Validate(); err != nil { 2747 return fmt.Errorf("tide query (index %d) is invalid: %w", i, err) 2748 } 2749 } 2750 2751 if c.ProwJobNamespace == "" { 2752 c.ProwJobNamespace = "default" 2753 } 2754 if c.PodNamespace == "" { 2755 c.PodNamespace = "default" 2756 } 2757 2758 if c.Plank.JobURLPrefixConfig == nil { 2759 c.Plank.JobURLPrefixConfig = map[string]string{} 2760 } 2761 2762 if c.GitHubOptions.LinkURLFromConfig == "" { 2763 c.GitHubOptions.LinkURLFromConfig = "https://github.com" 2764 } 2765 linkURL, err := url.Parse(c.GitHubOptions.LinkURLFromConfig) 2766 if err != nil { 2767 return fmt.Errorf("unable to parse github.link_url, might not be a valid url: %w", err) 2768 } 2769 c.GitHubOptions.LinkURL = linkURL 2770 2771 if c.StatusErrorLink == "" { 2772 c.StatusErrorLink = "https://github.com/kubernetes/test-infra/issues" 2773 } 2774 2775 if c.LogLevel == "" { 2776 c.LogLevel = "info" 2777 } 2778 lvl, err := logrus.ParseLevel(c.LogLevel) 2779 if err != nil { 2780 return err 2781 } 2782 logrus.SetLevel(lvl) 2783 2784 // Avoid using a job timeout of infinity by setting the default value to 24 hours. 2785 if c.DefaultJobTimeout == nil { 2786 c.DefaultJobTimeout = &metav1.Duration{Duration: DefaultJobTimeout} 2787 } 2788 2789 // Ensure Policy.Include and Policy.Exclude are mutually exclusive. 2790 if len(c.BranchProtection.Include) > 0 && len(c.BranchProtection.Exclude) > 0 { 2791 return fmt.Errorf("Forbidden to set both Policy.Include and Policy.Exclude, Please use either Include or Exclude!") 2792 } 2793 2794 // Avoid using a Moonraker client timeout of infinity (default behavior of 2795 // https://pkg.go.dev/net/http#Client) by setting a default value. 2796 if c.Moonraker.ClientTimeout == nil { 2797 c.Moonraker.ClientTimeout = &metav1.Duration{Duration: DefaultMoonrakerClientTimeout} 2798 } 2799 2800 return nil 2801 } 2802 2803 // parseTideMergeType function parses a tide merge configuration and sets regexps out of every branch name. 
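// For example (illustrative): an org entry may set merge type "squash", one of
// its repos may override that with "rebase", and a branch key such as
// "release-.*" is compiled into TideBranchMergeType.Regexpr below so the
// branch name can be matched as a regexp.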
2804 func parseTideMergeType(tideMergeTypes map[string]TideOrgMergeType) utilerrors.Aggregate { 2805 isTideMergeTypeValid := func(mm types.PullRequestMergeType) bool { 2806 return mm == types.MergeMerge || mm == types.MergeRebase || mm == types.MergeSquash 2807 } 2808 mergeTypeErrs := make([]error, 0) 2809 for org, orgConfig := range tideMergeTypes { 2810 // Validate orgs 2811 if orgConfig.MergeType != "" && !isTideMergeTypeValid(orgConfig.MergeType) { 2812 mergeTypeErrs = append(mergeTypeErrs, 2813 fmt.Errorf("merge type %q for %s is not a valid type", orgConfig.MergeType, org)) 2814 } 2815 for repo, repoConfig := range orgConfig.Repos { 2816 // Validate repos 2817 if repoConfig.MergeType != "" && !isTideMergeTypeValid(repoConfig.MergeType) { 2818 mergeTypeErrs = append(mergeTypeErrs, 2819 fmt.Errorf("merge type %q for %s/%s is not a valid type", repoConfig.MergeType, org, repo)) 2820 } 2821 for branch, branchConfig := range repoConfig.Branches { 2822 // Validate branches 2823 regexpr, err := regexp.Compile(branch) 2824 if err != nil { 2825 mergeTypeErrs = append(mergeTypeErrs, fmt.Errorf("regex %q is not valid", branch)) 2826 } else { 2827 branchConfig.Regexpr = regexpr 2828 } 2829 if !isTideMergeTypeValid(branchConfig.MergeType) { 2830 mergeTypeErrs = append(mergeTypeErrs, 2831 fmt.Errorf("merge type %q for %s/%s@%s is not a valid type", 2832 branchConfig.MergeType, org, repo, branch)) 2833 } 2834 repoConfig.Branches[branch] = branchConfig 2835 } 2836 } 2837 } 2838 return utilerrors.NewAggregate(mergeTypeErrs) 2839 } 2840 2841 func validateLabels(labels map[string]string) error { 2842 for label, value := range labels { 2843 for _, prowLabel := range decorate.Labels() { 2844 if label == prowLabel { 2845 return fmt.Errorf("label %s is reserved for decoration", label) 2846 } 2847 } 2848 if errs := validation.IsQualifiedName(label); len(errs) != 0 { 2849 return fmt.Errorf("invalid label %s: %v", label, errs) 2850 } 2851 if errs := validation.IsValidLabelValue(labels[label]); len(errs) != 0 { 2852 return fmt.Errorf("label %s has invalid value %s: %v", label, value, errs) 2853 } 2854 } 2855 return nil 2856 } 2857 2858 func validateAnnotation(a map[string]string) error { 2859 for key := range a { 2860 if errs := validation.IsQualifiedName(key); len(errs) > 0 { 2861 return fmt.Errorf("invalid annotation key %q: %v", key, errs) 2862 } 2863 } 2864 return nil 2865 } 2866 2867 func validateJobQueueName(name string, validNames sets.Set[string]) error { 2868 if name != "" && !validNames.Has(name) { 2869 return fmt.Errorf("invalid job queue name %s", name) 2870 } 2871 return nil 2872 } 2873 2874 func validateAgent(v JobBase, podNamespace string) error { 2875 k := string(prowapi.KubernetesAgent) 2876 j := string(prowapi.JenkinsAgent) 2877 p := string(prowapi.TektonAgent) 2878 agents := sets.New[string](k, j, p) 2879 agent := v.Agent 2880 switch { 2881 case !agents.Has(agent): 2882 logrus.Warningf("agent %s is unknown and cannot be validated: use at your own risk", agent) 2883 return nil 2884 case v.Spec != nil && agent != k: 2885 return fmt.Errorf("job specs require agent: %s (found %q)", k, agent) 2886 case agent == k && v.Spec == nil: 2887 return errors.New("kubernetes jobs require a spec") 2888 case v.HasPipelineRunSpec() && agent != p: 2889 return fmt.Errorf("job pipeline_run_spec require agent: %s (found %q)", p, agent) 2890 case agent == p && !v.HasPipelineRunSpec(): 2891 return fmt.Errorf("agent: %s jobs require a pipeline_run_spec", p) 2892 case v.DecorationConfig != nil && agent != k: 2893 // 
2894 		return fmt.Errorf("decoration requires agent: %s (found %q)", k, agent)
2895 	case v.ErrorOnEviction && agent != k:
2896 		return fmt.Errorf("error_on_eviction only applies to agent: %s (found %q)", k, agent)
2897 	case v.Namespace == nil || *v.Namespace == "":
2898 		return fmt.Errorf("failed to default namespace")
2899 	case *v.Namespace != podNamespace && agent != p:
2900 		// TODO(fejta): update plank to allow this (depends on client change).
2901 		return fmt.Errorf("namespace customization requires agent: %s (found %q)", p, agent)
2902 	}
2903 	return nil
2904 }
2905 
2906 func validateDecoration(container v1.Container, config *prowapi.DecorationConfig) error {
2907 	if config == nil {
2908 		return nil
2909 	}
2910 
2911 	if err := config.Validate(); err != nil {
2912 		return fmt.Errorf("invalid decoration config: %w", err)
2913 	}
2914 	var args []string
2915 	args = append(append(args, container.Command...), container.Args...)
2916 	if len(args) == 0 || args[0] == "" {
2917 		return errors.New("decorated job containers must specify command and/or args")
2918 	}
2919 	return nil
2920 }
2921 
2922 func resolvePresets(name string, labels map[string]string, spec *v1.PodSpec, presets []Preset) error {
2923 	for _, preset := range presets {
2924 		if spec != nil {
2925 			if err := mergePreset(preset, labels, spec.Containers, &spec.Volumes); err != nil {
2926 				return fmt.Errorf("job %s failed to merge presets for podspec: %w", name, err)
2927 			}
2928 		}
2929 	}
2930 
2931 	return nil
2932 }
2933 
2934 var ReProwExtraRef = regexp.MustCompile(`PROW_EXTRA_GIT_REF_(\d+)`)
2935 
2936 func ValidatePipelineRunSpec(jobType prowapi.ProwJobType, extraRefs []prowapi.Refs, spec *pipelinev1beta1.PipelineRunSpec) error {
2937 	if spec == nil {
2938 		return nil
2939 	}
2940 	// Validate that the refs match what is requested by the job.
2941 	// The implicit git ref is optional to use, but any extra refs specified must
2942 	// be used or removed. (Specifying an unused extra ref is almost certainly
2943 	// unintentional, so we want to warn the user.)
2944 	extraIndexes := sets.NewInt()
2945 	if spec.PipelineSpec != nil {
2946 		for _, task := range spec.PipelineSpec.Tasks {
2947 			// Validate that periodic jobs don't request an implicit git ref.
2948 			if jobType == prowapi.PeriodicJob && task.TaskRef.Name == ProwImplicitGitResource {
2949 				return fmt.Errorf("periodic jobs do not have an implicit git ref to replace %s", ProwImplicitGitResource)
2950 			}
2951 
2952 			match := ReProwExtraRef.FindStringSubmatch(task.TaskRef.Name)
2953 			if len(match) != 2 {
2954 				continue
2955 			}
2956 			if len(match[1]) > 1 && match[1][0] == '0' {
2957 				return fmt.Errorf("task %q: leading zeros are not allowed in PROW_EXTRA_GIT_REF_* indexes", task.Name)
2958 			}
2959 			i, _ := strconv.Atoi(match[1]) // This can't error based on the regexp.
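			// e.g. a task ref named "PROW_EXTRA_GIT_REF_2" yields match[1] == "2", so i == 2.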
2960 			extraIndexes.Insert(i)
2961 		}
2962 	}
2963 
2964 	for i := range extraRefs {
2965 		if !extraIndexes.Has(i) {
2966 			return fmt.Errorf("extra_refs[%d] is not used; some task must reference PROW_EXTRA_GIT_REF_%d", i, i)
2967 		}
2968 	}
2969 	if len(extraRefs) != extraIndexes.Len() {
2970 		strs := make([]string, 0, extraIndexes.Len())
2971 		for i := range extraIndexes {
2972 			strs = append(strs, strconv.Itoa(i))
2973 		}
2974 		return fmt.Errorf("%d extra_refs are specified, but the following PROW_EXTRA_GIT_REF_* indexes are used: %s", len(extraRefs), strings.Join(strs, ", "))
2975 	}
2976 	return nil
2977 }
2978 
2979 func validatePodSpec(jobType prowapi.ProwJobType, spec *v1.PodSpec, decorationConfig *prowapi.DecorationConfig) error {
2980 	if spec == nil {
2981 		return nil
2982 	}
2983 
2984 	var errs []error
2985 
2986 	if len(spec.InitContainers) != 0 {
2987 		errs = append(errs, errors.New("pod spec may not use init containers"))
2988 	}
2989 
2990 	if n := len(spec.Containers); n < 1 {
2991 		// We must return here to not cause an out of bounds panic in the remaining validation.
2992 		return utilerrors.NewAggregate(append(errs, fmt.Errorf("pod spec must specify at least 1 container, found: %d", n)))
2993 	}
2994 
2995 	if n := len(spec.Containers); n > 1 && decorationConfig == nil {
2996 		return utilerrors.NewAggregate(append(errs, fmt.Errorf("pod utility decoration must be enabled to use multiple containers: %d", n)))
2997 	}
2998 
2999 	if len(spec.Containers) > 1 {
3000 		containerNames := sets.Set[string]{}
3001 		for _, container := range spec.Containers {
3002 			if container.Name == "" {
3003 				errs = append(errs, fmt.Errorf("container does not have a name; all containers must have names when defining multiple containers"))
3004 			}
3005 
3006 			if containerNames.Has(container.Name) {
3007 				errs = append(errs, fmt.Errorf("container named %q is defined more than once", container.Name))
3008 			}
3009 			containerNames.Insert(container.Name)
3010 
3011 			if decorate.PodUtilsContainerNames().Has(container.Name) {
3012 				errs = append(errs, fmt.Errorf("container name %s is reserved for decoration; please specify a different container name that does not conflict with pod utility container names", container.Name))
3013 			}
3014 		}
3015 	}
3016 
3017 	for i := range spec.Containers {
3018 		envNames := sets.Set[string]{}
3019 		for _, env := range spec.Containers[i].Env {
3020 			if envNames.Has(env.Name) {
3021 				errs = append(errs, fmt.Errorf("env var named %q is defined more than once", env.Name))
3022 			}
3023 			envNames.Insert(env.Name)
3024 
3025 			for _, prowEnv := range downwardapi.EnvForType(jobType) {
3026 				if env.Name == prowEnv {
3027 					// TODO(fejta): consider allowing this.
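					// These are the env vars Prow injects itself (JOB_NAME, BUILD_ID, etc.),
					// so a user-supplied value would collide with the injected one.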
3028 					errs = append(errs, fmt.Errorf("env %s is reserved", env.Name))
3029 				}
3030 			}
3031 		}
3032 	}
3033 
3034 	volumeNames := sets.Set[string]{}
3035 	decoratedVolumeNames := decorate.VolumeMounts(decorationConfig)
3036 	for _, volume := range spec.Volumes {
3037 		if volumeNames.Has(volume.Name) {
3038 			errs = append(errs, fmt.Errorf("volume named %q is defined more than once", volume.Name))
3039 		}
3040 		volumeNames.Insert(volume.Name)
3041 
3042 		if decoratedVolumeNames.Has(volume.Name) {
3043 			errs = append(errs, fmt.Errorf("volume %s is reserved for decoration", volume.Name))
3044 		}
3045 	}
3046 
3047 	for i := range spec.Containers {
3048 		for _, mount := range spec.Containers[i].VolumeMounts {
3049 			if !volumeNames.Has(mount.Name) && !decoratedVolumeNames.Has(mount.Name) {
3050 				errs = append(errs, fmt.Errorf("volumeMount named %q is undefined", mount.Name))
3051 			}
3052 			if decorate.VolumeMountsOnTestContainer().Has(mount.Name) {
3053 				errs = append(errs, fmt.Errorf("volumeMount name %s is reserved for decoration", mount.Name))
3054 			}
3055 			if decorate.VolumeMountPathsOnTestContainer().Has(mount.MountPath) {
3056 				errs = append(errs, fmt.Errorf("mount %s at %s conflicts with decoration mount", mount.Name, mount.MountPath))
3057 			}
3058 		}
3059 	}
3060 
3061 	return utilerrors.NewAggregate(errs)
3062 }
3063 
3064 func validateAlwaysRun(job Postsubmit) error {
3065 	if job.AlwaysRun != nil && *job.AlwaysRun {
3066 		if job.RunIfChanged != "" {
3067 			return fmt.Errorf("job %s is set to always run but also declares run_if_changed targets, which are mutually exclusive", job.Name)
3068 		}
3069 		if job.SkipIfOnlyChanged != "" {
3070 			return fmt.Errorf("job %s is set to always run but also declares skip_if_only_changed targets, which are mutually exclusive", job.Name)
3071 		}
3072 	}
3073 	if job.RunIfChanged != "" && job.SkipIfOnlyChanged != "" {
3074 		return fmt.Errorf("job %s declares run_if_changed and skip_if_only_changed, which are mutually exclusive", job.Name)
3075 	}
3076 	return nil
3077 }
3078 
3079 func validateTriggering(job Presubmit) error {
3080 	if job.AlwaysRun {
3081 		if job.RunIfChanged != "" {
3082 			return fmt.Errorf("job %s is set to always run but also declares run_if_changed targets, which are mutually exclusive", job.Name)
3083 		}
3084 		if job.SkipIfOnlyChanged != "" {
3085 			return fmt.Errorf("job %s is set to always run but also declares skip_if_only_changed targets, which are mutually exclusive", job.Name)
3086 		}
3087 	}
3088 	if job.RunIfChanged != "" && job.SkipIfOnlyChanged != "" {
3089 		return fmt.Errorf("job %s declares run_if_changed and skip_if_only_changed, which are mutually exclusive", job.Name)
3090 	}
3091 
3092 	if (job.Trigger != "" && job.RerunCommand == "") || (job.Trigger == "" && job.RerunCommand != "") {
3093 		return fmt.Errorf("either both or neither of job.Trigger and job.RerunCommand must be set; this wasn't the case for job %q", job.Name)
3094 	}
3095 
3096 	return nil
3097 }
3098 
3099 func validateReporting(j JobBase, r Reporter) error {
3100 	if !r.SkipReport && r.Context == "" {
3101 		return errors.New("job is set to report but has no context configured")
3102 	}
3103 	if !r.SkipReport {
3104 		return nil
3105 	}
3106 	for label, value := range j.Labels {
3107 		if label == kube.GerritReportLabel && value != "" {
3108 			return fmt.Errorf("gerrit report label %s is set to a non-empty string, but the job is configured to skip reporting", label)
3109 		}
3110 	}
3111 	return nil
3112 }
3113 
3114 // ValidateController validates the provided controller config.
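// It compiles the job URL and report templates, defaults MaxGoroutines to 20
// when unset, and rejects a negative MaxConcurrency.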
3115 func ValidateController(c *Controller, templateFuncMaps ...template.FuncMap) error {
3116 	tmpl := template.New("JobURL")
3117 	for _, fm := range templateFuncMaps {
3118 		_ = tmpl.Funcs(fm)
3119 	}
3120 	urlTmpl, err := tmpl.Parse(c.JobURLTemplateString)
3121 	if err != nil {
3122 		return fmt.Errorf("parsing template: %w", err)
3123 	}
3124 	c.JobURLTemplate = urlTmpl
3125 
3126 	if err := defaultAndValidateReportTemplate(c); err != nil {
3127 		return err
3128 	}
3129 	if c.MaxConcurrency < 0 {
3130 		return fmt.Errorf("controller has invalid max_concurrency (%d), it needs to be a non-negative number", c.MaxConcurrency)
3131 	}
3132 	if c.MaxGoroutines == 0 {
3133 		c.MaxGoroutines = 20
3134 	}
3135 	if c.MaxGoroutines <= 0 {
3136 		return fmt.Errorf("controller has invalid max_goroutines (%d), it needs to be a positive number", c.MaxGoroutines)
3137 	}
3138 	return nil
3139 }
3140 
3141 func defaultAndValidateReportTemplate(c *Controller) error {
3142 	if c.ReportTemplateString == "" && c.ReportTemplateStrings == nil {
3143 		return nil
3144 	}
3145 
3146 	if c.ReportTemplateString != "" {
3147 		if len(c.ReportTemplateStrings) > 0 {
3148 			return errors.New("both report_template and report_templates are specified")
3149 		}
3150 
3151 		logrus.Warning("report_template is deprecated and will be removed in September 2020. It will be replaced with report_templates['*']")
3152 		c.ReportTemplateStrings = make(map[string]string)
3153 		c.ReportTemplateStrings["*"] = c.ReportTemplateString
3154 	}
3155 
3156 	c.ReportTemplates = make(map[string]*template.Template)
3157 	for orgRepo, value := range c.ReportTemplateStrings {
3158 		reportTmpl, err := template.New("Report").Parse(value)
3159 		if err != nil {
3160 			return fmt.Errorf("error while parsing template for %s: %w", orgRepo, err)
3161 		}
3162 		c.ReportTemplates[orgRepo] = reportTmpl
3163 	}
3164 
3165 	return nil
3166 }
3167 
3168 // DefaultTriggerFor returns the default regexp string used to match comments
3169 // that should trigger the job with this name.
3170 func DefaultTriggerFor(name string) string {
3171 	return fmt.Sprintf(`(?m)^/test( | .* )%s,?($|\s.*)`, name)
3172 }
3173 
3174 // DefaultRerunCommandFor returns the default rerun command for the job with
3175 // this name.
3176 func DefaultRerunCommandFor(name string) string {
3177 	return fmt.Sprintf("/test %s", name)
3178 }
3179 
3180 // defaultJobBase configures common parameters, currently Agent, Namespace and Cluster.
3181 func (c *ProwConfig) defaultJobBase(base *JobBase) {
3182 	if base.Agent == "" { // Use kubernetes by default.
3183 		base.Agent = string(prowapi.KubernetesAgent)
3184 	}
3185 	if base.Namespace == nil || *base.Namespace == "" {
3186 		s := c.PodNamespace
3187 		base.Namespace = &s
3188 	}
3189 	if base.Cluster == "" {
3190 		base.Cluster = kube.DefaultClusterAlias
3191 	}
3192 }
3193 
3194 func (c *ProwConfig) defaultPresubmitFields(js []Presubmit) {
3195 	for i := range js {
3196 		c.defaultJobBase(&js[i].JobBase)
3197 		if js[i].Context == "" {
3198 			js[i].Context = js[i].Name
3199 		}
3200 		// Default the values of Trigger and RerunCommand if neither field is
3201 		// specified. Otherwise let validation fail, as both or neither should
3202 		// have been specified.
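		// For a job named "unit-test", the defaults are
		//   trigger:       (?m)^/test( | .* )unit-test,?($|\s.*)
		//   rerun command: /test unit-test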
3203 		if js[i].Trigger == "" && js[i].RerunCommand == "" {
3204 			js[i].Trigger = DefaultTriggerFor(js[i].Name)
3205 			js[i].RerunCommand = DefaultRerunCommandFor(js[i].Name)
3206 		}
3207 	}
3208 }
3209 
3210 func (c *ProwConfig) defaultPostsubmitFields(js []Postsubmit) {
3211 	for i := range js {
3212 		c.defaultJobBase(&js[i].JobBase)
3213 		if js[i].Context == "" {
3214 			js[i].Context = js[i].Name
3215 		}
3216 	}
3217 }
3218 
3219 func (c *ProwConfig) defaultPeriodicFields(js *Periodic) {
3220 	c.defaultJobBase(&js.JobBase)
3221 }
3222 
3223 // SetPresubmitRegexes compiles and validates all the regular expressions for
3224 // the provided presubmits.
3225 func SetPresubmitRegexes(js []Presubmit) error {
3226 	for i, j := range js {
3227 		if re, err := regexp.Compile(j.Trigger); err == nil {
3228 			js[i].re = &CopyableRegexp{re}
3229 		} else {
3230 			return fmt.Errorf("could not compile trigger regex for %s: %w", j.Name, err)
3231 		}
3232 		if !js[i].re.MatchString(j.RerunCommand) {
3233 			return fmt.Errorf("for job %s, rerun command %q does not match trigger %q", j.Name, j.RerunCommand, j.Trigger)
3234 		}
3235 		b, err := setBrancherRegexes(j.Brancher)
3236 		if err != nil {
3237 			return fmt.Errorf("could not set branch regexes for %s: %w", j.Name, err)
3238 		}
3239 		js[i].Brancher = b
3240 
3241 		c, err := setChangeRegexes(j.RegexpChangeMatcher)
3242 		if err != nil {
3243 			return fmt.Errorf("could not set change regexes for %s: %w", j.Name, err)
3244 		}
3245 		js[i].RegexpChangeMatcher = c
3246 	}
3247 	return nil
3248 }
3249 
3250 // setBrancherRegexes compiles and validates all the regular expressions for
3251 // the provided branch specifiers.
3252 func setBrancherRegexes(br Brancher) (Brancher, error) {
3253 	if len(br.Branches) > 0 {
3254 		if re, err := regexp.Compile(strings.Join(br.Branches, `|`)); err == nil {
3255 			br.re = &CopyableRegexp{re}
3256 		} else {
3257 			return br, fmt.Errorf("could not compile positive branch regex: %w", err)
3258 		}
3259 	}
3260 	if len(br.SkipBranches) > 0 {
3261 		if re, err := regexp.Compile(strings.Join(br.SkipBranches, `|`)); err == nil {
3262 			br.reSkip = &CopyableRegexp{re}
3263 		} else {
3264 			return br, fmt.Errorf("could not compile negative branch regex: %w", err)
3265 		}
3266 	}
3267 	return br, nil
3268 }
3269 
3270 func setChangeRegexes(cm RegexpChangeMatcher) (RegexpChangeMatcher, error) {
3271 	var reString, propName string
3272 	if reString = cm.RunIfChanged; reString != "" {
3273 		propName = "run_if_changed"
3274 	} else if reString = cm.SkipIfOnlyChanged; reString != "" {
3275 		propName = "skip_if_only_changed"
3276 	}
3277 	if reString != "" {
3278 		re, err := regexp.Compile(reString)
3279 		if err != nil {
3280 			return cm, fmt.Errorf("could not compile %s regex: %w", propName, err)
3281 		}
3282 		cm.reChanges = &CopyableRegexp{re}
3283 	}
3284 	return cm, nil
3285 }
3286 
3287 // SetPostsubmitRegexes compiles and validates all the regular expressions for
3288 // the provided postsubmits.
3289 func SetPostsubmitRegexes(ps []Postsubmit) error {
3290 	for i, j := range ps {
3291 		b, err := setBrancherRegexes(j.Brancher)
3292 		if err != nil {
3293 			return fmt.Errorf("could not set branch regexes for %s: %w", j.Name, err)
3294 		}
3295 		ps[i].Brancher = b
3296 		c, err := setChangeRegexes(j.RegexpChangeMatcher)
3297 		if err != nil {
3298 			return fmt.Errorf("could not set change regexes for %s: %w", j.Name, err)
3299 		}
3300 		ps[i].RegexpChangeMatcher = c
3301 	}
3302 	return nil
3303 }
3304 
3305 // OrgRepo supersedes org/repo string handling.
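// For example, the string "kubernetes/test-infra" corresponds to
// OrgRepo{Org: "kubernetes", Repo: "test-infra"}, and String() reverses this.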
3306 type OrgRepo struct {
3307 	Org  string
3308 	Repo string
3309 }
3310 
3311 func (repo OrgRepo) String() string {
3312 	return fmt.Sprintf("%s/%s", repo.Org, repo.Repo)
3313 }
3314 
3315 // NewOrgRepo creates an OrgRepo from an org/repo string.
3316 func NewOrgRepo(orgRepo string) *OrgRepo {
3317 	org, repo, err := SplitRepoName(orgRepo)
3318 	// SplitRepoName errors when it is unable to split the string into org/repo.
3319 	// If it errors, there is no slash, so the whole string is the org.
3320 	if err != nil {
3321 		return &OrgRepo{Org: orgRepo}
3322 	}
3323 	return &OrgRepo{Org: org, Repo: repo}
3324 }
3325 
3326 // OrgReposToStrings converts a list of OrgRepo to its String() equivalent.
3327 func OrgReposToStrings(vs []OrgRepo) []string {
3328 	vsm := make([]string, len(vs))
3329 	for i, v := range vs {
3330 		vsm[i] = v.String()
3331 	}
3332 	return vsm
3333 }
3334 
3335 // StringsToOrgRepos converts a list of org/repo strings to its OrgRepo equivalent.
3336 func StringsToOrgRepos(vs []string) []OrgRepo {
3337 	vsm := make([]OrgRepo, len(vs))
3338 	for i, v := range vs {
3339 		vsm[i] = *NewOrgRepo(v)
3340 	}
3341 	return vsm
3342 }
3343 
3344 // mergeFrom merges two prow configs. It must be called _before_ doing any
3345 // defaulting.
3346 // If you extend this, please also extend HasConfigFor accordingly.
3347 func (pc *ProwConfig) mergeFrom(additional *ProwConfig) error {
3348 	emptyReference := &ProwConfig{
3349 		BranchProtection:     additional.BranchProtection,
3350 		Tide:                 Tide{TideGitHubConfig: TideGitHubConfig{MergeType: additional.Tide.MergeType, Queries: additional.Tide.Queries}},
3351 		SlackReporterConfigs: additional.SlackReporterConfigs,
3352 	}
3353 
3354 	var errs []error
3355 	if diff := cmp.Diff(additional, emptyReference, DefaultDiffOpts...); diff != "" {
3356 		errs = append(errs, fmt.Errorf("only 'branch-protection', 'slack_reporter_configs', 'tide.merge_method' and 'tide.queries' may be set via additional config, all other fields have no merging logic yet. Diff: %s", diff))
3357 	}
3358 	if err := pc.BranchProtection.merge(&additional.BranchProtection); err != nil {
3359 		errs = append(errs, fmt.Errorf("failed to merge branch protection config: %w", err))
3360 	}
3361 	if err := pc.Tide.mergeFrom(&additional.Tide); err != nil {
3362 		errs = append(errs, fmt.Errorf("failed to merge tide config: %w", err))
3363 	}
3364 
3365 	if pc.SlackReporterConfigs == nil {
3366 		pc.SlackReporterConfigs = additional.SlackReporterConfigs
3367 	} else if err := pc.SlackReporterConfigs.mergeFrom(&additional.SlackReporterConfigs); err != nil {
3368 		errs = append(errs, fmt.Errorf("failed to merge slack-reporter config: %w", err))
3369 	}
3370 
3371 	return utilerrors.NewAggregate(errs)
3372 }
3373 
3374 // ContextDescriptionWithBaseSha is used by the GitHub reporting to store the baseSHA of a context
3375 // in the status context description. Tide will read this if present using the BaseSHAFromContextDescription
3376 // func. Storing the baseSHA in the status context allows us to store job results pretty much forever,
3377 // instead of having to rerun everything after sinker has cleaned up the ProwJobs.
3378 func ContextDescriptionWithBaseSha(humanReadable, baseSHA string) string {
3379 	var suffix string
3380 	if baseSHA != "" {
3381 		suffix = contextDescriptionBaseSHADelimiter + baseSHA
3382 		// Left-pad the baseSHA suffix so it's shown at a stable position on the right side in the GitHub UI.
3383 		// The GitHub UI will also trim it on the right side and replace some part of it with '...'. The
3384 		// API always returns the full string.
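		// The padded result looks like "Tests passed.          BaseSHA:<sha>",
		// where the gap consists of U+2001 runes.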
3385 if len(humanReadable+suffix) < contextDescriptionMaxLen { 3386 for i := 0; i < contextDescriptionMaxLen-len(humanReadable+suffix); i++ { 3387 // This looks like a standard space but is U+2001, because GitHub seems to deduplicate normal 3388 // spaces in their frontend. 3389 suffix = " " + suffix 3390 } 3391 } 3392 } 3393 return truncate(humanReadable, contextDescriptionMaxLen-len(suffix)) + suffix 3394 } 3395 3396 // BaseSHAFromContextDescription is used by Tide to decode a baseSHA from a github status context 3397 // description created via ContextDescriptionWithBaseSha. It will return an empty string if no 3398 // valid sha was found. 3399 func BaseSHAFromContextDescription(description string) string { 3400 split := strings.Split(description, contextDescriptionBaseSHADelimiter) 3401 // SHA1s are always 40 digits long. 3402 if len(split) != 2 || len(split[1]) != 40 { 3403 // Fallback to deprecated one if available. 3404 if split = strings.Split(description, contextDescriptionBaseSHADelimiterDeprecated); len(split) == 2 && len(split[1]) == 40 { 3405 return split[1] 3406 } 3407 return "" 3408 } 3409 return split[1] 3410 } 3411 3412 const ( 3413 contextDescriptionBaseSHADelimiter = " BaseSHA:" 3414 contextDescriptionBaseSHADelimiterDeprecated = " Basesha:" 3415 contextDescriptionMaxLen = 140 // https://developer.github.com/v3/repos/deployments/#parameters-2 3416 elide = " ... " 3417 ) 3418 3419 // truncate converts "really long messages" into "really ... messages". 3420 func truncate(in string, maxLen int) string { 3421 half := (maxLen - len(elide)) / 2 3422 if len(in) <= maxLen { 3423 return in 3424 } 3425 return in[:half] + elide + in[len(in)-half:] 3426 } 3427 3428 func (pc *ProwConfig) HasConfigFor() (global bool, orgs sets.Set[string], repos sets.Set[string]) { 3429 global = pc.hasGlobalConfig() 3430 orgs = sets.Set[string]{} 3431 repos = sets.Set[string]{} 3432 3433 for org, orgConfig := range pc.BranchProtection.Orgs { 3434 if isPolicySet(orgConfig.Policy) { 3435 orgs.Insert(org) 3436 } 3437 for repo := range orgConfig.Repos { 3438 repos.Insert(org + "/" + repo) 3439 } 3440 } 3441 3442 for orgOrRepo := range pc.Tide.MergeType { 3443 if strings.Contains(orgOrRepo, "/") { 3444 repos.Insert(orgOrRepo) 3445 } else { 3446 orgs.Insert(orgOrRepo) 3447 } 3448 } 3449 3450 for _, query := range pc.Tide.Queries { 3451 orgs.Insert(query.Orgs...) 3452 repos.Insert(query.Repos...) 3453 } 3454 3455 for orgOrRepo := range pc.SlackReporterConfigs { 3456 if orgOrRepo == "*" { 3457 // configuration for "*" is globally available 3458 continue 3459 } 3460 3461 if strings.Contains(orgOrRepo, "/") { 3462 repos.Insert(orgOrRepo) 3463 } else { 3464 orgs.Insert(orgOrRepo) 3465 } 3466 } 3467 3468 return global, orgs, repos 3469 } 3470 3471 func (pc *ProwConfig) hasGlobalConfig() bool { 3472 if pc.BranchProtection.ProtectTested != nil || pc.BranchProtection.AllowDisabledPolicies != nil || pc.BranchProtection.AllowDisabledJobPolicies != nil || pc.BranchProtection.ProtectReposWithOptionalJobs != nil || isPolicySet(pc.BranchProtection.Policy) || pc.SlackReporterConfigs.HasGlobalConfig() { 3473 return true 3474 } 3475 emptyReference := &ProwConfig{ 3476 BranchProtection: pc.BranchProtection, 3477 Tide: Tide{TideGitHubConfig: TideGitHubConfig{MergeType: pc.Tide.MergeType, Queries: pc.Tide.Queries}}, 3478 SlackReporterConfigs: pc.SlackReporterConfigs, 3479 } 3480 return cmp.Diff(pc, emptyReference, DefaultDiffOpts...) != "" 3481 } 3482 3483 // tideQueryMap is a map[tideQueryConfig]*tideQueryTarget. 
Because slices are not comparable, they 3484 // or structs containing them are not allowed as map keys. We sidestep this by using a json serialization 3485 // of the object as key instead. This is pretty inefficient but also something we only do once during 3486 // load. 3487 type tideQueryMap map[string]*tideQueryTarget 3488 3489 func (tm tideQueryMap) queries() (TideQueries, error) { 3490 var result TideQueries 3491 for k, v := range tm { 3492 var queryConfig tideQueryConfig 3493 if err := json.Unmarshal([]byte(k), &queryConfig); err != nil { 3494 return nil, fmt.Errorf("failed to unmarshal %q: %w", k, err) 3495 } 3496 result = append(result, TideQuery{ 3497 Orgs: v.Orgs, 3498 Repos: v.Repos, 3499 ExcludedRepos: v.ExcludedRepos, 3500 Author: queryConfig.Author, 3501 ExcludedBranches: queryConfig.ExcludedBranches, 3502 IncludedBranches: queryConfig.IncludedBranches, 3503 Labels: queryConfig.Labels, 3504 MissingLabels: queryConfig.MissingLabels, 3505 Milestone: queryConfig.Milestone, 3506 ReviewApprovedRequired: queryConfig.ReviewApprovedRequired, 3507 }) 3508 3509 } 3510 3511 // Sort the queries here to make sure that the de-duplication results 3512 // in a deterministic order. 3513 var errs []error 3514 sort.SliceStable(result, func(i, j int) bool { 3515 iSerialized, err := json.Marshal(result[i]) 3516 if err != nil { 3517 errs = append(errs, fmt.Errorf("failed to marshal %+v: %w", result[i], err)) 3518 } 3519 jSerialized, err := json.Marshal(result[j]) 3520 if err != nil { 3521 errs = append(errs, fmt.Errorf("failed to marshal %+v: %w", result[j], err)) 3522 } 3523 return string(iSerialized) < string(jSerialized) 3524 }) 3525 3526 return result, utilerrors.NewAggregate(errs) 3527 } 3528 3529 // sortStringSlice is a tiny wrapper that returns 3530 // the slice after sorting. 3531 func sortStringSlice(s []string) []string { 3532 sort.Strings(s) 3533 return s 3534 } 3535 3536 func (c *Config) deduplicateTideQueries(queries TideQueries) (TideQueries, error) { 3537 m := tideQueryMap{} 3538 for _, query := range queries { 3539 key := tideQueryConfig{ 3540 Author: query.Author, 3541 ExcludedBranches: sortStringSlice(query.ExcludedBranches), 3542 IncludedBranches: sortStringSlice(query.IncludedBranches), 3543 Labels: sortStringSlice(query.Labels), 3544 MissingLabels: sortStringSlice(query.MissingLabels), 3545 Milestone: query.Milestone, 3546 ReviewApprovedRequired: query.ReviewApprovedRequired, 3547 TenantIDs: query.TenantIDs(*c), 3548 } 3549 keyRaw, err := json.Marshal(key) 3550 if err != nil { 3551 return nil, fmt.Errorf("failed to marshal %+v: %w", key, err) 3552 } 3553 val, ok := m[string(keyRaw)] 3554 if !ok { 3555 val = &tideQueryTarget{} 3556 m[string(keyRaw)] = val 3557 } 3558 val.Orgs = append(val.Orgs, query.Orgs...) 3559 val.Repos = append(val.Repos, query.Repos...) 3560 val.ExcludedRepos = append(val.ExcludedRepos, query.ExcludedRepos...) 3561 } 3562 3563 return m.queries() 3564 }
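// Illustrative example (not part of the original source): two Tide queries
// that differ only in their org/repo targets, e.g.
//
//	tide:
//	  queries:
//	  - orgs: ["kubernetes"]
//	    labels: ["approved", "lgtm"]
//	  - repos: ["kubernetes-sigs/prow"]
//	    labels: ["lgtm", "approved"]
//
// produce the same tideQueryConfig key (the label slices are sorted before
// marshaling), so deduplicateTideQueries merges them into a single query that
// targets both org "kubernetes" and repo "kubernetes-sigs/prow".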