go.chromium.org/luci@v0.0.0-20240309015107-7cdc2e660f33/buildbucket/appengine/internal/config/project.go

// Copyright 2021 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import (
	"context"
	"crypto/sha1"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"time"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"

	"go.chromium.org/luci/buildbucket/protoutil"
	"go.chromium.org/luci/common/data/stringset"
	"go.chromium.org/luci/common/data/strpair"
	"go.chromium.org/luci/common/errors"
	"go.chromium.org/luci/common/logging"
	"go.chromium.org/luci/common/sync/parallel"
	"go.chromium.org/luci/config"
	"go.chromium.org/luci/config/cfgclient"
	"go.chromium.org/luci/config/validation"
	"go.chromium.org/luci/gae/service/datastore"
	rdbpbutil "go.chromium.org/luci/resultdb/pbutil"

	"go.chromium.org/luci/buildbucket/appengine/internal/clients"
	"go.chromium.org/luci/buildbucket/appengine/model"
	pb "go.chromium.org/luci/buildbucket/proto"
)

const CurrentBucketSchemaVersion = 14

// maximumExpiration is the maximum allowed expiration_secs in a builder's
// dimensions field.
const maximumExpiration = 21 * (24 * time.Hour)

// maxEntityCount is the Datastore maximum number of entities per transaction.
const maxEntityCount = 500

// maxBatchSize is the maximum allowed total size of a batch of Datastore
// operations. Datastore's actual maximum API request size is 10MB; we use 9MB
// to leave some buffer room.
var maxBatchSize = 9 * 1000 * 1000

var (
	authGroupNameRegex = regexp.MustCompile(`^([a-z\-]+/)?[0-9a-z_\-\.@]{1,100}$`)

	bucketRegex  = regexp.MustCompile(`^[a-z0-9\-_.]{1,100}$`)
	builderRegex = regexp.MustCompile(`^[a-zA-Z0-9\-_.\(\) ]{1,128}$`)

	serviceAccountRegex = regexp.MustCompile(`^[0-9a-zA-Z_\-\.\+\%]+@[0-9a-zA-Z_\-\.]+$`)

	dimensionKeyRegex = regexp.MustCompile(`^[a-zA-Z\_\-]+$`)

	// cacheNameRegex is copied from
	// https://chromium.googlesource.com/infra/luci/luci-py/+/f60b298f9057f19ddd7ffe26ec4c81cf8a9fa594/appengine/swarming/server/task_request.py#129
	// Keep it synchronized.
	cacheNameRegex = regexp.MustCompile(`^[a-z0-9_]+$`)
	// cacheNameMaxLength cannot be folded into cacheNameRegex above because a
	// Go regexp can only specify a repetition count below 1000.
	cacheNameMaxLength = 4096

	// DefExecutionTimeout is the default value for pb.Build.ExecutionTimeout.
	// See setTimeouts.
	DefExecutionTimeout = 3 * time.Hour

	// DefSchedulingTimeout is the default value for pb.Build.SchedulingTimeout.
	// See setTimeouts.
	DefSchedulingTimeout = 6 * time.Hour
)
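
// For orientation, a project's ${appid}.cfg (e.g. cr-buildbucket.cfg) is a
// BuildbucketCfg textproto. A minimal sketch (illustrative only; the names
// and values below are hypothetical):
//
//	buckets {
//		name: "try"
//		swarming {
//			builders {
//				name: "linux-rel"
//				swarming_host: "chromium-swarm.appspot.com"
//				dimensions: "os:Ubuntu"
//				exe { cipd_package: "infra/recipe_bundles/example" }
//			}
//		}
//	}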

// changeLog is a temporary struct to track all changes in UpdateProjectCfg.
type changeLog struct {
	item   string
	action string
}

// UpdateProjectCfg fetches all projects' Buildbucket configs from luci-config
// and updates them in Datastore.
func UpdateProjectCfg(ctx context.Context) error {
	client := cfgclient.Client(ctx)
	// We cannot fetch all projects' configs at once because luci-config still
	// runs on the GAEv1 Python 2 framework, which has a response size limit of
	// ~35MB. So first fetch the metadata of all project configs, then fetch
	// the actual configs in parallel.
	cfgMetas, err := client.GetProjectConfigs(ctx, "${appid}.cfg", true)
	if err != nil {
		return errors.Annotate(err, "while fetching project configs' metadata").Err()
	}
	cfgs := make([]*config.Config, len(cfgMetas))
	err = parallel.WorkPool(min(64, len(cfgMetas)), func(work chan<- func() error) {
		for i, meta := range cfgMetas {
			i := i
			cfgSet := meta.ConfigSet
			work <- func() error {
				cfg, err := client.GetConfig(ctx, cfgSet, "${appid}.cfg", false)
				if err != nil {
					return errors.Annotate(err, "failed to fetch the project config for %s", string(cfgSet)).Err()
				}
				cfgs[i] = cfg
				return nil
			}
		}
	})
	if err != nil {
		// Just log the error, and continue to update configs for those which
		// don't have errors.
		logging.Errorf(ctx, err.Error())
	}

	var bucketKeys []*datastore.Key
	if err := datastore.GetAll(ctx, datastore.NewQuery(model.BucketKind), &bucketKeys); err != nil {
		return errors.Annotate(err, "failed to fetch all bucket keys").Err()
	}

	var changes []*changeLog
	bucketsToDelete := make(map[string]map[string]*datastore.Key) // project -> bucket -> bucket key
	for _, bk := range bucketKeys {
		project := bk.Parent().StringID()
		if _, ok := bucketsToDelete[project]; !ok {
			bucketsToDelete[project] = make(map[string]*datastore.Key)
		}
		bucketsToDelete[project][bk.StringID()] = bk
	}

	var projKeys []*datastore.Key
	if err := datastore.GetAll(ctx, datastore.NewQuery(model.ProjectKind), &projKeys); err != nil {
		return errors.Annotate(err, "failed to fetch all project keys").Err()
	}
	projsToDelete := make(map[string]*datastore.Key) // project -> project key
	for _, projKey := range projKeys {
		projsToDelete[projKey.StringID()] = projKey
	}
	var projsToPut []*model.Project

	for i, meta := range cfgMetas {
		project := meta.ConfigSet.Project()
		if cfgs[i] == nil || cfgs[i].Content == "" {
			delete(bucketsToDelete, project)
			delete(projsToDelete, project)
			continue
		}
		pCfg := &pb.BuildbucketCfg{}
		if err := prototext.Unmarshal([]byte(cfgs[i].Content), pCfg); err != nil {
			logging.Errorf(ctx, "config of project %s is broken: %s", project, err)
			// If a project config is broken, we don't delete the already
			// stored buckets and projects.
			delete(bucketsToDelete, project)
			delete(projsToDelete, project)
			continue
		}
		delete(projsToDelete, project)

		revision := meta.Revision
		// revision is empty in file-system mode. Use the SHA-1 of the config
		// content as the revision.
		if revision == "" {
			cntHash := sha1.Sum([]byte(cfgs[i].Content))
			revision = "sha1:" + hex.EncodeToString(cntHash[:])
		}
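
		// The fallback revision has the form "sha1:<40 hex chars>"; for
		// illustration, hashing the empty string would render as
		// "sha1:da39a3ee5e6b4b0d3255bfef95601890afd80709". Revisions reported
		// by luci-config are used verbatim.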

		// Check if the shared project-level config has changed.
		prjChanged, err := isCommonConfigChanged(ctx, pCfg.CommonConfig, project)
		if err != nil {
			return err
		}
		if prjChanged {
			projsToPut = append(projsToPut, &model.Project{
				ID:           project,
				CommonConfig: pCfg.CommonConfig,
			})
		}

		// shadow bucket id -> bucket ids it shadows
		shadows := make(map[string][]string)
		// bucket id -> bucket entity
		buckets := make(map[string]*model.Bucket)
		for _, cfgBucket := range pCfg.Buckets {
			cfgBktName := shortBucketName(cfgBucket.Name)
			storedBucket := &model.Bucket{
				ID:     cfgBktName,
				Parent: model.ProjectKey(ctx, project),
			}
			delete(bucketsToDelete[project], cfgBktName)
			if err := model.GetIgnoreMissing(ctx, storedBucket); err != nil {
				return err
			}

			if storedBucket.Schema == CurrentBucketSchemaVersion && storedBucket.Revision == revision {
				// Keep the stored buckets for now, so that in the case that a
				// newly added bucket reuses an existing bucket as its shadow
				// bucket, the shadow bucket gets updated later with the list
				// of buckets it shadows.
				buckets[cfgBktName] = storedBucket
				if storedBucket.Proto.GetShadow() != "" {
					// This bucket is shadowed.
					shadow := storedBucket.Proto.Shadow
					shadows[shadow] = append(shadows[shadow], cfgBktName)
				}
				continue
			}

			var builders []*model.Builder
			bktKey := model.BucketKey(ctx, project, cfgBktName)
			if err := datastore.GetAll(ctx, datastore.NewQuery(model.BuilderKind).Ancestor(bktKey), &builders); err != nil {
				return errors.Annotate(err, "failed to fetch builders for %s.%s", project, cfgBktName).Err()
			}

			// Builders currently stored in Datastore.
			// Map items are removed while iterating over the builder configs,
			// so that afterwards the map holds exactly the builders which no
			// longer exist in the latest configs.
			bldrMap := make(map[string]*model.Builder) // full builder name -> *model.Builder
			for _, bldr := range builders {
				bldrMap[bldr.FullBuilderName()] = bldr
			}
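
			// Builder writes are flushed in batches so that each batch stays
			// within both Datastore limits tracked here: at most
			// maxEntityCount entities and roughly maxBatchSize bytes per
			// request. For example (illustrative), 1200 small builder configs
			// of ~2KB each would be split into batches of 500, 500 and 200 by
			// the entity-count limit, while 10 configs of ~1MB each would be
			// split by the byte limit instead.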

			// Each buildersToPut[i] holds a batch of builders to update that
			// stays within the maxEntityCount and maxBatchSize limits.
			buildersToPut := [][]*model.Builder{{}}
			currentBatchSize := 0
			for _, cfgBuilder := range cfgBucket.GetSwarming().GetBuilders() {
				if !checkPoolDimExists(cfgBuilder) {
					cfgBuilder.Dimensions = append(cfgBuilder.Dimensions, fmt.Sprintf("pool:luci.%s.%s", project, cfgBktName))
				}

				cfgBldrName := fmt.Sprintf("%s.%s.%s", project, cfgBktName, cfgBuilder.Name)
				cfgBuilderHash, bldrSize, err := computeBuilderHash(cfgBuilder)
				if err != nil {
					return errors.Annotate(err, "while computing hash for builder:%s", cfgBldrName).Err()
				}
				if bldr, ok := bldrMap[cfgBldrName]; ok {
					delete(bldrMap, cfgBldrName)
					if bldr.ConfigHash == cfgBuilderHash {
						continue
					}
				}

				if bldrSize > maxBatchSize {
					return errors.Reason("builder %s size exceeds %d bytes", cfgBldrName, maxBatchSize).Err()
				}
				bldrToPut := &model.Builder{
					ID:         cfgBuilder.Name,
					Parent:     bktKey,
					Config:     cfgBuilder,
					ConfigHash: cfgBuilderHash,
				}
				currentBatchIdx := len(buildersToPut) - 1
				if currentBatchSize+bldrSize <= maxBatchSize && len(buildersToPut[currentBatchIdx])+1 <= maxEntityCount {
					currentBatchSize += bldrSize
					buildersToPut[currentBatchIdx] = append(buildersToPut[currentBatchIdx], bldrToPut)
				} else {
					buildersToPut = append(buildersToPut, []*model.Builder{bldrToPut})
					currentBatchSize = bldrSize
				}
			}

			// Update this iteration's bucket and its builders, and delete
			// builders that no longer exist.
			if len(cfgBucket.Swarming.GetBuilders()) != 0 {
				// Trim builders. They're stored in separate Builder entities.
				cfgBucket.Swarming.Builders = []*pb.BuilderConfig{}
			}
			bucketToUpdate := &model.Bucket{
				ID:       cfgBktName,
				Parent:   model.ProjectKey(ctx, project),
				Schema:   CurrentBucketSchemaVersion,
				Revision: revision,
				Proto:    cfgBucket,
			}
			buckets[cfgBktName] = bucketToUpdate

			if cfgBucket.GetShadow() != "" {
				// This bucket is shadowed.
				shadow := cfgBucket.Shadow
				shadows[shadow] = append(shadows[shadow], cfgBktName)
			}

			var bldrsToDel []*model.Builder
			for _, bldr := range bldrMap {
				bldrsToDel = append(bldrsToDel, &model.Builder{
					ID:     bldr.ID,
					Parent: bldr.Parent,
				})
			}
			var err error
			// Check whether all the writes fit into a single transaction.
			if len(buildersToPut) == 1 && len(buildersToPut[0])+len(bldrsToDel) < maxEntityCount {
				err = transacUpdate(ctx, bucketToUpdate, buildersToPut[0], bldrsToDel)
			} else {
				err = nonTransacUpdate(ctx, bucketToUpdate, buildersToPut, bldrsToDel)
			}
			if err != nil {
				return errors.Annotate(err, "for bucket %s.%s", project, cfgBktName).Err()
			}

			// TODO(crbug.com/1362157) Delete after the correctness is proved in Prod.
			changes = append(changes, &changeLog{item: project + "." + cfgBktName, action: "put"})
			for _, bldrs := range buildersToPut {
				for _, bldr := range bldrs {
					changes = append(changes, &changeLog{item: bldr.FullBuilderName(), action: "put"})
				}
			}
			for _, bldr := range bldrsToDel {
				changes = append(changes, &changeLog{item: bldr.FullBuilderName(), action: "delete"})
			}
		}
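
		// At this point shadows maps each shadow bucket to the buckets it
		// shadows; e.g. (hypothetical names) if both "try" and "try.gpu"
		// declare shadow: "try.shadow", then
		// shadows["try.shadow"] = ["try", "try.gpu"].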

		// Update shadow buckets.
		stringSliceEqual := func(a, b []string) bool {
			if len(a) != len(b) {
				return false
			}
			sort.Strings(a)
			sort.Strings(b)
			for i, item := range a {
				if item != b[i] {
					return false
				}
			}
			return true
		}
		var shadowBucketsToUpdate []*model.Bucket
		for shadow, shadowed := range shadows {
			shadowed = stringset.NewFromSlice(shadowed...).ToSlice() // Deduplicate
			toUpdate, ok := buckets[shadow]
			if !ok {
				logging.Infof(ctx, "cannot find config for shadow bucket %s in project %s", shadow, project)
				continue
			}
			sort.Strings(shadowed)
			// Only write shadow buckets whose shadows list actually changed.
			if !stringSliceEqual(toUpdate.Shadows, shadowed) {
				toUpdate.Shadows = shadowed
				shadowBucketsToUpdate = append(shadowBucketsToUpdate, toUpdate)
			}
		}
		if len(shadowBucketsToUpdate) > 0 {
			if err := datastore.Put(ctx, shadowBucketsToUpdate); err != nil {
				return errors.Annotate(err, "for shadow buckets in project %s", project).Err()
			}
		}
	}

	// Delete non-existent buckets (and all associated builders).
	var toDelete []*datastore.Key
	for _, bktMap := range bucketsToDelete {
		for _, bktKey := range bktMap {
			bldrKeys := []*datastore.Key{}
			if err := datastore.GetAll(ctx, datastore.NewQuery(model.BuilderKind).Ancestor(bktKey), &bldrKeys); err != nil {
				return err
			}
			toDelete = append(toDelete, bldrKeys...)
			toDelete = append(toDelete, bktKey)
		}
	}
	// Delete non-existent projects. Their associated buckets/builders have
	// already been added to toDelete above.
	for _, projKey := range projsToDelete {
		toDelete = append(toDelete, projKey)
	}

	// TODO(crbug.com/1362157) Delete after the correctness is proved in Prod.
	for _, key := range toDelete {
		changes = append(changes, &changeLog{item: key.StringID(), action: "delete"})
	}
	for _, prj := range projsToPut {
		changes = append(changes, &changeLog{item: prj.ID, action: "put"})
	}

	if len(changes) == 0 {
		logging.Debugf(ctx, "No changes this time.")
	} else {
		logging.Debugf(ctx, "Made %d changes:", len(changes))
		for _, change := range changes {
			logging.Debugf(ctx, "%s, Action:%s", change.item, change.action)
		}
	}
	if err := datastore.Put(ctx, projsToPut); err != nil {
		return err
	}
	return datastore.Delete(ctx, toDelete)
}

func isCommonConfigChanged(ctx context.Context, newCfg *pb.BuildbucketCfg_CommonConfig, project string) (bool, error) {
	storedPrj := &model.Project{ID: project}
	switch err := datastore.Get(ctx, storedPrj); {
	case err == datastore.ErrNoSuchEntity:
		return len(newCfg.GetBuildsNotificationTopics()) != 0, nil
	case err != nil:
		return false, errors.Annotate(err, "error fetching project entity - %s", project).Err()
	}

	storedTopics := storedPrj.CommonConfig.GetBuildsNotificationTopics()
	if len(newCfg.GetBuildsNotificationTopics()) != len(storedTopics) {
		return true, nil
	}

	storedTopicsSet := stringset.New(len(storedTopics))
	for _, t := range storedTopics {
		storedTopicsSet.Add(t.Name + "|" + t.Compression.String())
	}
	for _, t := range newCfg.GetBuildsNotificationTopics() {
		if !storedTopicsSet.Has(t.Name + "|" + t.Compression.String()) {
			return true, nil
		}
	}
	return false, nil
}
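
// A bucket whose builder puts form a single size-bounded batch and whose puts
// plus deletes stay under maxEntityCount is written atomically via
// transacUpdate; anything larger falls back to nonTransacUpdate, since a
// single Datastore transaction cannot touch more than maxEntityCount entities.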

// transacUpdate puts the given bucket, puts its changed builders and deletes
// its removed builders in a single transaction.
func transacUpdate(ctx context.Context, bucketToUpdate *model.Bucket, buildersToPut, bldrsToDel []*model.Builder) error {
	err := datastore.RunInTransaction(ctx, func(ctx context.Context) error {
		if err := datastore.Put(ctx, bucketToUpdate); err != nil {
			return errors.Annotate(err, "failed to put bucket").Err()
		}
		if err := datastore.Put(ctx, buildersToPut); err != nil {
			return errors.Annotate(err, "failed to put %d builders", len(buildersToPut)).Err()
		}
		if err := datastore.Delete(ctx, bldrsToDel); err != nil {
			return errors.Annotate(err, "failed to delete %d builders", len(bldrsToDel)).Err()
		}
		return nil
	}, nil)
	return err
}

// nonTransacUpdate puts the given bucket, puts its changed builders and
// deletes its removed builders non-transactionally.
func nonTransacUpdate(ctx context.Context, bucketToUpdate *model.Bucket, buildersToPut [][]*model.Builder, bldrsToDel []*model.Builder) error {
	// Delete builders in bldrsToDel, chunked by the per-request entity limit.
	for i := 0; i < (len(bldrsToDel)+maxEntityCount-1)/maxEntityCount; i++ {
		startIdx := i * maxEntityCount
		endIdx := startIdx + maxEntityCount
		if endIdx > len(bldrsToDel) {
			endIdx = len(bldrsToDel)
		}
		if err := datastore.Delete(ctx, bldrsToDel[startIdx:endIdx]); err != nil {
			return errors.Annotate(err, "failed to delete builders[%d:%d]", startIdx, endIdx).Err()
		}
	}

	// Put builders in buildersToPut, one pre-sized batch at a time.
	for _, bldrs := range buildersToPut {
		if err := datastore.Put(ctx, bldrs); err != nil {
			return errors.Annotate(err, "failed to put builders starting from %s", bldrs[0].ID).Err()
		}
	}

	// Put the bucket.
	return datastore.Put(ctx, bucketToUpdate)
}

func min(i, j int) int {
	if i < j {
		return i
	}
	return j
}

// checkPoolDimExists checks whether a pool dimension exists in the builder
// config.
func checkPoolDimExists(cfgBuilder *pb.BuilderConfig) bool {
	for _, dim := range cfgBuilder.GetDimensions() {
		if strings.HasPrefix(dim, "pool:") {
			return true
		}
	}
	return false
}

// shortBucketName returns the bucket name without the "luci.<project_id>."
// prefix, e.g. "luci.chromium.try" -> "try".
func shortBucketName(name string) string {
	parts := strings.SplitN(name, ".", 3)
	if len(parts) == 3 && parts[0] == "luci" {
		return parts[2]
	}
	return name
}

// computeBuilderHash computes the BuilderConfig hash.
// It returns the computed hash and the size of the marshaled BuilderConfig,
// or an error.
func computeBuilderHash(cfg *pb.BuilderConfig) (string, int, error) {
	bCfg, err := proto.MarshalOptions{Deterministic: true}.Marshal(cfg)
	if err != nil {
		return "", 0, err
	}
	sha256Bldr := sha256.Sum256(bCfg)
	return hex.EncodeToString(sha256Bldr[:]), len(bCfg), nil
}
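
// Note that deterministic proto marshaling, as used by computeBuilderHash, is
// stable for a given binary but not guaranteed to be identical across protobuf
// library versions. A hash mismatch can therefore mean either a real config
// change or a serialization change; both correctly trigger a re-put of the
// builder.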

// validateProjectCfg implements validation.Func and validates the content of
// the Buildbucket project config file.
//
// Validation errors are returned via validation.Context. Non-validation errors
// are directly returned.
func validateProjectCfg(ctx *validation.Context, configSet, path string, content []byte) error {
	cfg := pb.BuildbucketCfg{}
	if err := prototext.Unmarshal(content, &cfg); err != nil {
		ctx.Errorf("invalid BuildbucketCfg proto message: %s", err)
		return nil
	}

	globalCfg, err := GetSettingsCfg(ctx.Context)
	if err != nil {
		// This error is unrelated to the data being validated, so directly
		// return it to instruct the config service to retry.
		return errors.Annotate(err, "error fetching service config").Err()
	}
	wellKnownExperiments := protoutil.WellKnownExperiments(globalCfg)

	// The format of configSet here is "projects/.*".
	project := strings.Split(configSet, "/")[1]
	bucketNames := stringset.New(len(cfg.Buckets))
	for i, bucket := range cfg.Buckets {
		ctx.Enter("buckets #%d - %s", i, bucket.Name)
		switch err := validateBucketName(bucket.Name, project); {
		case err != nil:
			ctx.Errorf("invalid name %q: %s", bucket.Name, err)
		case bucketNames.Has(bucket.Name):
			ctx.Errorf("duplicate bucket name %q", bucket.Name)
		case i > 0 && strings.Compare(bucket.Name, cfg.Buckets[i-1].Name) < 0:
			ctx.Warningf("bucket %q out of order", bucket.Name)
		case bucket.GetSwarming() != nil && bucket.GetDynamicBuilderTemplate() != nil:
			ctx.Errorf("mutually exclusive fields swarming and dynamic_builder_template both exist in bucket %q", bucket.Name)
		case bucket.GetDynamicBuilderTemplate() != nil && bucket.GetShadow() != "":
			ctx.Errorf("dynamic bucket %q cannot have a shadow bucket %q", bucket.Name, bucket.Shadow)
		}
		bucketNames.Add(bucket.Name)
		// TODO(crbug/1399576): Change this once the bucket proto replaces the
		// Swarming message name.
		if s := bucket.Swarming; s != nil {
			validateProjectSwarming(ctx, s, wellKnownExperiments, project, globalCfg)
		}
		if builderTemp := bucket.DynamicBuilderTemplate.GetTemplate(); builderTemp != nil {
			validateBuilderCfg(ctx, builderTemp, wellKnownExperiments, project, globalCfg, true)
		}
		ctx.Exit()
	}

	if cfg.CommonConfig != nil {
		validateBuildNotifyTopics(ctx, cfg.CommonConfig.BuildsNotificationTopics, project)
	}
	return nil
}

// validateBuildNotifyTopics validates the `builds_notification_topics` field.
func validateBuildNotifyTopics(ctx *validation.Context, topics []*pb.BuildbucketCfg_Topic, project string) {
	if len(topics) == 0 {
		return
	}

	ctx.Enter("builds_notification_topics")
	defer ctx.Exit()

	errs := make(errors.MultiError, len(topics))
	_ = parallel.WorkPool(min(6, len(topics)), func(work chan<- func() error) {
		for i, topic := range topics {
			i := i
			topic := topic
			cloudProj, topicID, err := clients.ValidatePubSubTopicName(topic.Name)
			if err != nil {
				errs[i] = err
				continue
			}
			work <- func() error {
				client, err := clients.NewPubsubClient(ctx.Context, cloudProj, project)
				if err != nil {
					errs[i] = errors.Annotate(err, "failed to create a pubsub client for %q", cloudProj).Err()
					return nil
				}
				cTopic := client.Topic(topicID)
				switch perms, err := cTopic.IAM().TestPermissions(ctx.Context, []string{"pubsub.topics.publish"}); {
				case err != nil:
					errs[i] = errors.Annotate(err, "failed to check luci project account's permission for %s", topic.Name).Err()
				case len(perms) < 1:
					errs[i] = errors.Reason("luci project account (%s-scoped@luci-project-accounts.iam.gserviceaccount.com) doesn't have the publish permission for %s", project, topic.Name).Err()
				}
				return nil
			}
		}
	})

	for _, err := range errs {
		if err != nil {
			ctx.Errorf("builds_notification_topics: %s", err)
		}
	}
}
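
// Topic names are expected in the fully-qualified Cloud Pub/Sub form,
// presumably "projects/<cloud project>/topics/<topic id>", e.g.
// "projects/my-cloud-project/topics/my-build-updates" (a hypothetical
// example; see clients.ValidatePubSubTopicName for the authoritative parsing).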

// validateProjectSwarming validates project_config.Swarming.
func validateProjectSwarming(ctx *validation.Context, s *pb.Swarming, wellKnownExperiments stringset.Set, project string, globalCfg *pb.SettingsCfg) {
	ctx.Enter("swarming")
	defer ctx.Exit()

	if s.GetTaskTemplateCanaryPercentage().GetValue() > 100 {
		ctx.Errorf("task_template_canary_percentage.value must be in [0, 100]")
	}

	builderNames := stringset.New(len(s.Builders))
	for i, b := range s.Builders {
		ctx.Enter("builders #%d - %s", i, b.Name)
		validateBuilderCfg(ctx, b, wellKnownExperiments, project, globalCfg, false)
		if builderNames.Has(b.Name) {
			ctx.Errorf("name: duplicate")
		} else {
			builderNames.Add(b.Name)
		}
		ctx.Exit()
	}
}

func validateBucketName(bucket, project string) error {
	switch {
	case bucket == "":
		return errors.New("bucket name is not specified")
	case strings.HasPrefix(bucket, "luci.") && !strings.HasPrefix(bucket, fmt.Sprintf("luci.%s.", project)):
		return errors.Reason("must start with 'luci.%s.' because it starts with 'luci.' and is defined in the %q project", project, project).Err()
	case !bucketRegex.MatchString(bucket):
		return errors.Reason("%q does not match %q", bucket, bucketRegex).Err()
	}
	return nil
}
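
// For instance, within project "chromium" the names "try" and
// "luci.chromium.try" are accepted, while "luci.v8.try" is rejected because a
// "luci."-prefixed name must embed its own project id (examples are
// illustrative).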

// ValidateTaskBackendTarget validates the target value for a
// buildbucket task backend.
func ValidateTaskBackendTarget(globalCfg *pb.SettingsCfg, target string) error {
	for _, backendSetting := range globalCfg.Backends {
		if backendSetting.Target == target {
			return nil
		}
	}
	return errors.Reason("provided backend target was not in global config").Err()
}

// validateTaskBackendConfigJson makes an API call to the task backend server's
// ValidateConfigs RPC. If there are errors with the config, it propagates them
// into the validation context.
func validateTaskBackendConfigJson(ctx *validation.Context, backend *pb.BuilderConfig_Backend, project string) error {
	globalCfg, err := GetSettingsCfg(ctx.Context)
	if err != nil {
		return err
	}
	backendClient, err := clients.NewBackendClient(ctx.Context, project, backend.Target, globalCfg)
	if err != nil {
		return err
	}
	configJsonPb := &structpb.Struct{}
	if err := configJsonPb.UnmarshalJSON([]byte(backend.ConfigJson)); err != nil {
		return err
	}
	req := &pb.ValidateConfigsRequest{
		Configs: []*pb.ValidateConfigsRequest_ConfigContext{
			{
				Target:     backend.Target,
				ConfigJson: configJsonPb,
			},
		},
	}
	resp, err := backendClient.ValidateConfigs(ctx.Context, req)
	if err != nil {
		return err
	}
	for _, configErr := range resp.ConfigErrors {
		ctx.Errorf("error validating task backend ConfigJson at index %d: %s", configErr.Index, configErr.Error)
	}
	return nil
}

func validateTaskBackend(ctx *validation.Context, backend *pb.BuilderConfig_Backend, project string) {
	globalCfg, err := GetSettingsCfg(ctx.Context)
	if err != nil {
		ctx.Errorf("could not get global settings config")
		return
	}

	// Validate backend.Target.
	if err := ValidateTaskBackendTarget(globalCfg, backend.GetTarget()); err != nil {
		ctx.Errorf("error validating task backend target: %s", err)
	}
	if backend.ConfigJson != "" {
		if err := validateTaskBackendConfigJson(ctx, backend, project); err != nil {
			ctx.Errorf("error validating task backend ConfigJson: %s", err)
		}
	}
}
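
// config_json must be a JSON object that the named backend understands; for a
// hypothetical Swarming-backed target it might look like
// {"priority": 30, "bot_ping_tolerance": 300}. The keys are entirely
// backend-defined and are checked by the backend itself via ValidateConfigs.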

// validateBuilderCfg validates a Builder config message.
func validateBuilderCfg(ctx *validation.Context, b *pb.BuilderConfig, wellKnownExperiments stringset.Set, project string, globalCfg *pb.SettingsCfg, isDynamic bool) {
	// TODO(iannucci): also validate builder allowed_property_overrides field. See
	// //lucicfg/starlark/stdlib/internal/luci/rules/builder.star

	// name
	if isDynamic && b.Name != "" {
		ctx.Errorf("builder name should not be set in a dynamic bucket")
	} else if !isDynamic && !builderRegex.MatchString(b.Name) {
		ctx.Errorf("name must match %s", builderRegex)
	}

	// auto_builder_dimension
	if isDynamic && b.GetAutoBuilderDimension() == pb.Toggle_YES {
		ctx.Errorf("should not toggle on auto_builder_dimension in a dynamic bucket")
	}

	// Need to do separate checks here since backend and backend_alt can both
	// be set. Either backend or swarming must be set, but not both.
	switch {
	case b.GetSwarmingHost() != "" && b.GetBackend() != nil:
		ctx.Errorf("only one of swarming host or task backend is allowed")
	case b.GetBackend() != nil:
		validateTaskBackend(ctx, b.Backend, project)
	case b.GetSwarmingHost() != "":
		validateHostname(ctx, "swarming_host", b.SwarmingHost)
	default:
		ctx.Errorf("either swarming host or task backend must be set")
	}

	if b.GetBackendAlt() != nil {
		// Validate backend_alt.
		validateTaskBackend(ctx, b.BackendAlt, project)
	}

	// Validate swarming_tags.
	for i, swarmingTag := range b.SwarmingTags {
		ctx.Enter("swarming_tags #%d", i)
		if swarmingTag != "vpython:native-python-wrapper" {
			ctx.Errorf("Deprecated. Used only to enable \"vpython:native-python-wrapper\"")
		}
		ctx.Exit()
	}

	validateDimensions(ctx, b.Dimensions, false)

	// timeouts
	if b.ExecutionTimeoutSecs != 0 || b.ExpirationSecs != 0 {
		exeTimeout := time.Duration(b.ExecutionTimeoutSecs) * time.Second
		schedulingTimeout := time.Duration(b.ExpirationSecs) * time.Second
		if exeTimeout == 0 {
			exeTimeout = DefExecutionTimeout
		}
		if schedulingTimeout == 0 {
			schedulingTimeout = DefSchedulingTimeout
		}
		if exeTimeout+schedulingTimeout > model.BuildMaxCompletionTime {
			exeTimeoutSec := int(exeTimeout.Seconds())
			schedulingTimeoutSec := int(schedulingTimeout.Seconds())
			limitTimeoutSec := int(model.BuildMaxCompletionTime.Seconds())
			switch {
			case b.ExecutionTimeoutSecs == 0:
				ctx.Errorf("(default) execution_timeout_secs %d + expiration_secs %d exceeds max build completion time %d", exeTimeoutSec, schedulingTimeoutSec, limitTimeoutSec)
			case b.ExpirationSecs == 0:
				ctx.Errorf("execution_timeout_secs %d + (default) expiration_secs %d exceeds max build completion time %d", exeTimeoutSec, schedulingTimeoutSec, limitTimeoutSec)
			default:
				ctx.Errorf("execution_timeout_secs %d + expiration_secs %d exceeds max build completion time %d", exeTimeoutSec, schedulingTimeoutSec, limitTimeoutSec)
			}
		}
	}
	if b.HeartbeatTimeoutSecs != 0 &&
		!isLiteBackend(b.GetBackend().GetTarget(), globalCfg) &&
		!isLiteBackend(b.GetBackendAlt().GetTarget(), globalCfg) {
		ctx.Errorf("heartbeat_timeout_secs should only be set for builders using a TaskBackendLite backend")
	}

	// resultdb
	if b.Resultdb.GetHistoryOptions().GetCommit() != nil {
		ctx.Errorf("resultdb.history_options.commit must be unset")
	}

	for i, bqExport := range b.Resultdb.GetBqExports() {
		if err := rdbpbutil.ValidateBigQueryExport(bqExport); err != nil {
			ctx.Errorf("error validating resultdb.bq_exports[%d]: %s", i, err)
		}
	}

	validateCaches(ctx, b.Caches)

	// exe and recipe
	switch {
	case (b.Exe == nil && b.Recipe == nil && !isDynamic) || (b.Exe != nil && b.Recipe != nil):
		ctx.Errorf("exactly one of exe or recipe must be specified")
	case b.Exe != nil && b.Exe.CipdPackage == "":
		ctx.Errorf("exe.cipd_package: unspecified")
	case b.Recipe != nil:
		if b.Properties != "" {
			ctx.Errorf("recipe and properties cannot be set together")
		}
		ctx.Enter("recipe")
		validateBuilderRecipe(ctx, b.Recipe)
		ctx.Exit()
	}

	// priority
	if (b.Priority != 0) && (b.Priority < 20 || b.Priority > 255) {
		ctx.Errorf("priority: must be in [20, 255] range; got %d", b.Priority)
	}

	// properties
	if b.Properties != "" {
		if !strings.HasPrefix(b.Properties, "{") || !json.Valid([]byte(b.Properties)) {
			ctx.Errorf("properties is not a JSON object")
		}
	}

	// service_account
	if b.ServiceAccount != "" && b.ServiceAccount != "bot" && !serviceAccountRegex.MatchString(b.ServiceAccount) {
		ctx.Errorf("service_account %q doesn't match %q", b.ServiceAccount, serviceAccountRegex)
	}

	// experiments
	for expName, percent := range b.Experiments {
		ctx.Enter("experiments %q", expName)
		if err := ValidateExperimentName(expName, wellKnownExperiments); err != nil {
			ctx.Error(err)
		}
		if percent < 0 || percent > 100 {
			ctx.Errorf("value must be in [0, 100]")
		}
		ctx.Exit()
	}
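
	// Example of the consistency rule enforced below (hypothetical values):
	// shadow_builder_adjustments { pool: "shadow.pool" } must come with a
	// matching "pool:shadow.pool" entry in its dimensions, and vice versa;
	// lucicfg normally keeps the two in sync when generating the config.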

	// shadow_builder_adjustments
	if b.ShadowBuilderAdjustments != nil {
		if isDynamic {
			ctx.Errorf("cannot set shadow_builder_adjustments in a dynamic builder template")
		} else {
			ctx.Enter("shadow_builder_adjustments")

			if b.ShadowBuilderAdjustments.GetProperties() != "" {
				if !strings.HasPrefix(b.ShadowBuilderAdjustments.Properties, "{") || !json.Valid([]byte(b.ShadowBuilderAdjustments.Properties)) {
					ctx.Errorf("properties is not a JSON object")
				}
			}

			// Ensure pool and dimensions are consistent.
			// In the builder config:
			//  * setting shadow_pool would add the corresponding
			//    "pool:<shadow_pool>" dimension to shadow_dimensions;
			//  * setting shadow_dimensions with "pool:<shadow_pool>" would
			//    also set shadow_pool.
			dims := b.ShadowBuilderAdjustments.GetDimensions()
			if b.ShadowBuilderAdjustments.GetPool() != "" && len(dims) == 0 {
				ctx.Errorf("dimensions.pool must be consistent with pool")
			}
			if len(dims) != 0 {
				validateDimensions(ctx, dims, true)

				empty := stringset.New(len(dims))
				nonEmpty := stringset.New(len(dims))
				for _, dim := range b.ShadowBuilderAdjustments.Dimensions {
					_, key, value := ParseDimension(dim)
					if value == "" {
						if nonEmpty.Has(key) {
							ctx.Errorf("dimensions contain both empty and non-empty value for the same key - %q", key)
						}
						empty.Add(key)
					} else {
						if empty.Has(key) {
							ctx.Errorf("dimensions contain both empty and non-empty value for the same key - %q", key)
						}
						nonEmpty.Add(key)
					}
					if key == "pool" && value != b.ShadowBuilderAdjustments.Pool {
						ctx.Errorf("dimensions.pool must be consistent with pool")
					}
				}
			}

			ctx.Exit()
		}
	}
}

func validateBuilderRecipe(ctx *validation.Context, recipe *pb.BuilderConfig_Recipe) {
	if recipe.Name == "" {
		ctx.Errorf("name: unspecified")
	}
	if recipe.CipdPackage == "" {
		ctx.Errorf("cipd_package: unspecified")
	}

	seenKeys := make(stringset.Set)
	validateProps := func(field string, props []string, isJson bool) {
		for i, prop := range props {
			ctx.Enter("%s #%d - %s", field, i, prop)
			if !strings.Contains(prop, ":") {
				ctx.Errorf("doesn't have a colon")
				ctx.Exit()
				continue
			}
			switch k, v := strpair.Parse(prop); {
			case k == "":
				ctx.Errorf("key not specified")
			case seenKeys.Has(k):
				ctx.Errorf("duplicate property")
			case k == "buildbucket":
				ctx.Errorf("reserved property")
			case k == "$recipe_engine/runtime":
				jsonMap := make(map[string]any)
				if err := json.Unmarshal([]byte(v), &jsonMap); err != nil {
					ctx.Errorf("not a JSON object: %s", err)
				}
				for key := range jsonMap {
					if key == "is_luci" || key == "is_experimental" {
						ctx.Errorf("key %q: reserved key", key)
					}
				}
			case isJson:
				if !json.Valid([]byte(v)) {
					ctx.Errorf("not a JSON object")
				}
			default:
				seenKeys.Add(k)
			}
			ctx.Exit()
		}
	}

	validateProps("properties", recipe.Properties, false)
	validateProps("properties_j", recipe.PropertiesJ, true)
}

// ParseDimension parses a dimension string.
// A dimension supports 2 forms -
// "<key>:<value>" and "<expiration_secs>:<key>:<value>".
func ParseDimension(d string) (exp int64, k string, v string) {
	k, v = strpair.Parse(d)
	var err error
	exp, err = strconv.ParseInt(k, 10, 64)
	if err == nil {
		// k was an int64, so v is in <key>:<value> form.
		k, v = strpair.Parse(v)
	} else {
		// k was the <key> and v was the <value>.
		exp = 0
	}
	return
}
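
// For example, ParseDimension("os:Ubuntu") returns (0, "os", "Ubuntu"), and
// ParseDimension("60:os:Ubuntu") returns (60, "os", "Ubuntu") because the
// leading segment parses as an int64 expiration.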

// validateDimensions validates dimensions in project configs.
// A dimension supports 2 forms -
// "<key>:<value>" and "<expiration_secs>:<key>:<value>".
func validateDimensions(ctx *validation.Context, dimensions []string, allowMissingValue bool) {
	expirations := make(map[int64]bool)

	for i, dim := range dimensions {
		ctx.Enter("dimensions #%d - %q", i, dim)
		if !strings.Contains(dim, ":") {
			ctx.Errorf("%q does not have ':'", dim)
			ctx.Exit()
			continue
		}
		expiration, key, value := ParseDimension(dim)
		switch {
		case expiration < 0 || time.Duration(expiration) > maximumExpiration/time.Second:
			ctx.Errorf("expiration_secs is outside valid range; up to %s", maximumExpiration)
		case expiration%60 != 0:
			ctx.Errorf("expiration_secs must be a multiple of 60 seconds")
		case key == "":
			ctx.Errorf("missing key")
		case key == "caches":
			ctx.Errorf("dimension key must not be 'caches'; caches must be declared via caches field")
		case !dimensionKeyRegex.MatchString(key):
			ctx.Errorf("key %q does not match pattern %q", key, dimensionKeyRegex)
		case value == "" && !allowMissingValue:
			ctx.Errorf("missing value")
		default:
			expirations[expiration] = true
		}

		if len(expirations) >= 6 {
			ctx.Errorf("at most 6 different expiration_secs values can be used")
		}
		ctx.Exit()
	}
}

func validateCaches(ctx *validation.Context, caches []*pb.BuilderConfig_CacheEntry) {
	cacheNames := stringset.New(len(caches))
	cachePaths := stringset.New(len(caches))
	fallbackSecs := make(map[int32]bool)
	for i, cache := range caches {
		ctx.Enter("caches #%d", i)
		validateCacheEntry(ctx, cache)
		if cache.Name != "" && cacheNames.Has(cache.Name) {
			ctx.Errorf("duplicate name")
		}
		if cache.Path != "" && cachePaths.Has(cache.Path) {
			ctx.Errorf("duplicate path")
		}
		cacheNames.Add(cache.Name)
		cachePaths.Add(cache.Path)
		switch secs := cache.WaitForWarmCacheSecs; {
		case secs == 0:
		case secs < 60:
			ctx.Errorf("wait_for_warm_cache_secs must be at least 60 seconds")
		case secs%60 != 0:
			ctx.Errorf("wait_for_warm_cache_secs must be rounded on 60 seconds")
		default:
			fallbackSecs[secs] = true
		}
		ctx.Exit()
	}

	if len(fallbackSecs) > 7 {
		// There can only be 8 task_slices.
		ctx.Errorf("too many different (%d) wait_for_warm_cache_secs values; max 7", len(fallbackSecs))
	}
}
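
// The cap of 7 follows from Swarming's limit of 8 task_slices per task: each
// distinct wait_for_warm_cache_secs value adds one fallback slice on top of
// the final slice, so 7 distinct values + 1 final slice = 8.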

func validateCacheEntry(ctx *validation.Context, entry *pb.BuilderConfig_CacheEntry) {
	switch name := entry.Name; {
	case name == "":
		ctx.Errorf("name: required")
	case !cacheNameRegex.MatchString(name):
		ctx.Errorf("name: %q does not match %q", name, cacheNameRegex)
	case len(name) > cacheNameMaxLength:
		ctx.Errorf("name: length must not exceed %d", cacheNameMaxLength)
	}

	ctx.Enter("path")
	switch path := entry.Path; {
	case path == "":
		ctx.Errorf("required")
	case strings.Contains(path, "\\"):
		ctx.Errorf("cannot contain \\. On Windows forward-slashes will be replaced with back-slashes.")
	case strings.Contains(path, ".."):
		ctx.Errorf("cannot contain '..'")
	case strings.HasPrefix(path, "/"):
		ctx.Errorf("cannot start with '/'")
	}
	ctx.Exit()
}

func ValidateExperimentName(expName string, wellKnownExperiments stringset.Set) error {
	switch {
	case !protoutil.ExperimentNameRE.MatchString(expName):
		return errors.Reason("does not match %q", protoutil.ExperimentNameRE).Err()
	case strings.HasPrefix(expName, "luci.") && !wellKnownExperiments.Has(expName):
		return errors.New(`unknown experiment has reserved prefix "luci."`)
	}
	return nil
}

func isLiteBackend(target string, globalCfg *pb.SettingsCfg) bool {
	if target == "" {
		return false
	}
	for _, backend := range globalCfg.Backends {
		if backend.Target == target {
			return backend.GetLiteMode() != nil
		}
	}
	return false
}