go.chromium.org/luci@v0.0.0-20240309015107-7cdc2e660f33/lucicfg/starlark/stdlib/internal/luci/generators.star (about)

     1  # Copyright 2018 The LUCI Authors.
     2  #
     3  # Licensed under the Apache License, Version 2.0 (the "License");
     4  # you may not use this file except in compliance with the License.
     5  # You may obtain a copy of the License at
     6  #
     7  #      http://www.apache.org/licenses/LICENSE-2.0
     8  #
     9  # Unless required by applicable law or agreed to in writing, software
    10  # distributed under the License is distributed on an "AS IS" BASIS,
    11  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  # See the License for the specific language governing permissions and
    13  # limitations under the License.
    14  
    15  """Implementation of various LUCI *.cfg file generators."""
    16  
    17  load("@stdlib//internal/error.star", "error")
    18  load("@stdlib//internal/experiments.star", "experiments")
    19  load("@stdlib//internal/graph.star", "graph")
    20  load("@stdlib//internal/lucicfg.star", "lucicfg")
    21  load("@stdlib//internal/strutil.star", "strutil")
    22  load("@stdlib//internal/time.star", "time")
    23  load("@stdlib//internal/luci/common.star", "builder_ref", "keys", "kinds", "triggerer")
    24  load("@stdlib//internal/luci/lib/acl.star", "acl", "aclimpl")
    25  load("@stdlib//internal/luci/lib/cq.star", "cq")
    26  load("@stdlib//internal/luci/lib/realms.star", "realms")
    27  load(
    28      "@stdlib//internal/luci/proto.star",
    29      "buildbucket_pb",
    30      "common_pb",
    31      "config_pb",
    32      "cq_pb",
    33      "logdog_cloud_logging_pb",
    34      "logdog_pb",
    35      "milo_pb",
    36      "notify_pb",
    37      "realms_pb",
    38      "scheduler_pb",
    39      "tricium_pb",
    40  )
    41  load("@proto//google/protobuf/duration.proto", duration_pb = "google.protobuf")
    42  load("@proto//google/protobuf/wrappers.proto", wrappers_pb = "google.protobuf")
    43  
# If set, do not generate legacy Buildbucket and Scheduler ACLs.
#
# Implies generation of shorter BuildbucketTask protos and conditional bindings,
# succeeding experiment "crbug.com/1182002" (see _scheduler_use_bb_v2 below).
_drop_legacy_shed_bb_acls = experiments.register("crbug.com/1347252", "1.32.0")

# If set, do not populate the deprecated task_template_canary_percentage field
# of BuilderConfig, but instead add the canary_software experiment.
#
# NOTE(review): the second argument to experiments.register is presumably the
# lucicfg version at which the experiment turns on by default — confirm in
# experiments.star.
_use_experiment_for_task_template_canary_percentage = experiments.register("crbug.com/1496969", "1.41.0")
    53  
    54  def _legacy_acls():
    55      """True to generate legacy Scheduler and Buildbucket ACLs."""
    56      return not _drop_legacy_shed_bb_acls.is_enabled()
    57  
    58  def register():
    59      """Registers all LUCI config generator callbacks."""
    60      lucicfg.generator(impl = gen_project_cfg)
    61      lucicfg.generator(impl = gen_realms_cfg)
    62      lucicfg.generator(impl = gen_logdog_cfg)
    63      lucicfg.generator(impl = gen_buildbucket_cfg)
    64      lucicfg.generator(impl = gen_scheduler_cfg)
    65      lucicfg.generator(impl = gen_milo_cfg)
    66      lucicfg.generator(impl = gen_cq_cfg)
    67      lucicfg.generator(impl = gen_notify_cfg)
    68      lucicfg.generator(impl = gen_tricium_cfg)
    69  
    70  ################################################################################
    71  ## Utilities to be used from generators.
    72  
    73  def output_path(path):
    74      """Returns a full path to a LUCI config in the output set.
    75  
    76      Args:
    77        path: a LUCI config path relative to LUCI config root.
    78  
    79      Returns:
    80        A path relative to the config output root.
    81      """
    82      root = get_project().props.config_dir
    83      if root != ".":
    84          path = root + "/" + path
    85      return path
    86  
    87  def set_config(ctx, path, cfg):
    88      """Adds `cfg` as a LUCI config to the output at the given `path`.
    89  
    90      Args:
    91        ctx: the generator context.
    92        path: the path in the output to populate.
    93        cfg: a proto or a string to put in the output.
    94      """
    95      ctx.output[output_path(path)] = cfg
    96  
    97  def get_project(required = True):
    98      """Returns project() node or fails if it wasn't declared.
    99  
   100      Args:
   101        required: if True, fail if the luci.project(...) wasn't defined.
   102  
   103      Returns:
   104        luci.project(...) node.
   105      """
   106      n = graph.node(keys.project())
   107      if not n and required:
   108          fail("luci.project(...) definition is missing, it is required")
   109      return n
   110  
   111  def get_service(kind, why):
   112      """Returns service struct (see service.star), reading it from project node.
   113  
   114      Args:
   115        kind: e.g. `buildbucket`.
   116        why: the request why it is required, for error messages.
   117  
   118      Returns:
   119        The service struct.
   120      """
   121      proj = get_project()
   122      svc = getattr(proj.props, kind)
   123      if not svc:
   124          fail(
   125              "missing %r in luci.project(...), it is required for %s" % (kind, why),
   126              trace = proj.trace,
   127          )
   128      return svc
   129  
   130  def get_bb_notification_topics():
   131      """Returns all defined buildbucket_notification_topic() nodes, if any."""
   132      return graph.children(keys.project(), kinds.BUILDBUCKET_NOTIFICATION_TOPIC)
   133  
   134  def get_buckets():
   135      """Returns all defined bucket() nodes, if any."""
   136      return graph.children(keys.project(), kinds.BUCKET)
   137  
   138  def get_buckets_of(nodes):
   139      """Returns bucket() nodes with buckets that contain given nodes.
   140  
   141      Nodes are expected to have 'bucket' property.
   142      """
   143      buckets = set([n.props.bucket for n in nodes])
   144      return [b for b in get_buckets() if b.props.name in buckets]
   145  
   146  def get_project_acls():
   147      """Returns [acl.elementary] with the project-level ACLs."""
   148      return aclimpl.normalize_acls(get_project().props.acls)
   149  
   150  def get_bucket_acls(bucket):
   151      """Returns [acl.elementary] with combined bucket and project ACLs.
   152  
   153      Args:
   154        bucket: a bucket node, as returned by e.g. get_buckets().
   155      """
   156      return aclimpl.normalize_acls(bucket.props.acls + get_project().props.acls)
   157  
   158  def filter_acls(acls, roles):
   159      """Keeps only ACL entries that have any of given roles."""
   160      return [a for a in acls if a.role in roles]
   161  
   162  def legacy_bucket_name(bucket_name, project_name):
   163      """Prefixes the bucket name with `luci.<project>.`."""
   164      if bucket_name.startswith("luci."):
   165          fail("seeing long bucket name %r, shouldn't be possible" % bucket_name)
   166      return "luci.%s.%s" % (project_name, bucket_name)
   167  
   168  def optional_sec(duration):
   169      """duration|None => number of seconds | None."""
   170      return None if duration == None else duration // time.second
   171  
   172  def optional_duration_pb(duration):
   173      """duration|None => duration_pb.Duration | None."""
   174      if duration == None:
   175          return None
   176      return duration_pb.Duration(
   177          seconds = duration // time.second,
   178          nanos = (duration % time.second) * 1000000,
   179      )
   180  
   181  def optional_UInt32Value(val):
   182      """int|None => google.protobuf.UInt32Value."""
   183      return None if val == None else wrappers_pb.UInt32Value(value = val)
   184  
   185  ################################################################################
   186  ## project.cfg.
   187  
   188  def gen_project_cfg(ctx):
   189      """Generates project.cfg.
   190  
   191      Args:
   192        ctx: the generator context.
   193      """
   194  
   195      # lucicfg is allowed to interpret *.star files without any actual
   196      # definitions. This is used in tests, for example. If there's no
   197      # project(...) rule, but there are some other LUCI definitions, the
   198      # corresponding generators will fail on their own in get_project() calls.
   199      proj = get_project(required = False)
   200      if not proj:
   201          return
   202  
   203      # We put generated LUCI configs under proj.props.config_dir, see set_config.
   204      # Declare it is a project config set, so it is sent to LUCI config for
   205      # validation.
   206      ctx.declare_config_set("projects/%s" % proj.props.name, proj.props.config_dir)
   207  
   208      # Find all PROJECT_CONFIGS_READER role entries.
   209      access = []
   210      for a in filter_acls(get_project_acls(), [acl.PROJECT_CONFIGS_READER]):
   211          if a.user:
   212              access.append("user:" + a.user)
   213          elif a.group:
   214              access.append("group:" + a.group)
   215          elif a.project:
   216              access.append("project:" + a.project)
   217          else:
   218              fail("impossible")
   219  
   220      # Path to the generated LUCI config directory relative to the main package
   221      # root.
   222      config_dir = strutil.join_path(
   223          __native__.get_meta("config_dir"),
   224          proj.props.config_dir,
   225          allow_dots = True,
   226      )
   227  
   228      emit_metadata = not __native__.running_tests and not proj.props.omit_lucicfg_metadata
   229  
   230      set_config(ctx, "project.cfg", config_pb.ProjectCfg(
   231          name = proj.props.name,
   232          access = access,
   233          lucicfg = config_pb.GeneratorMetadata(
   234              version = "%d.%d.%d" % lucicfg.version(),
   235              config_dir = config_dir,
   236              package_dir = __native__.package_dir(config_dir),
   237              entry_point = __native__.entry_point,
   238              vars = __native__.var_flags,
   239              experiments = __native__.list_enabled_experiments(),
   240          ) if emit_metadata else None,
   241      ))
   242  
   243  ################################################################################
   244  ## realm.cfg.
   245  
   246  def realms_cfg(proj):
   247      """Returns either `realms.cfg` or `realms-dev.cfg`."""
   248      return "realms-dev.cfg" if proj.props.dev else "realms.cfg"
   249  
   250  def gen_realms_cfg(ctx):
   251      """Generates realms.cfg.
   252  
   253      Args:
   254        ctx: the generator context.
   255      """
   256      proj = get_project(required = False)
   257      if proj:
   258          set_config(
   259              ctx = ctx,
   260              path = realms_cfg(proj),
   261              cfg = realms.generate_realms_cfg(realms.default_impl),
   262          )
   263  
   264  ################################################################################
   265  ## logdog.cfg.
   266  
   267  def gen_logdog_cfg(ctx):
   268      """Generates logdog.cfg.
   269  
   270      Args:
   271        ctx: the generator context.
   272      """
   273      opts = graph.node(keys.logdog())
   274      if not opts:
   275          return
   276  
   277      # Note that acl.LOGDOG_* are declared as groups_only=True roles, so .group
   278      # is guaranteed to be set here.
   279      readers = []
   280      writers = []
   281      for a in get_project_acls():
   282          if a.role == acl.LOGDOG_READER:
   283              readers.append(a.group)
   284          elif a.role == acl.LOGDOG_WRITER:
   285              writers.append(a.group)
   286  
   287      cl_cfg = None
   288      if opts.props.cloud_logging_project:
   289          cl_cfg = logdog_cloud_logging_pb.CloudLoggingConfig(
   290              destination = opts.props.cloud_logging_project,
   291          )
   292  
   293      logdog = get_service("logdog", "defining LogDog options")
   294      set_config(ctx, logdog.cfg_file, logdog_pb.ProjectConfig(
   295          reader_auth_groups = readers,
   296          writer_auth_groups = writers,
   297          archive_gs_bucket = opts.props.gs_bucket,
   298          cloud_logging_config = cl_cfg,
   299      ))
   300  
   301  ################################################################################
   302  ## buildbucket.cfg.
   303  
# acl.role => buildbucket_pb.Acl.Role.
#
# Maps lucicfg ACL roles to the legacy Buildbucket ACL proto enum values.
# Consulted only when legacy ACL generation is enabled, see _buildbucket_acls.
_buildbucket_roles = {
    acl.BUILDBUCKET_READER: buildbucket_pb.Acl.READER,
    acl.BUILDBUCKET_TRIGGERER: buildbucket_pb.Acl.SCHEDULER,
    acl.BUILDBUCKET_OWNER: buildbucket_pb.Acl.WRITER,
}
   310  
def gen_buildbucket_cfg(ctx):
    """Generates buildbucket.cfg.

    Besides writing buildbucket.cfg, this may also mutate the realms config
    already present in ctx.output: shadow service accounts declared by
    builders are appended there as `role/buildbucket.builderServiceAccount`
    bindings on their shadow buckets (see
    _gen_shadow_service_account_bindings).

    Args:
      ctx: the generator context.
    """

    # TODO(randymaldoando): crbug/399576 - move builders up a level and
    # replace swarming in bucket proto.
    buckets = get_buckets()
    if not buckets:
        return

    # `cfg` is registered in the output first and then mutated in-place below.
    cfg = buildbucket_pb.BuildbucketCfg()
    buildbucket = get_service("buildbucket", "defining buckets")
    set_config(ctx, buildbucket.cfg_file, cfg)
    _buildbucket_check_connections()

    # Shadow bucket name => constraints derived from builders that shadow
    # into it (their shadow_pool and shadow_service_account values).
    shadow_bucket_constraints = _buildbucket_shadow_bucket_constraints(buckets)
    for bucket in buckets:
        swarming = _buildbucket_builders(bucket)
        dynamic_builder_template = _buildbucket_dynamic_builder_template(bucket)
        # A bucket is either dynamic (template only) or static (builders
        # only), never both.
        if dynamic_builder_template != None and swarming != None:
            error("dynamic bucket \"%s\" must not have pre-defined builders" % bucket.props.name, trace = bucket.trace)

        # Merge explicitly declared constraints with the ones derived from
        # builders shadowing into this bucket, deduplicating and sorting.
        constraints = _buildbucket_constraints(bucket)
        if shadow_bucket_constraints and shadow_bucket_constraints.get(bucket.props.name):
            if not constraints:
                constraints = buildbucket_pb.Bucket.Constraints()
            additional_constraints = shadow_bucket_constraints[bucket.props.name]
            constraints.pools.extend(additional_constraints.pools)
            constraints.pools = sorted(set(constraints.pools))
            constraints.service_accounts.extend(additional_constraints.service_accounts)
            constraints.service_accounts = sorted(set(constraints.service_accounts))

        cfg.buckets.append(buildbucket_pb.Bucket(
            name = bucket.props.name,
            acls = _buildbucket_acls(get_bucket_acls(bucket)),
            swarming = swarming,
            shadow = _buildbucket_shadow(bucket),
            constraints = constraints,
            dynamic_builder_template = dynamic_builder_template,
        ))

    # Grant shadow service accounts the builder service account role in the
    # realms of their shadow buckets (mutates the realms config in-place).
    if shadow_bucket_constraints:
        _gen_shadow_service_account_bindings(
            ctx.output[output_path(realms_cfg(get_project()))],
            shadow_bucket_constraints,
        )

    topics = [
        buildbucket_pb.BuildbucketCfg.Topic(
            name = t.props.name,
            compression = t.props.compression,
        )
        for t in get_bb_notification_topics()
    ]
    if topics:
        cfg.common_config = buildbucket_pb.BuildbucketCfg.CommonConfig(
            builds_notification_topics = topics,
        )
   372  
   373  def _buildbucket_check_connections():
   374      """Ensures all luci.bucket_constraints(...) are connected to one and only one luci.bucket(...)."""
   375      root = keys.bucket_constraints_root()
   376      for e in graph.children(root):
   377          buckets = [p for p in graph.parents(e.key) if p.key != root]
   378          if len(buckets) == 0:
   379              error("%s is not added to any bucket, either remove or comment it out" % e, trace = e.trace)
   380          elif len(buckets) > 1:
   381              error(
   382                  "%s is added to multiple buckets: %s" %
   383                  (e, ", ".join([str(v) for v in buckets])),
   384                  trace = e.trace,
   385              )
   386  
   387  def _buildbucket_acls(elementary):
   388      """[acl.elementary] => filtered [buildbucket_pb.Acl]."""
   389      if not _legacy_acls():
   390          return []
   391      return [
   392          buildbucket_pb.Acl(
   393              role = _buildbucket_roles[a.role],
   394              group = a.group,
   395              identity = None if a.group else _buildbucket_identity(a),
   396          )
   397          for a in filter_acls(elementary, _buildbucket_roles.keys())
   398      ]
   399  
   400  def _buildbucket_identity(a):
   401      """acl.elementary => identity string for buildbucket_pb.Acl."""
   402      if a.user:
   403          return "user:" + a.user
   404      if a.project:
   405          return "project:" + a.project
   406      fail("impossible")
   407  
def _buildbucket_shadow_bucket_constraints(buckets):
    """a list of luci.bucket(...) nodes => a dict of bucket name to constraints.

    Collects shadow_service_account and shadow_pool values from each bucket's
    builders and attributes them to the bucket's shadow bucket. Returns None
    (after reporting an error) if some builder sets these fields but its
    bucket has no shadow bucket configured.
    """
    shadow_bucket_constraints = {}
    for bucket in buckets:
        service_accounts = []
        pools = []
        for node in graph.children(bucket.key, kinds.BUILDER):
            if node.props.shadow_service_account:
                service_accounts.append(node.props.shadow_service_account)
            if node.props.shadow_pool:
                pools.append(node.props.shadow_pool)

        # Skip buckets whose builders declare no shadow adjustments at all.
        if len(service_accounts) == 0 and len(pools) == 0:
            continue
        shadow = _buildbucket_shadow(bucket)
        if not shadow:
            error(
                "builders in bucket %s set shadow_service_account or shadow_pool, but the bucket does not have a shadow bucket" %
                bucket.props.name,
                trace = bucket.trace,
            )
            return None
        # Multiple buckets may share one shadow bucket: accumulate into the
        # same (mutable-list) struct via setdefault.
        constraints = shadow_bucket_constraints.setdefault(shadow, struct(
            service_accounts = [],
            pools = [],
        ))
        constraints.service_accounts.extend(service_accounts)
        constraints.pools.extend(pools)
    return shadow_bucket_constraints
   437  
def _buildbucket_builder(node, def_swarming_host):
    """BUILDER or DYNAMIC_BUILDER_TEMPLATE node => buildbucket_pb.BuilderConfig.

    Args:
      node: a graph node of kind BUILDER or DYNAMIC_BUILDER_TEMPLATE.
      def_swarming_host: the default Swarming host resolved so far, or None.
        Resolved lazily from the project's `swarming` service on first need.

    Returns:
      A (buildbucket_pb.BuilderConfig, def_swarming_host) pair. The second
      element lets the caller cache the resolved default host across calls.
    """
    if node.key.kind != kinds.BUILDER and node.key.kind != kinds.DYNAMIC_BUILDER_TEMPLATE:
        fail("impossible: can only generate builder config for a builder or dynamic_builder_template")
    exe, recipe, properties, experiments = _handle_executable(node)

    # Experiments derived from the executable are overlaid on top of the
    # builder's own experiments.
    combined_experiments = dict(node.props.experiments)
    combined_experiments.update(experiments)
    task_template_canary_percentage = None
    if _use_experiment_for_task_template_canary_percentage.is_enabled():
        # Emit the canary percentage as the canary_software experiment instead
        # of the deprecated task_template_canary_percentage field.
        if node.props.task_template_canary_percentage:
            combined_experiments["luci.buildbucket.canary_software"] = node.props.task_template_canary_percentage
    else:
        task_template_canary_percentage = optional_UInt32Value(
            node.props.task_template_canary_percentage,
        )
    bldr_config = buildbucket_pb.BuilderConfig(
        name = node.props.name,
        description_html = node.props.description_html,
        exe = exe,
        recipe = recipe,
        properties = properties,
        allowed_property_overrides = sorted(node.props.allowed_property_overrides),
        service_account = node.props.service_account,
        caches = _buildbucket_caches(node.props.caches),
        execution_timeout_secs = optional_sec(node.props.execution_timeout),
        grace_period = optional_duration_pb(node.props.grace_period),
        heartbeat_timeout_secs = optional_sec(node.props.heartbeat_timeout),
        dimensions = _buildbucket_dimensions(node.props.dimensions),
        priority = node.props.priority,
        expiration_secs = optional_sec(node.props.expiration_timeout),
        wait_for_capacity = _buildbucket_trinary(node.props.wait_for_capacity),
        retriable = _buildbucket_trinary(node.props.retriable),
        build_numbers = _buildbucket_toggle(node.props.build_numbers),
        experimental = _buildbucket_toggle(node.props.experimental),
        experiments = combined_experiments,
        task_template_canary_percentage = task_template_canary_percentage,
        resultdb = node.props.resultdb,
        contact_team_email = node.props.contact_team_email,
    )
    # Optional task backend (and its alternative) referenced by node key.
    if node.props.backend != None:
        backend = graph.node(node.props.backend)
        bldr_config.backend = buildbucket_pb.BuilderConfig.Backend(
            target = backend.props.target,
            config_json = backend.props.config,
        )
    if node.props.backend_alt != None:
        backend_alt = graph.node(node.props.backend_alt)
        bldr_config.backend_alt = buildbucket_pb.BuilderConfig.Backend(
            target = backend_alt.props.target,
            config_json = backend_alt.props.config,
        )

    # Builders with no backend and no explicit host fall back to the
    # project-wide default Swarming host, resolved at most once.
    swarming_host = node.props.swarming_host
    if node.props.backend == None and node.props.backend_alt == None and not swarming_host:
        if not def_swarming_host:
            def_swarming_host = get_service("swarming", "defining builders").host
        swarming_host = def_swarming_host
    if swarming_host:
        bldr_config.swarming_host = swarming_host
        bldr_config.swarming_tags = node.props.swarming_tags

    # Adjustments applied when the builder is shadowed (led builds etc. —
    # see the Buildbucket ShadowBuilderAdjustments proto for semantics).
    if node.props.shadow_service_account or node.props.shadow_pool or node.props.shadow_properties or node.props.shadow_dimensions:
        bldr_config.shadow_builder_adjustments = buildbucket_pb.BuilderConfig.ShadowBuilderAdjustments(
            service_account = node.props.shadow_service_account,
            pool = node.props.shadow_pool,
            properties = to_json(node.props.shadow_properties) if node.props.shadow_properties else None,
            dimensions = _buildbucket_dimensions(node.props.shadow_dimensions, allow_none = True),
        )
    return bldr_config, def_swarming_host
   506  
   507  def _buildbucket_builders(bucket):
   508      """luci.bucket(...) node => buildbucket_pb.Swarming or None."""
   509      def_swarming_host = None
   510      builders = []
   511      for node in graph.children(bucket.key, kinds.BUILDER):
   512          bldr_config, def_swarming_host = _buildbucket_builder(node, def_swarming_host)
   513          builders.append(bldr_config)
   514      return buildbucket_pb.Swarming(builders = builders) if builders else None
   515  
def _handle_executable(node):
    """Handle a builder node's executable node.

    Builder node =>
      buildbucket_pb.BuilderConfig.Recipe | common_pb.Executable,
      buildbucket_pb.BuilderConfig.Properties, buildbucket_pb.BuilderConfig.Experiments

    This function produces either a Recipe or Executable definition depending on
    whether executable.props.recipe was set. luci.recipe(...) will always set
    executable.props.recipe.

    If we're handling a recipe, set properties_j in the Recipe definition.
    If we're handling a normal executable, return Properties to be assigned to
    Builder.Properties.

    When we are ready to move config output entirely from recipes to their
    exe equivalents, we can stop producing Recipe definitions here.

    Returns:
      A (executable_def, recipe_def, properties, experiments) tuple. At most
      one of executable_def/recipe_def is set; both are None only for a
      dynamic builder template without an executable.
    """
    if node.key.kind != kinds.BUILDER and node.key.kind != kinds.DYNAMIC_BUILDER_TEMPLATE:
        fail("impossible: can only handle executable for a builder or dynamic_builder_template")

    executables = graph.children(node.key, kinds.EXECUTABLE)
    if len(executables) != 1:
        if node.key.kind == kinds.BUILDER:
            fail("impossible: the builder should have a reference to an executable")
        else:
            # a dynamic_builder_template without executable is allowed.
            properties = to_json(node.props.properties) if node.props.properties else None
            return None, None, properties, {}
    experiments = {}
    executable = executables[0]
    if not executable.props.cmd and executable.props.recipe:
        # old kitchen way
        recipe_def = buildbucket_pb.BuilderConfig.Recipe(
            name = executable.props.recipe,
            cipd_package = executable.props.cipd_package,
            cipd_version = executable.props.cipd_version,
            # Legacy encoding: sorted "<key>:<json value>" pairs.
            properties_j = sorted([
                "%s:%s" % (k, to_json(v))
                for k, v in node.props.properties.items()
            ]),
        )
        executable_def = None
        properties = None
    else:
        executable_def = common_pb.Executable(
            cipd_package = executable.props.cipd_package,
            cipd_version = executable.props.cipd_version,
            cmd = executable.props.cmd,
        )
        if executable.props.wrapper:
            executable_def.wrapper = executable.props.wrapper
        recipe_def = None
        props_dict = node.props.properties
        if executable.props.recipe:
            # The recipe name is carried to the executable as a "recipe"
            # property (copy the dict first to avoid mutating node.props).
            props_dict = dict(props_dict)
            props_dict["recipe"] = executable.props.recipe
        properties = to_json(props_dict)
        if executable.props.recipes_py3:
            experiments["luci.recipes.use_python3"] = 100
    return executable_def, recipe_def, properties, experiments
   577  
   578  def _buildbucket_caches(caches):
   579      """[swarming.cache] => [buildbucket_pb.BuilderConfig.CacheEntry]."""
   580      out = []
   581      for c in caches:
   582          out.append(buildbucket_pb.BuilderConfig.CacheEntry(
   583              name = c.name,
   584              path = c.path,
   585              wait_for_warm_cache_secs = optional_sec(c.wait_for_warm_cache),
   586          ))
   587      return sorted(out, key = lambda x: x.name)
   588  
   589  def _buildbucket_dimensions(dims, allow_none = False):
   590      """{str: [swarming.dimension]} => [str] for 'dimensions' field."""
   591      out = []
   592      for key in sorted(dims):
   593          if allow_none and dims[key] == None:
   594              out.append("%s:" % key)
   595              continue
   596          for d in dims[key]:
   597              if d.expiration == None:
   598                  out.append("%s:%s" % (key, d.value))
   599              else:
   600                  out.append("%d:%s:%s" % (d.expiration // time.second, key, d.value))
   601      return out
   602  
   603  def _buildbucket_trinary(val):
   604      """Bool|None => common_pb.Trinary."""
   605      if val == None:
   606          return common_pb.UNSET
   607      return common_pb.YES if val else common_pb.NO
   608  
   609  def _buildbucket_toggle(val):
   610      """Bool|None => buildbucket_pb.Toggle."""
   611      if val == None:
   612          return buildbucket_pb.UNSET
   613      return buildbucket_pb.YES if val else buildbucket_pb.NO
   614  
   615  def _buildbucket_shadow(bucket):
   616      """luci.bucket(...) node => buildbucket_pb.Shadow or None."""
   617      shadow = graph.node(keys.shadow_of(bucket.key))
   618      if shadow:
   619          return shadow.props.shadow
   620      return None
   621  
   622  def _buildbucket_constraints(bucket):
   623      """luci.bucket(...) node => buildbucket_pb.Bucket.Constraints or None."""
   624      pools = set()
   625      service_accounts = set()
   626      for node in graph.children(bucket.key, kinds.BUCKET_CONSTRAINTS):
   627          pools |= set(node.props.pools)
   628          service_accounts |= set(node.props.service_accounts)
   629      pools = sorted(pools)
   630      service_accounts = sorted(service_accounts)
   631      if len(pools) == 0 and len(service_accounts) == 0:
   632          return None
   633      return buildbucket_pb.Bucket.Constraints(pools = pools, service_accounts = service_accounts)
   634  
   635  def _gen_shadow_service_account_bindings(realms_cfg, shadow_bucket_constraints):
   636      """Mutates realms.cfg by adding `role/buildbucket.builderServiceAccount` bindings.
   637  
   638      This function is to add builders' shadow_service_accounts to their shadow
   639      buckets as builder service accounts.
   640  
   641      Args:
   642        realms_cfg: realms_pb.RealmsCfg to mutate.
   643        shadow_bucket_constraints: a dict of bucket name to constraints
   644      """
   645      for bucket, constraints in shadow_bucket_constraints.items():
   646          principals = sorted(set(["user:" + account for account in constraints.service_accounts]))
   647          if len(principals) == 0:
   648              continue
   649          realms.append_binding_pb(realms_cfg, keys.realm(bucket).id, realms_pb.Binding(
   650              role = "role/buildbucket.builderServiceAccount",
   651              principals = principals,
   652          ))
   653  
   654  ################################################################################
   655  ## scheduler.cfg.
   656  
# acl.role => scheduler_pb.Acl.Role.
#
# Maps lucicfg ACL roles to the legacy Scheduler ACL proto enum values.
_scheduler_roles = {
    acl.SCHEDULER_READER: scheduler_pb.Acl.READER,
    acl.SCHEDULER_TRIGGERER: scheduler_pb.Acl.TRIGGERER,
    acl.SCHEDULER_OWNER: scheduler_pb.Acl.OWNER,
}

# Enables generation of shorter BuildbucketTask protos and conditional bindings.
#
# Succeeded by "crbug.com/1347252" (_drop_legacy_shed_bb_acls above), which
# implies the same behavior.
_scheduler_use_bb_v2 = experiments.register("crbug.com/1182002")
   666  
def gen_scheduler_cfg(ctx):
    """Generates scheduler.cfg.

    Walks all buckets to discover gitiles pollers and builders that need a
    scheduler Trigger or Job entity, emits the scheduler project config, and
    (when realms configs are enabled) appends `role/scheduler.triggerer`
    bindings to the already-generated realms.cfg output.

    Args:
      ctx: the generator context.
    """
    buckets = get_buckets()
    if not buckets:
        return

    # Discover who triggers who, validate there are no ambiguities in 'triggers'
    # and 'triggered_by' relations (triggerer.targets reports them as errors).

    pollers = {}  # GITILES_POLLER node -> list of BUILDER nodes it triggers.
    builders = {}  # BUILDER node -> list GITILES_POLLER|BUILDER that trigger it (if any).

    # Ensures 'builder' gets a Job entity even if nothing triggers it.
    def want_scheduler_job_for(builder):
        if builder not in builders:
            builders[builder] = []

    # Records that 'triggered_by' triggers 'builder'.
    def add_triggered_by(builder, triggered_by):
        want_scheduler_job_for(builder)
        builders[builder].append(triggered_by)

    for bucket in buckets:
        for poller in graph.children(bucket.key, kinds.GITILES_POLLER):
            # Note: the list of targets may be empty. We still want to define a
            # poller, so add the entry to the dict regardless. This may be
            # useful to confirm a poller works before making it trigger
            # anything.
            pollers[poller] = triggerer.targets(poller)
            for target in pollers[poller]:
                add_triggered_by(target, triggered_by = poller)

        for builder in graph.children(bucket.key, kinds.BUILDER):
            targets = triggerer.targets(builder)
            if targets and not builder.props.service_account:
                error(
                    "%s needs service_account set, it triggers other builders: %s" %
                    (builder, ", ".join([str(t) for t in targets])),
                    trace = builder.trace,
                )
            else:
                for target in targets:
                    add_triggered_by(target, triggered_by = builder)

            # If using a cron schedule or a custom triggering policy, need to
            # setup a Job entity for this builder.
            if builder.props.schedule or builder.props.triggering_policy:
                want_scheduler_job_for(builder)

    # List of BUILDER and GITILES_POLLER nodes we need an entity in the
    # scheduler config for.
    entities = pollers.keys() + builders.keys()

    # The scheduler service is not used at all? Don't require its hostname then.
    if not entities:
        return

    scheduler = get_service("scheduler", "using triggering or pollers")
    buildbucket = get_service("buildbucket", "using triggering")
    project = get_project()

    # 'cfg' is registered with the output first and populated below; the same
    # object is mutated in place after this call.
    cfg = scheduler_pb.ProjectConfig()
    set_config(ctx, scheduler.cfg_file, cfg)

    if _legacy_acls():
        # Generate per-bucket ACL sets based on bucket-level permissions. Skip
        # buckets that aren't used to avoid polluting configs with unnecessary
        # entries.
        for bucket in get_buckets_of(entities):
            cfg.acl_sets.append(scheduler_pb.AclSet(
                name = bucket.props.name,
                acls = _scheduler_acls(get_bucket_acls(bucket)),
            ))

    # We prefer to use bucket-less names in the scheduler configs, so that IDs
    # that show up in logs and in the debug UI match what is used in the
    # starlark config. On conflicts, append the bucket name as suffix to
    # disambiguate.
    #
    # TODO(vadimsh): Revisit this if LUCI Scheduler starts supporting buckets
    # directly. Right now each project has a single flat namespace of job IDs
    # and all existing configs use 'scheduler job name == builder name'.
    # Artificially injecting bucket names into all job IDs will complicate the
    # migration to lucicfg by obscuring diffs between existing configs and new
    # generated configs.
    node_to_id = _scheduler_disambiguate_ids(entities)

    # Add Trigger entry for each gitiles poller. Sort according to final IDs.
    for poller, targets in pollers.items():
        cfg.trigger.append(scheduler_pb.Trigger(
            id = node_to_id[poller],
            realm = poller.props.realm,
            acl_sets = [poller.props.bucket] if _legacy_acls() else [],
            triggers = [node_to_id[b] for b in targets],
            schedule = poller.props.schedule,
            gitiles = scheduler_pb.GitilesTask(
                repo = poller.props.repo,
                refs = ["regexp:" + r for r in poller.props.refs],
                path_regexps = poller.props.path_regexps,
                path_regexps_exclude = poller.props.path_regexps_exclude,
            ),
        ))
    cfg.trigger = sorted(cfg.trigger, key = lambda x: x.id)

    # Add Job entry for each triggered or cron builder. Grant all triggering
    # builders (if any) TRIGGERER role. Sort according to final IDs.
    for builder, triggered_by in builders.items():
        cfg.job.append(scheduler_pb.Job(
            id = node_to_id[builder],
            realm = builder.props.realm,
            acl_sets = [builder.props.bucket] if _legacy_acls() else [],
            acls = _scheduler_acls(aclimpl.normalize_acls([
                acl.entry(acl.SCHEDULER_TRIGGERER, users = t.props.service_account)
                for t in triggered_by
                if t.key.kind == kinds.BUILDER
            ])),
            schedule = builder.props.schedule,
            triggering_policy = builder.props.triggering_policy,
            buildbucket = _scheduler_task(builder, buildbucket, project.props.name),
        ))
    cfg.job = sorted(cfg.job, key = lambda x: x.id)

    # Add conditional "role/scheduler.triggerer" bindings that allow builders to
    # trigger jobs. This mutates the realms.cfg proto already stored in the
    # generator output.
    if _scheduler_realms_configs():
        _gen_scheduler_bindings(
            ctx.output[output_path(realms_cfg(project))],
            builders,
            node_to_id,
        )
   799  
   800  def _scheduler_realms_configs():
   801      """True to generate realms-only Scheduler configs."""
   802      return _scheduler_use_bb_v2.is_enabled() or not _legacy_acls()
   803  
   804  def _scheduler_disambiguate_ids(nodes):
   805      """[graph.node] => dict{node: name to use for it in scheduler.cfg}."""
   806  
   807      # Build dict: name -> [nodes that have it].
   808      claims = {}
   809      for n in nodes:
   810          nm = n.props.name
   811          if nm not in claims:
   812              claims[nm] = []
   813          claims[nm].append(n)
   814  
   815      names = {}  # node -> name
   816      used = {}  # name -> node, reverse of 'names'
   817  
   818      # use_name deals with the second layer of ambiguities: when our generated
   819      # '<name>-<bucket>' name happened to clash with some other '<name>'. This
   820      # should be super rare. Lack of 'while' in starlark makes it difficult to
   821      # handle such collisions automatically, so we just give up and ask the user
   822      # to pick some other names.
   823      def use_name(name, node):
   824          if name in used:
   825              fail(
   826                  (
   827                      "%s and %s cause ambiguities in the scheduler config file, " +
   828                      "pick names that don't start with a bucket name"
   829                  ) % (node, used[name]),
   830                  trace = node.trace,
   831              )
   832          names[node] = name
   833          used[name] = node
   834  
   835      for nm, candidates in claims.items():
   836          if len(candidates) == 1:
   837              use_name(nm, candidates[0])
   838          else:
   839              for node in candidates:
   840                  use_name("%s-%s" % (node.props.bucket, nm), node)
   841  
   842      return names
   843  
   844  def _gen_scheduler_bindings(realms_cfg, builders, node_to_id):
   845      """Mutates realms.cfg by adding `role/scheduler.triggerer` bindings.
   846  
   847      Args:
   848        realms_cfg: realms_pb.RealmsCfg to mutate.
   849        builders: BUILDER node -> list GITILES_POLLER|BUILDER that trigger it.
   850        node_to_id: dict{BUILDER node: name to use for it in scheduler.cfg}.
   851      """
   852  
   853      # (target realm, triggering service account) => [triggered job ID].
   854      per_realm_per_account = {}
   855      for builder, triggered_by in builders.items():
   856          job_id = node_to_id[builder]
   857          job_realm = builder.props.realm
   858          for t in triggered_by:
   859              if t.key.kind == kinds.BUILDER and t.props.service_account:
   860                  key = (job_realm, t.props.service_account)
   861                  per_realm_per_account.setdefault(key, []).append(job_id)
   862  
   863      # Append corresponding bindings to realms.cfg.
   864      for realm, account in sorted(per_realm_per_account):
   865          jobs = sorted(set(per_realm_per_account[(realm, account)]))
   866          realms.append_binding_pb(realms_cfg, realm, realms_pb.Binding(
   867              role = "role/scheduler.triggerer",
   868              principals = ["user:" + account],
   869              conditions = [
   870                  realms_pb.Condition(
   871                      restrict = realms_pb.Condition.AttributeRestriction(
   872                          attribute = "scheduler.job.name",
   873                          values = jobs,
   874                      ),
   875                  ),
   876              ],
   877          ))
   878  
   879  def _scheduler_acls(elementary):
   880      """[acl.elementary] => filtered [scheduler_pb.Acl]."""
   881      if not _legacy_acls():
   882          return []
   883      return [
   884          scheduler_pb.Acl(
   885              role = _scheduler_roles[a.role],
   886              granted_to = _scheduler_identity(a),
   887          )
   888          for a in filter_acls(elementary, _scheduler_roles.keys())
   889      ]
   890  
   891  def _scheduler_identity(a):
   892      """acl.elementary => identity string for scheduler_pb.Acl."""
   893      if a.user:
   894          return a.user
   895      if a.group:
   896          return "group:" + a.group
   897      if a.project:
   898          return "project:" + a.project
   899      fail("impossible")
   900  
   901  def _scheduler_task(builder, buildbucket, project_name):
   902      """Produces scheduler_pb.BuildbucketTask for a scheduler job."""
   903      if not _scheduler_realms_configs():
   904          bucket = legacy_bucket_name(builder.props.bucket, project_name)
   905      else:
   906          bucket = builder.props.bucket
   907      return scheduler_pb.BuildbucketTask(
   908          server = buildbucket.host,
   909          bucket = bucket,
   910          builder = builder.props.name,
   911      )
   912  
   913  ################################################################################
   914  ## milo.cfg.
   915  
   916  def gen_milo_cfg(ctx):
   917      """Generates milo.cfg.
   918  
   919      Args:
   920        ctx: the generator context.
   921      """
   922      _milo_check_connections()
   923  
   924      # Note: luci.milo(...) node is optional.
   925      milo_node = graph.node(keys.milo())
   926      opts = struct(
   927          logo = milo_node.props.logo if milo_node else None,
   928          favicon = milo_node.props.favicon if milo_node else None,
   929          bug_url_template = milo_node.props.bug_url_template if milo_node else None,
   930      )
   931  
   932      # Keep the order of views as they were defined, for Milo's list of consoles.
   933      views = graph.children(keys.project(), kinds.MILO_VIEW, order_by = graph.DEFINITION_ORDER)
   934      if not views and not milo_node:
   935          return
   936  
   937      milo = get_service("milo", "using views or setting Milo config")
   938      project_name = get_project().props.name
   939  
   940      set_config(ctx, milo.cfg_file, milo_pb.Project(
   941          bug_url_template = opts.bug_url_template,
   942          logo_url = opts.logo,
   943          consoles = [
   944              _milo_console_pb(view, opts, project_name)
   945              for view in views
   946          ],
   947      ))
   948  
   949  def _milo_check_connections():
   950      """Ensures all *_view_entry are connected to one and only one *_view."""
   951      root = keys.milo_entries_root()
   952      for e in graph.children(root):
   953          views = [p for p in graph.parents(e.key) if p.key != root]
   954          if len(views) == 0:
   955              error("%s is not added to any view, either remove or comment it out" % e, trace = e.trace)
   956          elif len(views) > 1:
   957              error(
   958                  "%s is added to multiple views: %s" %
   959                  (e, ", ".join([str(v) for v in views])),
   960                  trace = e.trace,
   961              )
   962  
   963  def _milo_console_pb(view, opts, project_name):
   964      """Given MILO_VIEW node returns milo_pb.Console."""
   965      ch = graph.children(view.key)
   966      if len(ch) != 1:
   967          fail("impossible: %s" % (ch,))
   968      view = ch[0]
   969      if view.key.kind == kinds.LIST_VIEW:
   970          return _milo_list_view(view, opts, project_name)
   971      if view.key.kind == kinds.CONSOLE_VIEW:
   972          return _milo_console_view(view, opts, project_name)
   973      if view.key.kind == kinds.EXTERNAL_CONSOLE_VIEW:
   974          return _milo_external_console_view(view)
   975      fail("impossible: %s" % (view,))
   976  
   977  def _milo_external_console_view(view):
   978      """Given an EXTERNAL_CONSOLE_VIEW node returns milo_pb.Console."""
   979      return milo_pb.Console(
   980          id = view.props.name,
   981          name = view.props.title,
   982          external_project = view.props.external_project,
   983          external_id = view.props.external_id,
   984      )
   985  
   986  def _milo_list_view(view, opts, project_name):
   987      """Given a LIST_VIEW node produces milo_pb.Console."""
   988      builders = []
   989      seen = {}
   990      for e in graph.children(view.key, order_by = graph.DEFINITION_ORDER):
   991          pb = _milo_builder_pb(e, view, project_name, seen)
   992          if pb:
   993              builders.append(pb)
   994      return milo_pb.Console(
   995          id = view.props.name,
   996          name = view.props.title,
   997          favicon_url = view.props.favicon or opts.favicon,
   998          builder_view_only = True,
   999          builders = builders,
  1000      )
  1001  
  1002  def _milo_console_view(view, opts, project_name):
  1003      """Given a CONSOLE_VIEW node produces milo_pb.Console."""
  1004      builders = []
  1005      seen = {}
  1006      for e in graph.children(view.key, order_by = graph.DEFINITION_ORDER):
  1007          pb = _milo_builder_pb(e, view, project_name, seen)
  1008          if pb:
  1009              pb.short_name = e.props.short_name
  1010              pb.category = e.props.category
  1011              builders.append(pb)
  1012  
  1013      return milo_pb.Console(
  1014          id = view.props.name,
  1015          name = view.props.title,
  1016          header = view.props.header,
  1017          repo_url = view.props.repo,
  1018          refs = ["regexp:" + r for r in view.props.refs],
  1019          exclude_ref = view.props.exclude_ref,
  1020  
  1021          # TODO(hinoka,iannucci): crbug/832893 - Support custom manifest names,
  1022          # such as 'UNPATCHED' / 'PATCHED'.
  1023          manifest_name = "REVISION",
  1024          include_experimental_builds = view.props.include_experimental_builds,
  1025          favicon_url = view.props.favicon or opts.favicon,
  1026          builders = builders,
  1027          default_commit_limit = view.props.default_commit_limit,
  1028          default_expand = view.props.default_expand,
  1029      )
  1030  
def _milo_builder_pb(entry, view, project_name, seen):
    """Returns milo_pb.Builder given *_VIEW_ENTRY node.

    Args:
      entry: a *_VIEW_ENTRY node.
      view: a parent *_VIEW node (for error messages).
      project_name: LUCI project name, to expand short bucket names into
        luci.<project>.<bucket> ones.
      seen: a dict {BUILDER key -> *_VIEW_ENTRY node that added it} with
        already added builders, to detect dups. Mutated.

    Returns:
      milo_pb.Builder or None on errors.
    """
    builder_pb = milo_pb.Builder()

    # Note: this is a one-item list for regular entries per *_view_entry
    # implementation.
    refs = graph.children(entry.key, kinds.BUILDER_REF)
    if len(refs) != 1:
        fail("impossible result %s" % (refs,))

    # Grab BUILDER node and ensure it hasn't been added to this view yet.
    builder = builder_ref.follow(refs[0], context_node = entry)
    if builder.key in seen:
        error(
            "builder %s was already added to %s, previous declaration:\n%s" %
            (builder, view, seen[builder.key].trace),
            trace = entry.trace,
        )
        return None
    seen[builder.key] = entry

    # Milo identifies builders by the legacy 'luci.<project>.<bucket>' form.
    # 'builder.props.project' is set only for builders from another project.
    builder_pb.name = "buildbucket/%s/%s" % (
        legacy_bucket_name(
            builder.props.bucket,
            builder.props.project or project_name,
        ),
        builder.props.name,
    )
    return builder_pb
  1072  
  1073  ################################################################################
  1074  ## commit-queue.cfg.
  1075  
def gen_cq_cfg(ctx):
    """Generates commit-queue.cfg.

    Args:
      ctx: the generator context.
    """
    _cq_check_connections()

    cq_groups = graph.children(keys.project(), kind = kinds.CQ_GROUP)
    if not cq_groups:
        return

    # Note: commit-queue.cfg without any ConfigGroup is forbidden by CQ, but we
    # still allow to specify luci.cq(...) in this case (it is just ignored).
    cq_node = graph.node(keys.cq())
    cfg = cq_pb.Config(
        cq_status_host = cq_node.props.status_host if cq_node else None,
        draining_start_time = cq_node.props.draining_start_time if cq_node else None,
    )

    # NOTE(review): 'cfg' is registered with the output before being fully
    # populated; later in-place mutations below are picked up through the
    # stored reference (same pattern as gen_scheduler_cfg).
    set_config(ctx, "commit-queue.cfg", cfg)

    if cq_node and cq_node.props.submit_max_burst:
        cfg.submit_options = cq_pb.SubmitOptions(
            max_burst = cq_node.props.submit_max_burst,
            # submit_burst_delay is a lucicfg duration: convert to whole seconds.
            burst_delay = duration_pb.Duration(
                seconds = cq_node.props.submit_burst_delay // time.second,
            ),
        )

    # Each luci.cq_group(...) results in a separate cq_pb.ConfigGroup.
    cfg.config_groups = [
        _cq_config_group(g, get_project())
        for g in cq_groups
    ]
  1110  
  1111  def _cq_check_connections():
  1112      """Ensures all cq_tryjob_verifier are connected to one and only one group."""
  1113      for v in graph.children(keys.cq_verifiers_root()):
  1114          groups = graph.parents(v.key, kind = kinds.CQ_GROUP)
  1115          if len(groups) == 0:
  1116              error("%s is not added to any cq_group, either remove or comment it out" % v, trace = v.trace)
  1117          elif len(groups) > 1:
  1118              error(
  1119                  "%s is added to multiple cq_groups: %s" %
  1120                  (v, ", ".join([str(g) for g in groups])),
  1121                  trace = v.trace,
  1122              )
  1123  
def _cq_config_group(cq_group, project):
    """Given a cq_group node returns cq_pb.ConfigGroup."""

    # Group-level ACLs are merged with (and take precedence over) project-wide
    # ones before filtering into CQ-specific access lists.
    acls = aclimpl.normalize_acls(cq_group.props.acls + project.props.acls)
    gerrit_cq_ability = cq_pb.Verifiers.GerritCQAbility(
        committer_list = [a.group for a in filter_acls(acls, [acl.CQ_COMMITTER])],
        dry_run_access_list = [a.group for a in filter_acls(acls, [acl.CQ_DRY_RUNNER])],
        new_patchset_run_access_list = [a.group for a in filter_acls(acls, [acl.CQ_NEW_PATCHSET_RUN_TRIGGERER])],
        allow_submit_with_open_deps = cq_group.props.allow_submit_with_open_deps,
        allow_owner_if_submittable = cq_group.props.allow_owner_if_submittable,
        trust_dry_runner_deps = cq_group.props.trust_dry_runner_deps,
        allow_non_owner_dry_runner = cq_group.props.allow_non_owner_dry_runner,
    )
    if not gerrit_cq_ability.committer_list:
        error("at least one CQ_COMMITTER acl.entry must be specified (either here or in luci.project)", trace = cq_group.trace)

    tree_status = None
    if cq_group.props.tree_status_host:
        tree_status = cq_pb.Verifiers.TreeStatus(url = "https://" + cq_group.props.tree_status_host)

    # Note: CQ_TRYJOB_VERIFIER nodes are by default lexicographically sorted
    # according to auto-generated unique keys (that do not show up in the
    # output). We prefer to sort by user-visible 'name' instead.
    seen = {}  # _cq_builder_name(...) -> verifier node that added it
    tryjob = cq_pb.Verifiers.Tryjob(
        retry_config = _cq_retry_config(cq_group.props.retry_config),
        builders = sorted([
            _cq_tryjob_builder(v, cq_group, project, seen)
            for v in graph.children(cq_group.key, kind = kinds.CQ_TRYJOB_VERIFIER)
        ], key = lambda b: b.name),
    )

    # Bucket watched refs by their '*-review.googlesource.com' host so each
    # host produces a single ConfigGroup.Gerrit message. '__'-prefixed fields
    # are the private fields of the cq.refset(...) struct.
    group_by_gob_host = {}
    for w in cq_group.props.watch:
        if w.__kind != "gob":
            fail("only Gerrit repos are supported")
        group_by_gob_host.setdefault(w.__gob_host, []).append(w)

    user_limits = [_cq_user_limit(q) for q in cq_group.props.user_limits]
    user_limit_default = cq_group.props.user_limit_default
    if user_limit_default != None:
        user_limit_default = _cq_user_limit(user_limit_default)
    post_actions = cq_group.props.post_actions
    if post_actions != None:
        post_actions = [_cq_post_action(pa) for pa in post_actions]
    tryjob_experiments = cq_group.props.tryjob_experiments
    if tryjob_experiments != None:
        tryjob_experiments = [
            _cq_tryjob_experiment(te)
            for te in tryjob_experiments
        ]

    return cq_pb.ConfigGroup(
        name = cq_group.key.id,
        gerrit = [
            cq_pb.ConfigGroup.Gerrit(
                url = "https://%s-review.googlesource.com" % host,
                projects = [
                    cq_pb.ConfigGroup.Gerrit.Project(
                        name = w.__gob_proj,
                        ref_regexp = w.__refs,
                        ref_regexp_exclude = w.__refs_exclude,
                    )
                    for w in watches
                ],
            )
            for host, watches in group_by_gob_host.items()
        ],
        verifiers = cq_pb.Verifiers(
            gerrit_cq_ability = gerrit_cq_ability,
            tree_status = tree_status,
            # Omit the tryjob section entirely when there are no builders.
            tryjob = tryjob if tryjob.builders else None,
        ),
        additional_modes = [
            _cq_run_mode(m)
            for m in cq_group.props.additional_modes
        ] if cq_group.props.additional_modes else None,
        user_limits = user_limits,
        user_limit_default = user_limit_default,
        post_actions = post_actions,
        tryjob_experiments = tryjob_experiments,
    )
  1205  
  1206  def _cq_retry_config(retry_config):
  1207      """cq._retry_config(...) => cq_pb.Verifiers.Tryjob.RetryConfig."""
  1208      if not retry_config:
  1209          return None
  1210      return cq_pb.Verifiers.Tryjob.RetryConfig(
  1211          single_quota = retry_config.single_quota,
  1212          global_quota = retry_config.global_quota,
  1213          failure_weight = retry_config.failure_weight,
  1214          transient_failure_weight = retry_config.transient_failure_weight,
  1215          timeout_weight = retry_config.timeout_weight,
  1216      )
  1217  
  1218  def _cq_post_action(post_action):
  1219      """Converts a post action to cq_pb.ConfigGroup.PostAction."""
  1220      ret = cq_pb.ConfigGroup.PostAction(
  1221          name = post_action.name,
  1222          conditions = [
  1223              cq_pb.ConfigGroup.PostAction.TriggeringCondition(
  1224                  mode = cond.mode,
  1225                  statuses = cond.statuses,
  1226              )
  1227              for cond in post_action.conditions
  1228          ],
  1229      )
  1230      if post_action.vote_gerrit_labels != None:
  1231          ret.vote_gerrit_labels = cq_pb.ConfigGroup.PostAction.VoteGerritLabels(
  1232              votes = [
  1233                  cq_pb.ConfigGroup.PostAction.VoteGerritLabels.Vote(
  1234                      name = k,
  1235                      value = v,
  1236                  )
  1237                  for k, v in post_action.vote_gerrit_labels.items()
  1238              ],
  1239          )
  1240  
  1241      return ret
  1242  
  1243  def _cq_tryjob_experiment(experiment):
  1244      """Converts a tryjob experiment to cq_pb.ConfigGroup.TryjobExperiment."""
  1245      ret = cq_pb.ConfigGroup.TryjobExperiment(
  1246          name = experiment.name,
  1247      )
  1248      if experiment.owner_group_allowlist:
  1249          ret.condition = cq_pb.ConfigGroup.TryjobExperiment.Condition(
  1250              owner_group_allowlist = experiment.owner_group_allowlist,
  1251          )
  1252      return ret
  1253  
  1254  def _cq_run_mode(run_mode):
  1255      """cq._run_mode(...) => cq_pb.Mode."""
  1256      if not run_mode:
  1257          return None
  1258      return cq_pb.Mode(
  1259          name = run_mode.name,
  1260          cq_label_value = run_mode.cq_label_value,
  1261          triggering_label = run_mode.triggering_label,
  1262          triggering_value = run_mode.triggering_value,
  1263      )
  1264  
def _cq_tryjob_builder(verifier, cq_group, project, seen):
    """cq_tryjob_verifier(...) => cq_pb.Verifiers.Tryjob.Builder.

    Args:
      verifier: luci.cq_tryjob_verifier node.
      cq_group: luci.cq_group node (for error messages).
      project: luci.project node (for project name).
      seen: map[builder name as in *.cfg => cq.cq_tryjob_verifier that added it].

    Returns:
      cq_pb.Verifiers.Tryjob.Builder or None on a duplicate (reported via
      error(...)).
    """
    builder = _cq_builder_from_node(verifier)
    name = _cq_builder_name(builder, project)

    # Make sure there are no dups.
    if name in seen:
        error(
            "verifier that references %s was already added to %s, previous declaration:\n%s" %
            (builder, cq_group, seen[name].trace),
            trace = verifier.trace,
        )
        return None
    seen[name] = verifier

    return cq_pb.Verifiers.Tryjob.Builder(
        name = name,
        result_visibility = _cq_visibility(verifier.props.result_visibility),
        includable_only = verifier.props.includable_only,
        cancel_stale = _cq_toggle(verifier.props.cancel_stale),
        disable_reuse = verifier.props.disable_reuse,
        experiment_percentage = verifier.props.experiment_percentage,
        owner_whitelist_group = verifier.props.owner_whitelist,
        location_filters = [_cq_location_filter(n) for n in verifier.props.location_filters],
        equivalent_to = _cq_equivalent_to(verifier, project),
        mode_allowlist = verifier.props.mode_allowlist,
    )
  1299  
  1300  def _cq_builder_from_node(node):
  1301      """Given a CQ node returns corresponding 'builder' node by following refs.
  1302  
  1303      Args:
  1304        node: either 'cq_tryjob_verifier' or 'cq_equivalent_builder' node.
  1305  
  1306      Returns:
  1307        Corresponding 'builder' node.
  1308      """
  1309  
  1310      # Per cq_tryjob_verifier implementation, the node MUST have single
  1311      # builder_ref child, which we resolve to a concrete luci.builder(...) node.
  1312      refs = graph.children(node.key, kinds.BUILDER_REF)
  1313      if len(refs) != 1:
  1314          fail("impossible result %s" % (refs,))
  1315      return builder_ref.follow(refs[0], context_node = node)
  1316  
  1317  def _cq_equivalent_to(verifier, project):
  1318      """cq_tryjob_verifier(...) => cq_pb.Verifiers.Tryjob.EquivalentBuilder | None.
  1319  
  1320      Args:
  1321        verifier: 'cq_tryjob_verifier' node.
  1322        project: 'project' node.
  1323      """
  1324      nodes = graph.children(verifier.key, kind = kinds.CQ_EQUIVALENT_BUILDER)
  1325      if len(nodes) == 0:
  1326          return None
  1327      if len(nodes) > 1:
  1328          fail("impossible result %s" % (nodes,))
  1329      equiv_builder = nodes[0]
  1330      return cq_pb.Verifiers.Tryjob.EquivalentBuilder(
  1331          name = _cq_builder_name(_cq_builder_from_node(equiv_builder), project),
  1332          percentage = equiv_builder.props.percentage,
  1333          owner_whitelist_group = equiv_builder.props.whitelist,
  1334      )
  1335  
  1336  def _cq_location_filter(node):
  1337      """cq.location_filter(...) => cq_pb.Verifiers.Tryjob.Builder.LocationFilter"""
  1338      return cq_pb.Verifiers.Tryjob.Builder.LocationFilter(
  1339          gerrit_host_regexp = node.gerrit_host_regexp or ".*",
  1340          gerrit_project_regexp = node.gerrit_project_regexp or ".*",
  1341          path_regexp = node.path_regexp or ".*",
  1342          exclude = node.exclude,
  1343      )
  1344  
  1345  def _cq_builder_name(builder, project):
  1346      """Given Builder node returns a string reference to it for CQ config."""
  1347      return "%s/%s/%s" % (
  1348          builder.props.project or project.props.name,
  1349          builder.props.bucket,
  1350          builder.props.name,
  1351      )
  1352  
  1353  def _cq_toggle(val):
  1354      """Bool|None => cq_pb.Toggle."""
  1355      if val == None:
  1356          return cq_pb.UNSET
  1357      return cq_pb.YES if val else cq_pb.NO
  1358  
  1359  def _cq_visibility(val):
  1360      """Visibility|None => cq_pb.Visibility."""
  1361      if val == None:
  1362          return cq_pb.COMMENT_LEVEL_UNSET
  1363      return val
  1364  
  1365  def _cq_user_limit(limit):
  1366      """cq.user_limit(...) => cq_pb.UserLimit."""
  1367      return cq_pb.UserLimit(
  1368          name = limit.name,
  1369          principals = limit.principals,
  1370          run = _cq_user_limit_run(limit.run),
  1371      )
  1372  
  1373  def _cq_user_limit_run(limits):
  1374      """cq.run_limits(...) => cq_pb.UserLimit.Run."""
  1375      return cq_pb.UserLimit.Run(
  1376          max_active = _cq_user_limit_limit(
  1377              limits.max_active if limits != None else None,
  1378          ),
  1379          reach_limit_msg = limits.reach_limit_msg if limits != None else None,
  1380      )
  1381  
  1382  def _cq_user_limit_limit(limit):
  1383      """Int|None => cq_pb.UserLimit.Limit."""
  1384  
  1385      # if the limit is None, return with unlimited = True, so that
  1386      # so that the config output clarifies what limits were set as unlimited.
  1387      if limit == None:
  1388          return cq_pb.UserLimit.Limit(unlimited = True)
  1389      return cq_pb.UserLimit.Limit(value = limit)
  1390  
  1391  ################################################################################
  1392  ## notify.cfg.
  1393  
def gen_notify_cfg(ctx):
    """Generates notify.cfg.

    Args:
      ctx: the generator context.
    """
    # Project-wide LUCI Notify options, if luci.notify(...) was declared.
    opts = graph.node(keys.notify())
    tree_closing_enabled = opts.props.tree_closing_enabled if opts else False

    # Nothing to do at all if there are no notifiers/tree closers and no
    # email templates defined in the project.
    notifiables = graph.children(keys.project(), kinds.NOTIFIABLE)
    templates = graph.children(keys.project(), kinds.NOTIFIER_TEMPLATE)
    if not notifiables and not templates:
        return

    service = get_service("notify", "using notifiers or tree closers")

    # Write all defined templates.
    for t in templates:
        path = "%s/email-templates/%s.template" % (service.app_id, t.props.name)
        set_config(ctx, path, t.props.body)

    # Build the map 'builder node => [notifiable node] watching it'.
    per_builder = {}
    for n in notifiables:
        for ref in graph.children(n.key, kinds.BUILDER_REF):
            builder = builder_ref.follow(ref, context_node = n)
            per_builder.setdefault(builder, []).append(n)

    # Calculate the map {builder key => [gitiles_poller that triggers it]}
    # needed for deriving repo URLs associated with builders.
    #
    # TODO(vadimsh): Cache this somewhere. A very similar calculation is done by
    # scheduler.cfg generator. There's currently no reliable mechanism to carry
    # precalculated state between different luci.generator(...) implementations.
    pollers = _notify_pollers_map()

    # Emit a single notify_pb.Notifier per builder with all notifications for
    # that particular builder.
    notifiers_pb = []
    for builder, nodes in per_builder.items():
        # 'nodes' here is a list of luci.notifiable nodes. Categorize them
        # either into luci.notifier or luci.tree_closer based on their 'kind'
        # property.
        notifications = [n for n in nodes if n.props.kind == "luci.notifier"]
        tree_closers = [n for n in nodes if n.props.kind == "luci.tree_closer"]
        if len(notifications) + len(tree_closers) != len(nodes):
            fail("impossible")

        # Validate luci.notifier(...) has enough information about builders.
        # Note that error(...) records the problem and execution continues, so
        # the (repo-less) notifier is still emitted below.
        repo = _notify_builder_repo(builder, pollers)
        if any([n.props.notify_blamelist for n in notifications]) and not repo:
            error(
                ("cannot deduce a primary repo for %s, which is observed by a " +
                 "luci.notifier with notify_blamelist=True; add repo=... field") % builder,
                trace = builder.trace,
            )

        notifiers_pb.append(notify_pb.Notifier(
            notifications = [_notify_notification_pb(n) for n in notifications],
            tree_closers = [_notify_tree_closer_pb(n) for n in tree_closers],
            builders = [notify_pb.Builder(
                bucket = builder.props.bucket,
                name = builder.props.name,
                repository = repo,
            )],
        ))

    # Done! Notifiers are sorted by (bucket, builder) to keep the generated
    # config deterministic regardless of dict iteration order.
    set_config(ctx, service.cfg_file, notify_pb.ProjectConfig(
        notifiers = sorted(
            notifiers_pb,
            key = lambda n: (n.builders[0].bucket, n.builders[0].name),
        ),
        tree_closing_enabled = tree_closing_enabled,
    ))
  1469  
  1470  def _notify_used_template_name(node):
  1471      """Given a luci.notifiable node returns a name of a template it references."""
  1472      templs = graph.children(node.key, kind = kinds.NOTIFIER_TEMPLATE)
  1473      if len(templs) == 0:
  1474          return None
  1475      if len(templs) == 1:
  1476          return templs[0].props.name
  1477      fail("impossible")
  1478  
  1479  def _notify_notification_pb(node):
  1480      """Given a luci.notifiable node returns notify_pb.Notification."""
  1481      pb = notify_pb.Notification(
  1482          on_occurrence = node.props.on_occurrence,
  1483          on_new_status = node.props.on_new_status,
  1484          failed_step_regexp = node.props.failed_step_regexp,
  1485          failed_step_regexp_exclude = node.props.failed_step_regexp_exclude,
  1486          template = _notify_used_template_name(node),
  1487  
  1488          # deprecated
  1489          on_change = node.props.on_status_change,
  1490          on_failure = node.props.on_failure,
  1491          on_new_failure = node.props.on_new_failure,
  1492          on_success = node.props.on_success,
  1493      )
  1494      if node.props.notify_emails or node.props.notify_rotation_urls:
  1495          pb.email = notify_pb.Notification.Email(
  1496              recipients = node.props.notify_emails,
  1497              rotation_urls = node.props.notify_rotation_urls,
  1498          )
  1499      if node.props.notify_blamelist:
  1500          pb.notify_blamelist = notify_pb.Notification.Blamelist(
  1501              repository_allowlist = node.props.blamelist_repos_whitelist,
  1502          )
  1503      return pb
  1504  
  1505  def _notify_tree_closer_pb(node):
  1506      """Given a luci.notifiable node returns notify_pb.TreeCloser."""
  1507      return notify_pb.TreeCloser(
  1508          tree_status_host = node.props.tree_status_host,
  1509          failed_step_regexp = node.props.failed_step_regexp,
  1510          failed_step_regexp_exclude = node.props.failed_step_regexp_exclude,
  1511          template = _notify_used_template_name(node),
  1512      )
  1513  
  1514  def _notify_pollers_map():
  1515      """Returns a map {builder key => [gitiles poller that triggers it]}."""
  1516      out = {}
  1517      for bucket in get_buckets():
  1518          for poller in graph.children(bucket.key, kinds.GITILES_POLLER):
  1519              for builder in triggerer.targets(poller):
  1520                  out.setdefault(builder.key, []).append(poller)
  1521      return out
  1522  
  1523  def _notify_builder_repo(builder, pollers_map):
  1524      """Given a builder node returns its primary repo URL or None.
  1525  
  1526      Either uses a repo URL explicitly passed via `repo` field in the builder
  1527      definition, or grabs it from a poller that triggers this builder, if there's
  1528      only one such poller (so there's no ambiguity).
  1529      """
  1530      if builder.props.repo:
  1531          return builder.props.repo
  1532      repos = set([t.props.repo for t in pollers_map.get(builder.key, [])])
  1533      if len(repos) == 1:
  1534          return list(repos)[0]
  1535      return None
  1536  
  1537  ################################################################################
  1538  ## tricium.cfg.
  1539  
  1540  def gen_tricium_cfg(ctx):
  1541      """Generates tricium.cfg.
  1542  
  1543      Args:
  1544        ctx: the generator context.
  1545      """
  1546      cq_groups = graph.children(keys.project(), kind = kinds.CQ_GROUP)
  1547      if not cq_groups:
  1548          return
  1549  
  1550      project = get_project()
  1551      result = None
  1552      for cq_group in cq_groups:
  1553          tricium_config = _tricium_config(
  1554              graph.children(cq_group.key, kind = kinds.CQ_TRYJOB_VERIFIER),
  1555              cq_group,
  1556              project,
  1557          )
  1558          if tricium_config == None:
  1559              continue
  1560          elif result == None:
  1561              result = struct(cfg = tricium_config, mapping_cq_group = cq_group)
  1562          elif result.cfg != tricium_config:
  1563              # if multiple config groups have defined Tricium analyzers, they
  1564              # MUST generate the same Tricium project config.
  1565              error(
  1566                  "%s is watching different set of Gerrit repos or defining different analyzers from %s" % (cq_group, result.mapping_cq_group),
  1567                  trace = cq_group.trace,
  1568              )
  1569  
  1570      if result:
  1571          service = get_service("tricium", "defining Tricium project config")
  1572          result.cfg.service_account = "%s@appspot.gserviceaccount.com" % service.app_id
  1573          set_config(ctx, service.cfg_file, result.cfg)
  1574  
def _tricium_config(verifiers, cq_group, project):
    """Returns tricium_pb.ProjectConfig.

    Returns None if none of the provided verifiers is a Tricium analyzer.

    Args:
      verifiers: a list of luci.cq_tryjob_verifier nodes.
      cq_group: a luci.cq_group node.
      project: a luci.project node.
    """
    ret = tricium_pb.ProjectConfig()

    # These three settings are per-project/per-repo in the Tricium config, so
    # all analyzers in the group must agree on them. Each is captured from the
    # first analyzer seen and compared against every subsequent one.
    whitelisted_group = None
    watching_gerrit_projects = None
    disable_reuse = None
    for verifier in verifiers:
        # Only verifiers allow-listed for ANALYZER_RUN mode are Tricium
        # analyzers; skip regular tryjobs.
        if cq.MODE_ANALYZER_RUN not in verifier.props.mode_allowlist:
            continue
        recipe = _tricium_recipe(verifier, project)
        func_name = _compute_func_name(recipe)
        gerrit_projs, exts = _parse_location_filters_for_tricium(verifier.props.location_filters)
        # All analyzers must watch the same set of Gerrit repos. error(...)
        # records the problem and the loop keeps going.
        if watching_gerrit_projects == None:
            watching_gerrit_projects = gerrit_projs
        elif watching_gerrit_projects != gerrit_projs:
            error(
                "The location_filters of analyzer %s specifies a different set of Gerrit repos from the other analyzer; got: %s other: %s" % (
                    verifier,
                    ["%s-review.googlesource.com/%s" % (host, proj) for host, proj in gerrit_projs],
                    ["%s-review.googlesource.com/%s" % (host, proj) for host, proj in watching_gerrit_projects],
                ),
                trace = verifier.trace,
            )
        # disable_reuse feeds check_all_revision_kinds below (a per-repo
        # setting), so it must be uniform across analyzers as well.
        verifier_disable_reuse = verifier.props.disable_reuse
        if disable_reuse == None:
            disable_reuse = verifier_disable_reuse
        elif disable_reuse != verifier_disable_reuse:
            error(
                "The disable_reuse field of analyzer %s does not match the others, got: %s, others: %s" % (
                    verifier,
                    verifier_disable_reuse,
                    disable_reuse,
                ),
                trace = verifier.trace,
            )
        # One Function + one Selection per analyzer, always targeting LINUX.
        ret.functions.append(tricium_pb.Function(
            type = tricium_pb.Function.ANALYZER,
            name = func_name,
            needs = tricium_pb.GIT_FILE_DETAILS,
            provides = tricium_pb.RESULTS,
            # 'exts' are file extensions extracted from ".+\.<ext>" path
            # regexps in the analyzer's location_filters.
            path_filters = ["*.%s" % ext for ext in exts],
            impls = [
                tricium_pb.Impl(
                    provides_for_platform = tricium_pb.LINUX,
                    runtime_platform = tricium_pb.LINUX,
                    recipe = recipe,
                ),
            ],
        ))
        ret.selections.append(tricium_pb.Selection(
            function = func_name,
            platform = tricium_pb.LINUX,
        ))
        # owner_whitelist is sorted so the comparison below is insensitive to
        # declaration order; it must also match across all analyzers.
        owner_whitelist = sorted(verifier.props.owner_whitelist) if verifier.props.owner_whitelist else []
        if whitelisted_group == None:
            whitelisted_group = owner_whitelist
        elif whitelisted_group != owner_whitelist:
            error(
                "analyzer %s has different owner_whitelist from other analyzers in the config group %s" % (verifier, cq_group),
                trace = verifier.trace,
            )

    # No analyzers at all => this CQ group contributes no Tricium config.
    if not ret.functions:
        return None

    # Sort for deterministic config output.
    ret.functions = sorted(ret.functions, key = lambda f: f.name)
    ret.selections = sorted(ret.selections, key = lambda f: f.function)

    # If no analyzer narrowed the watched repos via location_filters, fall
    # back to everything the CQ group itself watches.
    watching_gerrit_projects = watching_gerrit_projects or sorted(set([
        (w.__gob_host, w.__gob_proj)
        for w in cq_group.props.watch
    ]))
    ret.repos = [
        tricium_pb.RepoDetails(
            gerrit_project = tricium_pb.RepoDetails.GerritProject(
                host = "%s-review.googlesource.com" % host,
                project = proj,
                git_url = "https://%s.googlesource.com/%s" % (host, proj),
            ),
            whitelisted_group = whitelisted_group,
            check_all_revision_kinds = disable_reuse,
        )
        for host, proj in watching_gerrit_projects
    ]
    return ret
  1667  
  1668  def _tricium_recipe(verifier, project):
  1669      """(luci.cq_tryjob_verifier, luci.project) => tricium_pb.Recipe."""
  1670      builder = _cq_builder_from_node(verifier)
  1671      return tricium_pb.Recipe(
  1672          project = builder.props.project or project.props.name,
  1673          bucket = builder.props.bucket,
  1674          builder = builder.props.name,
  1675      )
  1676  
  1677  def _compute_func_name(recipe):
  1678      """Returns an alphanumeric function name."""
  1679  
  1680      def normalize(s):
  1681          return "".join([ch for ch in s.title().elems() if ch.isalnum()])
  1682  
  1683      return "".join([
  1684          normalize(recipe.project),
  1685          normalize(recipe.bucket),
  1686          normalize(recipe.builder),
  1687      ])
  1688  
  1689  def _parse_location_filters_for_tricium(location_filters):
  1690      """Returns Gerrit projects and path filters based on location_filters.
  1691  
  1692      The parsed host, project, and extension patterns are used only for
  1693      generating watched repos and path filters in the Tricium config. Hosts,
  1694      projects or path filters that aren't in the expected format will be
  1695      skipped.
  1696  
  1697      Returns:
  1698          A pair: (list of (host, proj) pairs, list of extension patterns).
  1699          Either list may be empty.
  1700      """
  1701      host_and_projs = []
  1702      exts = []
  1703      for f in location_filters:
  1704          if f.gerrit_host_regexp.endswith("-review.googlesource.com") and f.gerrit_project_regexp:
  1705              host = f.gerrit_host_regexp[:-len("-review.googlesource.com")]
  1706              proj = f.gerrit_project_regexp
  1707              host_and_projs.append((host, proj))
  1708          if f.path_regexp and f.path_regexp.startswith(r".+\."):
  1709              exts.append(f.path_regexp[len(r".+\."):])
  1710      return sorted(set(host_and_projs)), sorted(set(exts))
  1711  
  1712  def _buildbucket_dynamic_builder_template(bucket):
  1713      """luci.bucket(...) node => buildbucket_pb.Bucket.DynamicBuilderTemplate or None."""
  1714      if not bucket.props.dynamic and len(graph.children(bucket.key, kinds.DYNAMIC_BUILDER_TEMPLATE)) == 0:
  1715          return None
  1716  
  1717      if not bucket.props.dynamic and len(graph.children(bucket.key, kinds.DYNAMIC_BUILDER_TEMPLATE)) > 0:
  1718          error("bucket \"%s\" must not have dynamic_builder_template" % bucket.props.name, trace = bucket.trace)
  1719  
  1720      if len(graph.children(bucket.key, kinds.DYNAMIC_BUILDER_TEMPLATE)) > 1:
  1721          error("dynamic bucket \"%s\" can have at most one dynamic_builder_template" % bucket.props.name, trace = bucket.trace)
  1722  
  1723      bldr_template = None
  1724      for node in graph.children(bucket.key, kinds.DYNAMIC_BUILDER_TEMPLATE):
  1725          bldr_template, _ = _buildbucket_builder(node, None)
  1726  
  1727      return buildbucket_pb.Bucket.DynamicBuilderTemplate(template = bldr_template)