sigs.k8s.io/cluster-api@v1.7.1/Tiltfile

# -*- mode: Python -*-

envsubst_cmd = "./hack/tools/bin/envsubst"
clusterctl_cmd = "./bin/clusterctl"
kubectl_cmd = "kubectl"
kubernetes_version = "v1.29.2"

load("ext://uibutton", "cmd_button", "location", "text_input")

# require a minimum Tilt version
version_settings(True, ">=0.30.8")

# set defaults
settings = {
    "enable_providers": ["docker"],
    "kind_cluster_name": os.getenv("CAPI_KIND_CLUSTER_NAME", "capi-test"),
    "debug": {},
    "build_engine": "docker",
}

# merge in global settings from tilt-settings.yaml (preferred) or tilt-settings.json
tilt_file = "./tilt-settings.yaml" if os.path.exists("./tilt-settings.yaml") else "./tilt-settings.json"
settings.update(read_yaml(
    tilt_file,
    default = {},
))
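
# An illustrative tilt-settings.yaml, assuming a kind management cluster and the docker provider
# (keys shown are ones this Tiltfile reads; values are examples):
#
#     default_registry: gcr.io/your-project-name-here
#     enable_providers:
#     - docker
#     - kubeadm-bootstrap
#     - kubeadm-control-plane
#     kind_cluster_name: capi-test
#     debug:
#       core:
#         port: 30000
#     deploy_observability:
#     - grafana
#     - loki
#     - promtail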

os.putenv("CAPI_KIND_CLUSTER_NAME", settings.get("kind_cluster_name"))

allow_k8s_contexts(settings.get("allowed_contexts"))

if str(local("command -v " + kubectl_cmd + " || true", quiet = True)) == "":
    fail("Required command '" + kubectl_cmd + "' not found in PATH")

# detect if docker images should be built using podman
if "Podman Engine" in str(local("docker version || podman version", quiet = True)):
    settings["build_engine"] = "podman"

os_name = str(local("go env GOOS")).rstrip("\n")
os_arch = str(local("go env GOARCH")).rstrip("\n")

if settings.get("trigger_mode") == "manual":
    trigger_mode(TRIGGER_MODE_MANUAL)

usingLocalRegistry = str(local(kubectl_cmd + " get cm -n kube-public local-registry-hosting || true", quiet = True))
if not usingLocalRegistry:
    if settings.get("default_registry", "") == "":
        fail("default_registry is required when not using a local registry, please add it to your tilt-settings.yaml/json")

    protectedRegistries = ["gcr.io/k8s-staging-cluster-api"]
    if settings.get("default_registry") in protectedRegistries:
        fail("current default_registry '{}' is protected, tilt cannot push images to it. Please select another default_registry in your tilt-settings.yaml/json".format(settings.get("default_registry")))

if settings.get("default_registry", "") != "":
    default_registry(settings.get("default_registry"))

always_enable_providers = ["core"]

providers = {
    "core": {
        "context": ".",  # NOTE: this should be kept in sync with corresponding setting in tilt-prepare
        "image": "gcr.io/k8s-staging-cluster-api/cluster-api-controller",
        "live_reload_deps": [
            "main.go",
            "go.mod",
            "go.sum",
            "api",
            "cmd",
            "controllers",
            "errors",
            "exp",
            "feature",
            "internal",
            "util",
            "webhooks",
        ],
        "label": "CAPI",
    },
    "kubeadm-bootstrap": {
        "context": "bootstrap/kubeadm",  # NOTE: this should be kept in sync with corresponding setting in tilt-prepare
        "image": "gcr.io/k8s-staging-cluster-api/kubeadm-bootstrap-controller",
        "live_reload_deps": [
            "main.go",
            "api",
            "controllers",
            "internal",
            "types",
            "../../go.mod",
            "../../go.sum",
        ],
        "label": "CABPK",
    },
    "kubeadm-control-plane": {
        "context": "controlplane/kubeadm",  # NOTE: this should be kept in sync with corresponding setting in tilt-prepare
        "image": "gcr.io/k8s-staging-cluster-api/kubeadm-control-plane-controller",
        "live_reload_deps": [
            "main.go",
            "api",
            "controllers",
            "internal",
            "../../go.mod",
            "../../go.sum",
        ],
        "label": "KCP",
    },
    "docker": {
        "context": "test/infrastructure/docker",  # NOTE: this should be kept in sync with corresponding setting in tilt-prepare
        "image": "gcr.io/k8s-staging-cluster-api/capd-manager",
        "live_reload_deps": [
            "main.go",
            "../../go.mod",
            "../../go.sum",
            "../container",
            "api",
            "controllers",
            "docker",
            "exp",
            "internal",
        ],
        "label": "CAPD",
    },
    "in-memory": {
        "context": "test/infrastructure/inmemory",  # NOTE: this should be kept in sync with corresponding setting in tilt-prepare
        "image": "gcr.io/k8s-staging-cluster-api/capim-manager",
        "live_reload_deps": [
            "main.go",
            "../../go.mod",
            "../../go.sum",
            "api",
            "controllers",
            "internal",
        ],
        "label": "CAPIM",
    },
    "test-extension": {
        "context": "test/extension",  # NOTE: this should be kept in sync with corresponding setting in tilt-prepare
        "image": "gcr.io/k8s-staging-cluster-api/test-extension",
        "live_reload_deps": [
            "main.go",
            "handlers",
        ],
        "label": "test-extension",
        # Add the ExtensionConfig for this Runtime Extension; given that the ExtensionConfig can only be installed
        # once the capi_controller resource is up and running, resource_deps is set to ensure the proper install order.
        "additional_resources": [
            "config/tilt/extensionconfig.yaml",
        ],
        "resource_deps": ["capi_controller"],
    },
}

# Reads a provider's tilt-provider.yaml (or tilt-provider.json) file and merges it into the providers map.
# A list of dictionaries is also supported, by enclosing the entries in brackets [].
# An example tilt-provider.json looks like this:
# {
#     "name": "aws",
#     "config": {
#         "image": "gcr.io/k8s-staging-cluster-api-aws/cluster-api-aws-controller",
#         "live_reload_deps": [
#             "main.go", "go.mod", "go.sum", "api", "cmd", "controllers", "pkg"
#         ]
#     }
# }
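
# A sketch of the equivalent tilt-provider.yaml (read_yaml in load_provider_tiltfiles below handles
# both formats; values are copied from the JSON example and remain illustrative):
#
#     name: aws
#     config:
#       image: gcr.io/k8s-staging-cluster-api-aws/cluster-api-aws-controller
#       live_reload_deps:
#       - main.go
#       - go.mod
#       - go.sum
#       - api
#       - cmd
#       - controllers
#       - pkg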

def load_provider_tiltfiles():
    provider_repos = settings.get("provider_repos", [])

    for repo in provider_repos:
        file = repo + "/tilt-provider.yaml" if os.path.exists(repo + "/tilt-provider.yaml") else repo + "/tilt-provider.json"
        if not os.path.exists(file):
            fail("Failed to load provider. No tilt-provider.{yaml|json} file found in " + repo)
        provider_details = read_yaml(file, default = {})
        if type(provider_details) != type([]):
            provider_details = [provider_details]
        for item in provider_details:
            provider_name = item["name"]
            provider_config = item["config"]
            if "context" in provider_config:
                provider_config["context"] = repo + "/" + provider_config["context"]
            else:
                provider_config["context"] = repo
            if "go_main" not in provider_config:
                provider_config["go_main"] = "main.go"
            providers[provider_name] = provider_config

tilt_helper_dockerfile_header = """
# Tilt image
FROM golang:1.21.9 as tilt-helper
# Install delve. Note this should be kept in step with the Go release minor version.
RUN go install github.com/go-delve/delve/cmd/dlv@v1.21
# Support live reloading with Tilt
RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/tilt-dev/rerun-process-wrapper/master/restart.sh  && \
    wget --output-document /start.sh --quiet https://raw.githubusercontent.com/tilt-dev/rerun-process-wrapper/master/start.sh && \
    chmod +x /start.sh && chmod +x /restart.sh && chmod +x /go/bin/dlv && \
    touch /process.txt && chmod 0777 /process.txt `# pre-create PID file to allow even non-root users to run the image`
"""

tilt_dockerfile_header = """
FROM golang:1.21.9 as tilt
WORKDIR /
COPY --from=tilt-helper /process.txt .
COPY --from=tilt-helper /start.sh .
COPY --from=tilt-helper /restart.sh .
COPY --from=tilt-helper /go/bin/dlv .
COPY $binary_name .
"""
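
# build_docker_image below joins these two headers with a provider's optional additional_docker_helper_commands
# (appended to the tilt-helper stage) and additional_docker_build_commands (appended to the tilt stage) to form
# the final Dockerfile. For the core provider, which defines neither, the rendered Dockerfile is just the two
# stages above, with binary_name=manager passed as a build arg.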

def build_go_binary(context, reload_deps, debug, go_main, binary_name, label):
    # Set up a local_resource build of a go binary. The target repo is expected to have a main.go in
    # the context path, or the main file must be provided via the go_main option. The binary is written to .tiltbuild/bin/$binary_name.
    # TODO @randomvariable: Race detector mode currently only works on x86-64 Linux.
    # Need to switch to building inside Docker when the architecture is mismatched.
    race_detector_enabled = debug.get("race_detector", False)
    if race_detector_enabled:
        if os_name != "linux" or os_arch != "amd64":
            fail("race_detector is only supported on Linux x86-64")
        cgo_enabled = "1"
        build_options = "-race"
        ldflags = "-linkmode external -extldflags \"-static\""
    else:
        cgo_enabled = "0"
        build_options = ""
        ldflags = "-extldflags \"-static\""

    debug_port = int(debug.get("port", 0))
    if debug_port != 0:
        # disable optimisations and include line numbers when debugging
        gcflags = "all=-N -l"
    else:
        gcflags = ""

    build_env = "CGO_ENABLED={cgo_enabled} GOOS=linux GOARCH={arch}".format(
        cgo_enabled = cgo_enabled,
        arch = os_arch,
    )

    build_cmd = "{build_env} go build {build_options} -gcflags '{gcflags}' -ldflags '{ldflags}' -o .tiltbuild/bin/{binary_name} {go_main}".format(
        build_env = build_env,
        build_options = build_options,
        gcflags = gcflags,
        go_main = go_main,
        ldflags = ldflags,
        binary_name = binary_name,
    )

    # Prefix each live reload dependency with the context. For example, if the context is
    # test/infra/docker and main.go is listed as a dep, the result is test/infra/docker/main.go. This adjustment is
    # needed so Tilt can watch the correct paths for changes.
    live_reload_deps = []
    for d in reload_deps:
        live_reload_deps.append(context + "/" + d)

    # Ensure the {context}/.tiltbuild/bin directory exists before any other resources run.
    # `local` is evaluated immediately; other resources are executed later, on startup or when triggered.
    local("mkdir -p {context}/.tiltbuild/bin".format(context = shlex.quote(context)), quiet = True)

    # Build the go binary
    local_resource(
        label.lower() + "_binary",
        cmd = "cd {context};{build_cmd}".format(
            context = context,
            build_cmd = build_cmd,
        ),
        deps = live_reload_deps,
        labels = [label, "ALL.binaries"],
    )
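
# For reference, with no debug settings and an amd64 host (assumed here for illustration), the core
# provider's build_cmd expands to roughly:
#   CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build  -gcflags '' -ldflags '-extldflags "-static"' -o .tiltbuild/bin/manager main.go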

def build_docker_image(image, context, binary_name, additional_docker_build_commands, additional_docker_helper_commands, port_forwards):
    dockerfile_contents = "\n".join([
        tilt_helper_dockerfile_header,
        additional_docker_helper_commands,
        tilt_dockerfile_header,
        additional_docker_build_commands,
    ])

    # Set up an image build for the provider. The live update configuration syncs the output from the local_resource
    # build into the container.
    if settings.get("build_engine") == "podman":
        bin_context = context + "/.tiltbuild/bin/"

        # Write dockerfile_contents to a Dockerfile, as custom_build supports neither a dockerfile_contents argument nor stdin.
        # The Dockerfile is placed in the context path to simplify the podman command below.
        local("tee %s/Dockerfile" % (shlex.quote(bin_context)), quiet = True, stdin = dockerfile_contents)

        custom_build(
            ref = image,
            command = (
                "set -ex\n" +
                "podman build -t $EXPECTED_REF --build-arg binary_name=%s --target tilt %s\n" +
                "podman push --format=docker $EXPECTED_REF\n"
            ) % (binary_name, shlex.quote(bin_context)),
            deps = [bin_context],
            skips_local_docker = True,
            live_update = [
                sync(bin_context + binary_name, "/" + binary_name),
                run("sh /restart.sh"),
            ],
        )
    else:
        docker_build(
            ref = image,
            context = context + "/.tiltbuild/bin/",
            dockerfile_contents = dockerfile_contents,
            build_args = {"binary_name": binary_name},
            target = "tilt",
            only = binary_name,
            live_update = [
                sync(context + "/.tiltbuild/bin/" + binary_name, "/" + binary_name),
                run("sh /restart.sh"),
            ],
        )

def get_port_forwards(debug):
    port_forwards = []
    links = []

    debug_port = int(debug.get("port", 0))
    if debug_port != 0:
        port_forwards.append(port_forward(debug_port, 30000))

    metrics_port = int(debug.get("metrics_port", 0))
    profiler_port = int(debug.get("profiler_port", 0))
    if metrics_port != 0:
        port_forwards.append(port_forward(metrics_port, 8080))
        links.append(link("http://localhost:" + str(metrics_port) + "/metrics", "metrics"))

    if profiler_port != 0:
        port_forwards.append(port_forward(profiler_port, 6060))
        links.append(link("http://localhost:" + str(profiler_port) + "/debug/pprof", "profiler"))

    return port_forwards, links
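
# For example, a tilt-settings.yaml debug entry such as (port values are illustrative):
#
#     debug:
#       core:
#         port: 30000
#         metrics_port: 8080
#         profiler_port: 6060
#
# forwards localhost:30000 to container port 30000 (where the debugger is expected to listen),
# localhost:8080 to the metrics endpoint (container port 8080), and localhost:6060 to pprof
# (container port 6060), adding UI links for the metrics and profiler endpoints.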

# Configures a provider by doing the following:
#
# 1. Enables a local_resource go build of the provider's manager binary
# 2. Configures a docker build for the provider, with live updating of the manager binary
# 3. Runs kustomize for the provider's config/default and applies it
def enable_provider(name, debug):
    p = providers.get(name)
    label = p.get("label")

    port_forwards, links = get_port_forwards(debug)

    if p.get("image"):
        build_go_binary(
            context = p.get("context"),
            reload_deps = p.get("live_reload_deps"),
            debug = debug,
            go_main = p.get("go_main", "main.go"),
            binary_name = "manager",
            label = label,
        )

        build_docker_image(
            image = p.get("image"),
            context = p.get("context"),
            binary_name = "manager",
            additional_docker_helper_commands = p.get("additional_docker_helper_commands", ""),
            additional_docker_build_commands = p.get("additional_docker_build_commands", ""),
            port_forwards = port_forwards,
        )

    additional_objs = []
    p_resources = p.get("additional_resources", [])
    for resource in p_resources:
        k8s_yaml(p.get("context") + "/" + resource)
        additional_objs = additional_objs + decode_yaml_stream(read_file(p.get("context") + "/" + resource))

    if p.get("apply_provider_yaml", True):
        yaml = read_file("./.tiltbuild/yaml/{}.provider.yaml".format(name))
        k8s_yaml(yaml, allow_duplicates = True)
        objs = decode_yaml_stream(yaml)
        k8s_resource(
            workload = find_object_name(objs, "Deployment"),
            objects = [find_object_qualified_name(objs, "Provider")] + find_all_objects_names(additional_objs),
            new_name = label.lower() + "_controller",
            labels = [label, "ALL.controllers"],
            port_forwards = port_forwards,
            links = links,
            resource_deps = ["provider_crd"] + p.get("resource_deps", []),
        )

def find_object_name(objs, kind):
    for o in objs:
        # Ignore objects that are not part of the provider, e.g. the ASO Deployment in CAPZ.
        if o["kind"] == kind and "cluster.x-k8s.io/provider" in o["metadata"]["labels"]:
            return o["metadata"]["name"]
    return ""

def find_object_qualified_name(objs, kind):
    for o in objs:
        if o["kind"] == kind:
            return "{}:{}:{}".format(o["metadata"]["name"], kind, o["metadata"]["namespace"])
    return ""

def find_all_objects_names(objs):
    qualified_names = []
    for o in objs:
        if "namespace" in o["metadata"] and o["metadata"]["namespace"] != "":
            qualified_names = qualified_names + ["{}:{}:{}".format(o["metadata"]["name"], o["kind"], o["metadata"]["namespace"])]
        else:
            qualified_names = qualified_names + ["{}:{}".format(o["metadata"]["name"], o["kind"])]
    return qualified_names
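
# The helpers above produce Tilt object selectors of the form "name:kind[:namespace]", e.g.
# "capi-controller-manager:Deployment:capi-system" for a namespaced object (an illustrative name)
# and "name:kind" for a cluster-scoped one.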

# Users may define their own Tilt customizations in tilt.d. This directory is excluded from git and these files will
# not be checked in to version control.
def include_user_tilt_files():
    user_tiltfiles = listdir("tilt.d")
    for f in user_tiltfiles:
        include(f)

# Enable core cluster-api plus everything listed in 'enable_providers' in tilt-settings.yaml/json
def enable_providers():
    for name in get_providers():
        enable_provider(name, settings.get("debug").get(name, {}))

def get_providers():
    user_enable_providers = settings.get("enable_providers", [])
    return {k: "" for k in user_enable_providers + always_enable_providers}.keys()
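
# The dict comprehension in get_providers de-duplicates while preserving order (Starlark dicts keep
# insertion order); for example, enable_providers ["core", "docker"] combined with
# always_enable_providers ["core"] yields ["core", "docker"].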

def deploy_provider_crds():
    # NOTE: we are applying raw yaml for clusterctl resources (vs delegating this to clusterctl methods) because
    # we need to control the ordering between creating these CRDs and creating providers.
    k8s_yaml(read_file("./.tiltbuild/yaml/clusterctl.crd.yaml"))
    k8s_resource(
        objects = ["providers.clusterctl.cluster.x-k8s.io:CustomResourceDefinition:default"],
        new_name = "provider_crd",
    )

def deploy_observability():
    if "promtail" in settings.get("deploy_observability", []):
        k8s_yaml(read_file("./.tiltbuild/yaml/promtail.observability.yaml"), allow_duplicates = True)
        k8s_resource(workload = "promtail", extra_pod_selectors = [{"app": "promtail"}], labels = ["observability"], resource_deps = ["loki"], objects = ["promtail:serviceaccount"])

    if "loki" in settings.get("deploy_observability", []):
        k8s_yaml(read_file("./.tiltbuild/yaml/loki.observability.yaml"), allow_duplicates = True)
        k8s_resource(workload = "loki", port_forwards = "3100", extra_pod_selectors = [{"app": "loki"}], labels = ["observability"], objects = ["loki:serviceaccount"])

        cmd_button(
            "loki:import logs",
            argv = ["sh", "-c", "cd ./hack/tools/internal/log-push && go run ./main.go --log-path=$LOG_PATH"],
            resource = "loki",
            icon_name = "import_export",
            text = "Import logs",
            inputs = [
                text_input("LOG_PATH", label = "Log path, one of: GCS path, ProwJob URL or local folder"),
            ],
        )

    if "tempo" in settings.get("deploy_observability", []):
        k8s_yaml(read_file("./.tiltbuild/yaml/tempo.observability.yaml"), allow_duplicates = True)

        # Port-forward the tracing port to localhost, so we can also send traces from the local machine.
        k8s_resource(workload = "tempo", port_forwards = "4317:4317", extra_pod_selectors = [{"app": "tempo"}], labels = ["observability"])

    if "grafana" in settings.get("deploy_observability", []):
        k8s_yaml(read_file("./.tiltbuild/yaml/grafana.observability.yaml"), allow_duplicates = True)
        k8s_resource(workload = "grafana", port_forwards = "3001:3000", extra_pod_selectors = [{"app": "grafana"}], labels = ["observability"], objects = ["grafana:serviceaccount"])

    if "prometheus" in settings.get("deploy_observability", []):
        k8s_yaml(read_file("./.tiltbuild/yaml/prometheus.observability.yaml"), allow_duplicates = True)
        k8s_resource(workload = "prometheus-server", new_name = "prometheus", port_forwards = "9090", extra_pod_selectors = [{"app": "prometheus"}], labels = ["observability"], objects = ["prometheus-server:serviceaccount"])

    if "kube-state-metrics" in settings.get("deploy_observability", []):
        k8s_yaml(read_file("./.tiltbuild/yaml/kube-state-metrics.observability.yaml"), allow_duplicates = True)
        k8s_resource(workload = "kube-state-metrics", new_name = "kube-state-metrics", extra_pod_selectors = [{"app": "kube-state-metrics"}], labels = ["observability"], objects = ["kube-state-metrics:serviceaccount"])

    if "parca" in settings.get("deploy_observability", []):
        k8s_yaml(read_file("./.tiltbuild/yaml/parca.observability.yaml"), allow_duplicates = True)
        k8s_resource(workload = "parca", new_name = "parca", port_forwards = "7070", extra_pod_selectors = [{"app": "parca"}], labels = ["observability"], objects = ["parca:serviceaccount"])

    if "metrics-server" in settings.get("deploy_observability", []):
        k8s_yaml(read_file("./.tiltbuild/yaml/metrics-server.observability.yaml"), allow_duplicates = True)
        k8s_resource(workload = "metrics-server", new_name = "metrics-server", extra_pod_selectors = [{"app": "metrics-server"}], labels = ["observability"], objects = ["metrics-server:serviceaccount"])

    if "visualizer" in settings.get("deploy_observability", []):
        k8s_yaml(read_file("./.tiltbuild/yaml/visualizer.observability.yaml"), allow_duplicates = True)
        k8s_resource(
            workload = "capi-visualizer",
            new_name = "visualizer",
            port_forwards = [port_forward(local_port = 8000, container_port = 8081, name = "View visualization")],
            labels = ["observability"],
            objects = ["capi-visualizer:serviceaccount"],
        )

def deploy_additional_kustomizations():
    for name in settings.get("additional_kustomizations", []):
        yaml = read_file("./.tiltbuild/yaml/{}.kustomization.yaml".format(name))
        k8s_yaml(yaml)
        objs = decode_yaml_stream(yaml)
        print("objects")
        print(find_all_objects_names(objs))
        k8s_resource(
            new_name = name,
            objects = find_all_objects_names(objs),
            labels = ["kustomization"],
        )

def prepare_all():
    tools_arg = "--tools kustomize,envsubst,clusterctl "
    tilt_settings_file_arg = "--tilt-settings-file " + tilt_file

    cmd = "make -B tilt-prepare && ./hack/tools/bin/tilt-prepare {tools_arg}{tilt_settings_file_arg}".format(
        tools_arg = tools_arg,
        tilt_settings_file_arg = tilt_settings_file_arg,
    )
    local(cmd, env = settings.get("kustomize_substitutions", {}))

# create cluster template resources from cluster-template files in the templates directory
def cluster_templates():
    substitutions = settings.get("kustomize_substitutions", {})

    # Ensure we have default values for a small set of well-known variables
    substitutions["NAMESPACE"] = substitutions.get("NAMESPACE", "default")
    substitutions["KUBERNETES_VERSION"] = substitutions.get("KUBERNETES_VERSION", kubernetes_version)
    substitutions["CONTROL_PLANE_MACHINE_COUNT"] = substitutions.get("CONTROL_PLANE_MACHINE_COUNT", "1")
    substitutions["WORKER_MACHINE_COUNT"] = substitutions.get("WORKER_MACHINE_COUNT", "1")

    template_dirs = settings.get("template_dirs", {
        "docker": ["./test/infrastructure/docker/templates"],
        "in-memory": ["./test/infrastructure/inmemory/templates"],
    })

    for provider, provider_dirs in template_dirs.items():
        if provider not in get_providers():
            continue

        p = providers.get(provider)
        label = p.get("label", provider)

        for template_dir in provider_dirs:
            template_list = [filename for filename in listdir(template_dir) if os.path.basename(filename).endswith("yaml")]
            for filename in template_list:
                deploy_templates(filename, label, substitutions)

def deploy_templates(filename, label, substitutions):
    # validate filename exists
    if not os.path.exists(filename):
        fail(filename + " not found")

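    # Template file names determine the resources created below (example names are illustrative):
    #   clusterclass-quick-start.yaml      -> ClusterClass "quick-start"
    #   cluster-template-development.yaml  -> cluster template "development"
    #   cluster-template.yaml              -> cluster template "default"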
    basename = os.path.basename(filename)
    if basename.endswith(".yaml"):
        if basename.startswith("clusterclass-"):
            clusterclass_name = basename.replace("clusterclass-", "").replace(".yaml", "")
            deploy_clusterclass(clusterclass_name, label, filename, substitutions)
        elif basename.startswith("cluster-template-"):
            template_name = basename.replace("cluster-template-", "").replace(".yaml", "")
            deploy_cluster_template(template_name, label, filename, substitutions)
        elif basename == "cluster-template.yaml":
            template_name = "default"
            deploy_cluster_template(template_name, label, filename, substitutions)

def deploy_clusterclass(clusterclass_name, label, filename, substitutions):
    apply_clusterclass_cmd = "cat " + filename + " | " + envsubst_cmd + " | " + kubectl_cmd + " apply --namespace=$NAMESPACE -f - && echo \"ClusterClass created from '" + filename + "', don't forget to delete\n\""
    delete_clusterclass_cmd = kubectl_cmd + " --namespace=$NAMESPACE delete clusterclass " + clusterclass_name + ' --ignore-not-found=true; echo "\n"'

    local_resource(
        name = clusterclass_name,
        cmd = ["bash", "-c", apply_clusterclass_cmd],
        env = substitutions,
        auto_init = False,
        trigger_mode = TRIGGER_MODE_MANUAL,
        labels = [label + ".clusterclasses"],
    )

    cmd_button(
        clusterclass_name + ":apply",
        argv = ["bash", "-c", apply_clusterclass_cmd],
        env = dictionary_to_list_of_string(substitutions),
        resource = clusterclass_name,
        icon_name = "note_add",
        text = "Apply `" + clusterclass_name + "` ClusterClass",
        inputs = [
            text_input("NAMESPACE", default = substitutions.get("NAMESPACE")),
        ],
    )

    cmd_button(
        clusterclass_name + ":delete",
        argv = ["bash", "-c", delete_clusterclass_cmd],
        env = dictionary_to_list_of_string(substitutions),
        resource = clusterclass_name,
        icon_name = "delete_forever",
        text = "Delete `" + clusterclass_name + "` ClusterClass",
        inputs = [
            text_input("NAMESPACE", default = substitutions.get("NAMESPACE")),
        ],
    )

def deploy_cluster_template(template_name, label, filename, substitutions):
    apply_cluster_template_cmd = "CLUSTER_NAME=" + template_name + "-$RANDOM;" + clusterctl_cmd + " generate cluster -n $NAMESPACE $CLUSTER_NAME --from " + filename + " | " + kubectl_cmd + " apply -f - && echo \"Cluster '$CLUSTER_NAME' created, don't forget to delete\n\""

    # List clusters named <template_name>-<digits> (the $RANDOM suffix added above) and delete any that are found.
    delete_clusters_cmd = 'DELETED=$(echo "$(bash -c "' + kubectl_cmd + ' --namespace=$NAMESPACE get clusters -A --no-headers -o custom-columns=":metadata.name"")" | grep -E "^' + template_name + '-[[:digit:]]{1,5}$"); if [ -z "$DELETED" ]; then echo "Nothing to delete for cluster template ' + template_name + '"; else echo "Deleting clusters:\n$DELETED\n"; echo $DELETED | xargs -L1 ' + kubectl_cmd + ' delete cluster; fi; echo "\n"'

    local_resource(
        name = template_name,
        cmd = ["bash", "-c", apply_cluster_template_cmd],
        env = substitutions,
        auto_init = False,
        trigger_mode = TRIGGER_MODE_MANUAL,
        labels = [label + ".templates"],
    )

    cmd_button(
        template_name + ":apply",
        argv = ["bash", "-c", apply_cluster_template_cmd],
        env = dictionary_to_list_of_string(substitutions),
        resource = template_name,
        icon_name = "add_box",
        text = "Create `" + template_name + "` cluster",
        inputs = [
            text_input("NAMESPACE", default = substitutions.get("NAMESPACE")),
            text_input("KUBERNETES_VERSION", default = substitutions.get("KUBERNETES_VERSION")),
            text_input("CONTROL_PLANE_MACHINE_COUNT", default = substitutions.get("CONTROL_PLANE_MACHINE_COUNT")),
            text_input("WORKER_MACHINE_COUNT", default = substitutions.get("WORKER_MACHINE_COUNT")),
        ],
    )

    cmd_button(
        template_name + ":delete",
        argv = ["bash", "-c", delete_clusters_cmd],
        env = dictionary_to_list_of_string(substitutions),
        resource = template_name,
        icon_name = "delete_forever",
        text = "Delete `" + template_name + "` clusters",
        inputs = [
            text_input("NAMESPACE", default = substitutions.get("NAMESPACE")),
        ],
    )

    cmd_button(
        template_name + ":delete-all",
        argv = ["bash", "-c", kubectl_cmd + " delete clusters --all --wait=false"],
        env = dictionary_to_list_of_string(substitutions),
        resource = template_name,
        icon_name = "delete_sweep",
        text = "Delete all workload clusters",
    )

# Converts a dictionary to a list of strings in the format "name=value"
def dictionary_to_list_of_string(substitutions):
    substitutions_list = []
    for name, value in substitutions.items():
        substitutions_list.append(name + "=" + value)
    return substitutions_list
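
# For example (a sketch of the conversion), {"NAMESPACE": "default", "KUBERNETES_VERSION": "v1.29.2"}
# becomes ["NAMESPACE=default", "KUBERNETES_VERSION=v1.29.2"]; cmd_button expects env as a list of
# "name=value" strings, while local_resource above takes the dictionary directly.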

##############################
# Actual work happens here
##############################

include_user_tilt_files()

load_provider_tiltfiles()

prepare_all()

deploy_provider_crds()

deploy_observability()

deploy_additional_kustomizations()

enable_providers()

cluster_templates()