sigs.k8s.io/cluster-api-provider-azure@v1.14.3/Tiltfile

# -*- mode: Python -*-

# Pre-requisite make targets "install-tools" and "kind-create" ensure that the below tools are already installed.
envsubst_cmd = "./hack/tools/bin/envsubst"
kubectl_cmd = "./hack/tools/bin/kubectl"
helm_cmd = "./hack/tools/bin/helm"
kind_cmd = "./hack/tools/bin/kind"
tools_bin = "./hack/tools/bin"

# Add tools to path
os.putenv("PATH", os.getenv("PATH") + ":" + tools_bin)

update_settings(k8s_upsert_timeout_secs = 60)  # on first tilt up, often can take longer than 30 seconds

# Default settings for tilt
settings = {
    "allowed_contexts": [
        "kind-capz",
    ],
    "deploy_cert_manager": True,
    "preload_images_for_kind": True,
    "kind_cluster_name": "capz",
    "capi_version": "v1.6.4",
    "cert_manager_version": "v1.14.4",
    "kubernetes_version": "v1.28.3",
    "aks_kubernetes_version": "v1.28.3",
    "flatcar_version": "3374.2.1",
    "azure_location": "eastus",
    "control_plane_machine_count": "1",
    "az_control_plane_machine_type": "Standard_B2s",
    "worker_machine_count": "2",
    "az_node_machine_type": "Standard_B2s",
    "cluster_class_name": "default",
}

# Auth keys that need to be loaded from the environment
keys = ["AZURE_SUBSCRIPTION_ID", "AZURE_TENANT_ID", "AZURE_CLIENT_SECRET", "AZURE_CLIENT_ID"]

# Get global settings from tilt-settings.yaml or tilt-settings.json
tilt_file = "./tilt-settings.yaml" if os.path.exists("./tilt-settings.yaml") else "./tilt-settings.json"
settings.update(read_yaml(tilt_file, default = {}))
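
# An illustrative tilt-settings.yaml (every value below is a placeholder, not a real
# credential or a default shipped with this file); the keys mirror what this Tiltfile
# reads: kustomize_substitutions for the auth keys listed above, plus optional
# allowed_contexts, default_registry, and trigger_mode overrides.
#
#   kustomize_substitutions:
#     AZURE_SUBSCRIPTION_ID: "00000000-0000-0000-0000-000000000000"
#     AZURE_TENANT_ID: "00000000-0000-0000-0000-000000000000"
#     AZURE_CLIENT_ID: "00000000-0000-0000-0000-000000000000"
#     AZURE_CLIENT_SECRET: "placeholder-client-secret"
#   allowed_contexts:
#     - kind-capz
#   default_registry: "localhost:5000"
#   trigger_mode: manual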

if settings.get("trigger_mode") == "manual":
    trigger_mode(TRIGGER_MODE_MANUAL)

if "allowed_contexts" in settings:
    allow_k8s_contexts(settings.get("allowed_contexts"))

if "default_registry" in settings:
    default_registry(settings.get("default_registry"))

os_arch = str(local("go env GOARCH")).rstrip("\n")

# deploy CAPI
def deploy_capi():
    version = settings.get("capi_version")
    capi_uri = "https://github.com/kubernetes-sigs/cluster-api/releases/download/{}/cluster-api-components.yaml".format(version)
    cmd = "curl --retry 3 -sSL {} | {} | {} apply -f -".format(capi_uri, envsubst_cmd, kubectl_cmd)
    local(cmd, quiet = True)
    if settings.get("extra_args"):
        extra_args = settings.get("extra_args")
        if extra_args.get("core"):
            core_extra_args = extra_args.get("core")
            if core_extra_args:
                for namespace in ["capi-system", "capi-webhook-system"]:
                    patch_args_with_extra_args(namespace, "capi-controller-manager", core_extra_args)
        if extra_args.get("kubeadm-bootstrap"):
            kb_extra_args = extra_args.get("kubeadm-bootstrap")
            if kb_extra_args:
                patch_args_with_extra_args("capi-kubeadm-bootstrap-system", "capi-kubeadm-bootstrap-controller-manager", kb_extra_args)

def patch_args_with_extra_args(namespace, name, extra_args):
    args_str = str(local("{} get deployments {} -n {} -o jsonpath={{.spec.template.spec.containers[1].args}}".format(kubectl_cmd, name, namespace)))
    args_to_add = [arg for arg in extra_args if arg not in args_str]
    if args_to_add:
        args = args_str[1:-1].split()
        args.extend(args_to_add)
        patch = [{
            "op": "replace",
            "path": "/spec/template/spec/containers/1/args",
            "value": args,
        }]
        local("{} patch deployment {} -n {} --type json -p='{}'".format(kubectl_cmd, name, namespace, str(encode_json(patch)).replace("\n", "")))
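
# An illustrative extra_args block for tilt-settings.yaml, matching the keys consumed by
# deploy_capi() above and capz() below; the flag values are examples only:
#
#   extra_args:
#     core:
#       - "--v=4"
#     kubeadm-bootstrap:
#       - "--v=4"
#     azure:
#       - "--v=4"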

# Users may define their own Tilt customizations in tilt.d. This directory is excluded from git and these files will
# not be checked in to version control.
def include_user_tilt_files():
    user_tiltfiles = listdir("tilt.d")
    for f in user_tiltfiles:
        include(f)
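
# For example, a hypothetical tilt.d/custom.tiltfile could register extra resources with
# standard Tilt built-ins (the target name and command below are made up for illustration):
#
#   local_resource(
#       "lint",
#       cmd = "make lint",
#       auto_init = False,
#       trigger_mode = TRIGGER_MODE_MANUAL,
#   )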

def append_arg_for_container_in_deployment(yaml_stream, name, namespace, contains_image_name, args):
    for item in yaml_stream:
        if item["kind"] == "Deployment" and item.get("metadata").get("name") == name and item.get("metadata").get("namespace") == namespace:
            containers = item.get("spec").get("template").get("spec").get("containers")
            for container in containers:
                if contains_image_name in container.get("image"):
                    container.get("args").extend(args)

def fixup_yaml_empty_arrays(yaml_str):
    yaml_str = yaml_str.replace("conditions: null", "conditions: []")
    return yaml_str.replace("storedVersions: null", "storedVersions: []")

def validate_auth():
    substitutions = settings.get("kustomize_substitutions", {})
    os.environ.update(substitutions)
    for sub in substitutions:
        if sub[-4:] == "_B64":
            os.environ[sub[:-4]] = base64_decode(os.environ[sub])
    missing = [k for k in keys if not os.environ.get(k)]
    if missing:
        fail("missing kustomize_substitutions keys {} in tilt-settings.yaml/json".format(missing))

tilt_helper_dockerfile_header = """
# Tilt image
FROM golang:1.20 as tilt-helper
# Support live reloading with Tilt
RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh  && \
    wget --output-document /start.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/start.sh && \
    chmod +x /start.sh && chmod +x /restart.sh && \
    touch /process.txt && chmod 0777 /process.txt `# pre-create PID file to allow even non-root users to run the image`
"""

tilt_dockerfile_header = """
FROM gcr.io/distroless/base:debug as tilt
WORKDIR /tilt
RUN ["/busybox/chmod", "0777", "."]
COPY --from=tilt-helper /process.txt .
COPY --from=tilt-helper /start.sh .
COPY --from=tilt-helper /restart.sh .
COPY manager .
"""

# Install the observability helm charts (OpenTelemetry collector, Jaeger, cluster-api-visualizer) and register their Tilt resources
def observability():
    instrumentation_key = os.getenv("AZURE_INSTRUMENTATION_KEY", "")
    if instrumentation_key == "":
        warn("AZURE_INSTRUMENTATION_KEY is not set, so traces won't be exported to Application Insights")
        trace_links = []
    else:
        trace_links = [link("https://ms.portal.azure.com/#blade/HubsExtension/BrowseResource/resourceType/microsoft.insights%2Fcomponents", "App Insights")]
    k8s_yaml(helm(
        "./hack/observability/opentelemetry/chart",
        name = "opentelemetry-collector",
        namespace = "capz-system",
        values = ["./hack/observability/opentelemetry/values.yaml"],
        set = ["config.exporters.azuremonitor.instrumentation_key=" + instrumentation_key],
    ))
    k8s_yaml(helm(
        "./hack/observability/jaeger/chart",
        name = "jaeger-all-in-one",
        namespace = "capz-system",
        set = [
            "crd.install=false",
            "rbac.create=false",
            "resources.limits.cpu=200m",
            "resources.limits.memory=256Mi",
        ],
    ))

    k8s_yaml(helm(
        "./hack/observability/cluster-api-visualizer/chart",
        name = "visualizer",
        namespace = "capz-system",
    ))

    k8s_resource(
        workload = "jaeger-all-in-one",
        new_name = "traces: jaeger-all-in-one",
        port_forwards = [port_forward(16686, name = "View traces", link_path = "/search?service=capz")],
        links = trace_links,
        labels = ["observability"],
    )
    k8s_resource(
        workload = "prometheus-operator",
        new_name = "metrics: prometheus-operator",
        port_forwards = [port_forward(local_port = 9090, container_port = 9090, name = "View metrics")],
        extra_pod_selectors = [{"app": "prometheus"}],
        labels = ["observability"],
    )
    k8s_resource(workload = "opentelemetry-collector", labels = ["observability"])
    k8s_resource(workload = "opentelemetry-collector-agent", labels = ["observability"])
    k8s_resource(
        workload = "capi-visualizer",
        new_name = "visualizer",
        port_forwards = [port_forward(local_port = 8000, container_port = 8081, name = "View visualization")],
        labels = ["observability"],
    )

    k8s_resource(workload = "capz-controller-manager", labels = ["cluster-api"])
    k8s_resource(workload = "azureserviceoperator-controller-manager", labels = ["cluster-api"])

# Build CAPZ and add feature gates
def capz():
    # Apply the kustomized yaml for this provider
    yaml = str(kustomizesub("./hack/observability"))  # build an observable kind deployment by default

    # add extra_args if they are defined
    if settings.get("extra_args"):
        azure_extra_args = settings.get("extra_args").get("azure")
        if azure_extra_args:
            yaml_dict = decode_yaml_stream(yaml)
            append_arg_for_container_in_deployment(yaml_dict, "capz-controller-manager", "capz-system", "cluster-api-azure-controller", azure_extra_args)
            yaml = str(encode_yaml_stream(yaml_dict))
            yaml = fixup_yaml_empty_arrays(yaml)

    # Forge the build command
    ldflags = "-extldflags \"-static\" " + str(local("hack/version.sh")).rstrip("\n")
    build_env = "CGO_ENABLED=0 GOOS=linux GOARCH={arch}".format(arch = os_arch)
    build_cmd = "{build_env} go build -ldflags '{ldflags}' -o .tiltbuild/manager".format(
        build_env = build_env,
        ldflags = ldflags,
    )

    # Set up a local_resource build of the provider's manager binary.
    local_resource(
        "manager",
        cmd = "mkdir -p .tiltbuild; " + build_cmd,
        deps = ["api", "azure", "config", "controllers", "exp", "feature", "pkg", "util", "go.mod", "go.sum", "main.go"],
        labels = ["cluster-api"],
    )

    dockerfile_contents = "\n".join([
        tilt_helper_dockerfile_header,
        tilt_dockerfile_header,
    ])

    entrypoint = ["sh", "/tilt/start.sh", "/tilt/manager"]
    extra_args = settings.get("extra_args")
    if extra_args:
        entrypoint.extend(extra_args)

    # Set up an image build for the provider. The live update configuration syncs the output from the local_resource
    # build into the container.
    docker_build(
        ref = "gcr.io/k8s-staging-cluster-api-azure/cluster-api-azure-controller",
        context = "./.tiltbuild/",
        dockerfile_contents = dockerfile_contents,
        target = "tilt",
        entrypoint = entrypoint,
        only = "manager",
        live_update = [
            sync(".tiltbuild/manager", "/tilt/manager"),
            run("sh /tilt/restart.sh"),
        ],
        ignore = ["templates"],
    )

    k8s_yaml(blob(yaml))

def create_identity_secret():
    # create secret for identity password
    local(kubectl_cmd + " delete secret cluster-identity-secret --ignore-not-found=true")

    os.putenv("AZURE_CLUSTER_IDENTITY_SECRET_NAME", "cluster-identity-secret")
    os.putenv("AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE", "default")
    os.putenv("CLUSTER_IDENTITY_NAME", "cluster-identity")

    os.putenv("AZURE_CLIENT_SECRET_B64", base64_encode(os.environ.get("AZURE_CLIENT_SECRET")))
    local("cat templates/azure-cluster-identity/secret.yaml | " + envsubst_cmd + " | " + kubectl_cmd + " apply -f -", quiet = True, echo_off = True)
    os.unsetenv("AZURE_CLIENT_SECRET_B64")

def create_crs():
    # create config maps
    local(kubectl_cmd + " delete configmaps csi-proxy-addon --ignore-not-found=true")
    local(kubectl_cmd + " create configmap csi-proxy-addon --from-file=templates/addons/windows/csi-proxy/csi-proxy.yaml")

    # need to set version for kube-proxy on windows.
    os.putenv("KUBERNETES_VERSION", settings.get("kubernetes_version", {}))
    local(kubectl_cmd + " create configmap calico-windows-addon --from-file=templates/addons/windows/calico/ --dry-run=client -o yaml | " + envsubst_cmd + " | " + kubectl_cmd + " apply -f -")

    # set up crs
    local(kubectl_cmd + " apply -f templates/addons/windows/calico-resource-set.yaml")
    local(kubectl_cmd + " apply -f templates/addons/windows/csi-proxy/csi-proxy-resource-set.yaml")

# create flavor resources from cluster-template files in the templates directory
def flavors():
    substitutions = settings.get("kustomize_substitutions", {})

    az_key_b64_name = "AZURE_SSH_PUBLIC_KEY_B64"
    az_key_name = "AZURE_SSH_PUBLIC_KEY"
    default_key_path = "$HOME/.ssh/id_rsa.pub"

    if substitutions.get(az_key_b64_name):
        os.environ.update({az_key_b64_name: substitutions.get(az_key_b64_name)})
        os.environ.update({az_key_name: base64_decode(substitutions.get(az_key_b64_name))})
    else:
        print("{} was not specified in tilt-settings, attempting to load {}".format(az_key_b64_name, default_key_path))
        os.environ.update({az_key_b64_name: base64_encode_file(default_key_path)})
        os.environ.update({az_key_name: read_file_from_path(default_key_path)})

    template_list = [item for item in listdir("./templates")]
    template_list = [template for template in template_list if os.path.basename(template).endswith("yaml")]

    for template in template_list:
        deploy_worker_templates(template, substitutions)

    local_resource(
        name = "delete-all-workload-clusters",
        cmd = kubectl_cmd + " delete clusters --all --wait=false",
        auto_init = False,
        trigger_mode = TRIGGER_MODE_MANUAL,
        labels = ["flavors"],
    )
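
# An illustrative worker-templates block for tilt-settings.yaml; its keys are substituted
# into the ${VAR} placeholders of the cluster templates by deploy_worker_templates() below,
# with per-flavor values taking priority over the shared metadata values (all values here
# are placeholders):
#
#   worker-templates:
#     flavors:
#       default:
#         WORKER_MACHINE_COUNT: "3"
#     metadata:
#       KUBERNETES_VERSION: "v1.28.3"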

def deploy_worker_templates(template, substitutions):
    # validate template exists
    if not os.path.exists(template):
        fail(template + " not found")

    yaml = str(read_file(template))
    flavor = os.path.basename(template).replace("cluster-template-", "").replace(".yaml", "")

    # for the base cluster-template, flavor is "default"
    flavor = os.path.basename(flavor).replace("cluster-template", "default")

    # azure account and ssh replacements
    for substitution in substitutions:
        value = substitutions[substitution]
        yaml = yaml.replace("${" + substitution + "}", value)

    # if metadata defined for worker-templates in tilt_settings
    if "worker-templates" in settings:
        # first priority replacements defined per template
        if "flavors" in settings.get("worker-templates", {}):
            substitutions = settings.get("worker-templates").get("flavors").get(flavor, {})
            for substitution in substitutions:
                value = substitutions[substitution]
                yaml = yaml.replace("${" + substitution + "}", value)

        # second priority replacements defined common to templates
        if "metadata" in settings.get("worker-templates", {}):
            substitutions = settings.get("worker-templates").get("metadata", {})
            for substitution in substitutions:
                value = substitutions[substitution]
                yaml = yaml.replace("${" + substitution + "}", value)

    # programmatically define any remaining vars
    # "windows" cannot be used in the cluster name because it would set the DNS to a trademarked name during reconciliation
    substitutions = {
        "AZURE_LOCATION": settings.get("azure_location"),
        "AZURE_VNET_NAME": "${CLUSTER_NAME}-vnet",
        "AZURE_RESOURCE_GROUP": "${CLUSTER_NAME}-rg",
        "CONTROL_PLANE_MACHINE_COUNT": settings.get("control_plane_machine_count"),
        "KUBERNETES_VERSION": settings.get("kubernetes_version"),
        "AZURE_CONTROL_PLANE_MACHINE_TYPE": settings.get("az_control_plane_machine_type"),
        "WORKER_MACHINE_COUNT": settings.get("worker_machine_count"),
        "AZURE_NODE_MACHINE_TYPE": settings.get("az_node_machine_type"),
        "FLATCAR_VERSION": settings.get("flatcar_version"),
        "CLUSTER_CLASS_NAME": settings.get("cluster_class_name"),
    }

    if "aks" in flavor:
        # AKS version support is usually a bit behind CAPI version, so use an older version
        substitutions["KUBERNETES_VERSION"] = settings.get("aks_kubernetes_version")

    for substitution in substitutions:
        value = substitutions[substitution]
        yaml = yaml.replace("${" + substitution + "}", value)

    yaml = shlex.quote(yaml)
    flavor_name = os.path.basename(flavor)
    flavor_cmd = "RANDOM=$(bash -c 'echo $RANDOM'); export CLUSTER_NAME=" + flavor.replace("windows", "win") + "-$RANDOM; make generate-flavors; echo " + yaml + "> ./.tiltbuild/" + flavor + "; cat ./.tiltbuild/" + flavor + " | " + envsubst_cmd + " | " + kubectl_cmd + " apply -f -; echo \"Cluster \'$CLUSTER_NAME\' created, don't forget to delete\""

    # wait for kubeconfig to be available
    flavor_cmd += "; until " + kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig > /dev/null 2>&1; do sleep 5; done; " + kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig -o jsonpath={.data.value} | base64 --decode > ./${CLUSTER_NAME}.kubeconfig; chmod 600 ./${CLUSTER_NAME}.kubeconfig; until " + kubectl_cmd + " --kubeconfig=./${CLUSTER_NAME}.kubeconfig get nodes > /dev/null 2>&1; do sleep 5; done"

    # copy the kubeadm configmap to the calico-system namespace.
    # This is a workaround needed for the calico-node-windows daemonset to be able to run in the calico-system namespace.
    if "windows" in flavor_name:
        flavor_cmd += "; until " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig get configmap kubeadm-config --namespace=kube-system > /dev/null 2>&1; do sleep 5; done"
        flavor_cmd += "; " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig create namespace calico-system --dry-run=client -o yaml | " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig apply -f -; " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig get configmap kubeadm-config --namespace=kube-system -o yaml | sed 's/namespace: kube-system/namespace: calico-system/' | " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig apply -f -"

    flavor_cmd += get_addons(flavor_name)

    local_resource(
        name = flavor_name,
        cmd = ["sh", "-ec", flavor_cmd],
        auto_init = False,
        trigger_mode = TRIGGER_MODE_MANUAL,
        labels = ["flavors"],
        allow_parallel = True,
    )

def get_addons(flavor_name):
    # do not install calico or the out-of-tree cloud provider for AKS workload clusters
    if "aks" in flavor_name:
        return ""

    addon_cmd = "; export CIDRS=$(" + kubectl_cmd + " get cluster ${CLUSTER_NAME} -o jsonpath='{.spec.clusterNetwork.pods.cidrBlocks[*]}')"
    addon_cmd += "; export CIDR_LIST=$(bash -c 'echo $CIDRS' | tr ' ' ',')"
    addon_cmd += "; " + helm_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig install --repo https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo cloud-provider-azure --generate-name --set infra.clusterName=${CLUSTER_NAME} --set cloudControllerManager.clusterCIDR=${CIDR_LIST}"
    if "flatcar" in flavor_name:  # append caCertDir location to the cloud-provider-azure helm install command for flatcar flavor
        addon_cmd += " --set-string cloudControllerManager.caCertDir=/usr/share/ca-certificates"

    if "azure-cni-v1" in flavor_name:
        addon_cmd += "; " + kubectl_cmd + " apply -f ./templates/addons/azure-cni-v1.yaml --kubeconfig ./${CLUSTER_NAME}.kubeconfig"
    else:
        # install calico
        if "ipv6" in flavor_name:
            calico_values = "./templates/addons/calico-ipv6/values.yaml"
        elif "dual-stack" in flavor_name:
            calico_values = "./templates/addons/calico-dual-stack/values.yaml"
        else:
            calico_values = "./templates/addons/calico/values.yaml"
        addon_cmd += "; " + helm_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig install --repo https://docs.tigera.io/calico/charts --version ${CALICO_VERSION} calico tigera-operator -f " + calico_values + " --namespace tigera-operator --create-namespace"

    return addon_cmd
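
# Note: the calico install above leaves ${CALICO_VERSION} for the shell to expand when the
# flavor command runs; it is not set anywhere in this Tiltfile, so it is presumably exported
# by whatever launches Tilt (for example the project Makefile) or via kustomize_substitutions.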

def base64_encode(to_encode):
    encode_blob = local("echo '{}' | tr -d '\n' | base64 | tr -d '\n'".format(to_encode), quiet = True, echo_off = True)
    return str(encode_blob)

def base64_encode_file(path_to_encode):
    encode_blob = local("cat {} | tr -d '\n' | base64 | tr -d '\n'".format(path_to_encode), quiet = True)
    return str(encode_blob)

def read_file_from_path(path_to_read):
    str_blob = local("cat {} | tr -d '\n'".format(path_to_read), quiet = True)
    return str(str_blob)

def base64_decode(to_decode):
    decode_blob = local("echo '{}' | base64 --decode".format(to_decode), quiet = True, echo_off = True)
    return str(decode_blob)

def kustomizesub(folder):
    yaml = local("hack/kustomize-sub.sh {}".format(folder), quiet = True)
    return yaml

def waitforsystem():
    local(kubectl_cmd + " wait --for=condition=ready --timeout=300s pod --all -n capi-kubeadm-bootstrap-system")
    local(kubectl_cmd + " wait --for=condition=ready --timeout=300s pod --all -n capi-kubeadm-control-plane-system")
    local(kubectl_cmd + " wait --for=condition=ready --timeout=300s pod --all -n capi-system")

##############################
# Actual work happens here
##############################

validate_auth()

include_user_tilt_files()

load("ext://cert_manager", "deploy_cert_manager")

if settings.get("deploy_cert_manager"):
    deploy_cert_manager(version = settings.get("cert_manager_version"))

deploy_capi()

create_identity_secret()

capz()

observability()

waitforsystem()

create_crs()

flavors()
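
# Typical flow, sketched from the prerequisite comment at the top of this file rather than
# from the exact Makefile targets: install the tools and create the kind-capz management
# cluster first, then start Tilt from the repository root.
#
#   make install-tools
#   make kind-create
#   tilt up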