# -*- mode: Python -*-

# Tiltfile for developing cluster-api-provider-azure (CAPZ).
# Pre-requisite make targets "install-tools" and "kind-create" ensure that the below tools are already installed.
envsubst_cmd = "./hack/tools/bin/envsubst"
kubectl_cmd = "./hack/tools/bin/kubectl"
helm_cmd = "./hack/tools/bin/helm"
kind_cmd = "./hack/tools/bin/kind"
tools_bin = "./hack/tools/bin"

# Add tools to path
os.putenv("PATH", os.getenv("PATH") + ":" + tools_bin)

update_settings(k8s_upsert_timeout_secs = 60)  # on first tilt up, often can take longer than 30 seconds

# Default settings for tilt
settings = {
    "allowed_contexts": [
        "kind-capz",
    ],
    "deploy_cert_manager": True,
    "preload_images_for_kind": True,
    "kind_cluster_name": "capz",
    "capi_version": "v1.8.3",
    "caaph_version": "v0.2.5",
    "cert_manager_version": "v1.15.3",
    "kubernetes_version": "v1.28.3",
    "aks_kubernetes_version": "v1.28.3",
    "flatcar_version": "3374.2.1",
    "azure_location": "eastus",
    "control_plane_machine_count": "1",
    "az_control_plane_machine_type": "Standard_B2s",
    "worker_machine_count": "2",
    "az_node_machine_type": "Standard_B2s",
    "cluster_class_name": "default",
}

# Auth keys that need to be loaded from the environment
keys = ["AZURE_SUBSCRIPTION_ID", "AZURE_TENANT_ID", "AZURE_CLIENT_ID"]

# Get global settings from tilt-settings.yaml or tilt-settings.json
tilt_file = "./tilt-settings.yaml" if os.path.exists("./tilt-settings.yaml") else "./tilt-settings.json"
settings.update(read_yaml(tilt_file, default = {}))

if settings.get("trigger_mode") == "manual":
    trigger_mode(TRIGGER_MODE_MANUAL)

if "allowed_contexts" in settings:
    allow_k8s_contexts(settings.get("allowed_contexts"))

if "default_registry" in settings:
    default_registry(settings.get("default_registry"))

# Build for the host's architecture, except when the management cluster is AKS,
# whose nodes are amd64.
os_arch = str(local("go env GOARCH")).rstrip("\n")
if "aks" in settings.get("kustomize_substitutions", {}).get("MGMT_CLUSTER_NAME", ""):
    print("Using AKS as management cluster, setting os_arch to amd64")
    os_arch = "amd64"

# deploy CAPI: fetch the released cluster-api components manifest, run it through
# envsubst, and apply it. Optionally patches controller args from "extra_args".
def deploy_capi():
    version = settings.get("capi_version")
    capi_uri = "https://github.com/kubernetes-sigs/cluster-api/releases/download/{}/cluster-api-components.yaml".format(version)
    cmd = "curl --retry 3 -sSL {} | {} | {} apply -f -".format(capi_uri, envsubst_cmd, kubectl_cmd)
    local(cmd, quiet = True)
    if settings.get("extra_args"):
        extra_args = settings.get("extra_args")
        if extra_args.get("core"):
            core_extra_args = extra_args.get("core")
            for namespace in ["capi-system", "capi-webhook-system"]:
                patch_args_with_extra_args(namespace, "capi-controller-manager", core_extra_args)
        if extra_args.get("kubeadm-bootstrap"):
            kb_extra_args = extra_args.get("kubeadm-bootstrap")
            patch_args_with_extra_args("capi-kubeadm-bootstrap-system", "capi-kubeadm-bootstrap-controller-manager", kb_extra_args)

# deploy CAAPH: the cluster-api addon provider for Helm, same pattern as deploy_capi.
def deploy_caaph():
    version = settings.get("caaph_version")

    caaph_uri = "https://github.com/kubernetes-sigs/cluster-api-addon-provider-helm/releases/download/{}/addon-components.yaml".format(version)
    cmd = "curl --retry 3 -sSL {} | {} | {} apply -f -".format(caaph_uri, envsubst_cmd, kubectl_cmd)
    local(cmd, quiet = True)
    if settings.get("extra_args"):
        extra_args = settings.get("extra_args")
        if extra_args.get("helm"):
            core_extra_args = extra_args.get("helm")
            for namespace in ["caaph-system", "caaph-webhook-system"]:
                patch_args_with_extra_args(namespace, "caaph-controller-manager", core_extra_args)

# Append extra_args to a deployment's manager container (containers[1]) via a JSON patch,
# skipping args that are already present.
def patch_args_with_extra_args(namespace, name, extra_args):
    args_str = str(local("{} get deployments {} -n {} -o jsonpath={{.spec.template.spec.containers[1].args}}".format(kubectl_cmd, name, namespace)))
    args_to_add = [arg for arg in extra_args if arg not in args_str]
    if args_to_add:
        # args_str is a JSON-ish array like "[a b c]"; strip the brackets and split.
        args = args_str[1:-1].split()
        args.extend(args_to_add)
        patch = [{
            "op": "replace",
            "path": "/spec/template/spec/containers/1/args",
            "value": args,
        }]
        local("{} patch deployment {} -n {} --type json -p='{}'".format(kubectl_cmd, name, namespace, str(encode_json(patch)).replace("\n", "")))

# Users may define their own Tilt customizations in tilt.d. This directory is excluded from git and these files will
# not be checked in to version control.
def include_user_tilt_files():
    user_tiltfiles = listdir("tilt.d")
    for f in user_tiltfiles:
        include(f)

# In the decoded yaml stream, find the named Deployment and extend the args of every
# container whose image contains contains_image_name.
def append_arg_for_container_in_deployment(yaml_stream, name, namespace, contains_image_name, args):
    for item in yaml_stream:
        if item["kind"] == "Deployment" and item.get("metadata").get("name") == name and item.get("metadata").get("namespace") == namespace:
            containers = item.get("spec").get("template").get("spec").get("containers")
            for container in containers:
                if contains_image_name in container.get("image"):
                    container.get("args").extend(args)

# encode_yaml_stream emits "null" for empty arrays, which some CRD fields reject; fix them up.
def fixup_yaml_empty_arrays(yaml_str):
    yaml_str = yaml_str.replace("conditions: null", "conditions: []")
    return yaml_str.replace("storedVersions: null", "storedVersions: []")

# Export kustomize_substitutions into the environment (decoding *_B64 values) and
# fail fast if any required Azure auth key is missing.
def validate_auth():
    substitutions = settings.get("kustomize_substitutions", {})
    os.environ.update(substitutions)
    for sub in substitutions:
        if sub[-4:] == "_B64":
            os.environ[sub[:-4]] = base64_decode(os.environ[sub])
    missing = [k for k in keys if not os.environ.get(k)]
    if missing:
        # Report the settings file that was actually read, not a hard-coded name.
        fail("missing kustomize_substitutions keys {} in {}".format(missing, tilt_file))

tilt_helper_dockerfile_header = """
# Tilt image
FROM golang:1.22 as tilt-helper
# Support live reloading with Tilt
RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \
    wget --output-document /start.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/start.sh && \
    chmod +x /start.sh && chmod +x /restart.sh && \
    touch /process.txt && chmod 0777 /process.txt `# pre-create PID file to allow even non-root users to run the image`
"""

tilt_dockerfile_header = """
FROM gcr.io/distroless/base:debug as tilt
WORKDIR /tilt
RUN ["/busybox/chmod", "0777", "."]
COPY --from=tilt-helper /process.txt .
COPY --from=tilt-helper /start.sh .
COPY --from=tilt-helper /restart.sh .
COPY manager .
"""

# Install the OpenTelemetry helm chart
def observability():
    instrumentation_key = os.getenv("AZURE_INSTRUMENTATION_KEY", "")
    if instrumentation_key == "":
        warn("AZURE_INSTRUMENTATION_KEY is not set, so traces won't be exported to Application Insights")
        trace_links = []
    else:
        trace_links = [link("https://ms.portal.azure.com/#blade/HubsExtension/BrowseResource/resourceType/microsoft.insights%2Fcomponents", "App Insights")]
    k8s_yaml(helm(
        "./hack/observability/opentelemetry/chart",
        name = "opentelemetry-collector",
        namespace = "capz-system",
        values = ["./hack/observability/opentelemetry/values.yaml"],
        set = ["config.exporters.azuremonitor.instrumentation_key=" + instrumentation_key],
    ))
    k8s_yaml(helm(
        "./hack/observability/jaeger/chart",
        name = "jaeger-all-in-one",
        namespace = "capz-system",
        set = [
            "crd.install=false",
            "rbac.create=false",
            "resources.limits.cpu=200m",
            "resources.limits.memory=256Mi",
        ],
    ))

    k8s_yaml(helm(
        "./hack/observability/cluster-api-visualizer/chart",
        name = "visualizer",
        namespace = "capz-system",
    ))

    k8s_resource(
        workload = "jaeger-all-in-one",
        new_name = "traces: jaeger-all-in-one",
        port_forwards = [port_forward(16686, name = "View traces", link_path = "/search?service=capz")],
        links = trace_links,
        labels = ["observability"],
    )
    k8s_resource(
        workload = "prometheus-operator",
        new_name = "metrics: prometheus-operator",
        port_forwards = [port_forward(local_port = 9090, container_port = 9090, name = "View metrics")],
        extra_pod_selectors = [{"app": "prometheus"}],
        labels = ["observability"],
    )
    k8s_resource(workload = "opentelemetry-collector", labels = ["observability"])
    k8s_resource(workload = "opentelemetry-collector-agent", labels = ["observability"])
    k8s_resource(
        workload = "capi-visualizer",
        new_name = "visualizer",
        port_forwards = [port_forward(local_port = 8000, container_port = 8081, name = "View visualization")],
        labels = ["observability"],
    )

    k8s_resource(workload = "capz-controller-manager", labels = ["cluster-api"])
    k8s_resource(workload = "azureserviceoperator-controller-manager", labels = ["cluster-api"])

# Build CAPZ and add feature gates
def capz():
    # Apply the kustomized yaml for this provider
    yaml = str(kustomizesub("./hack/observability"))  # build an observable kind deployment by default

    # add extra_args if they are defined
    if settings.get("extra_args"):
        azure_extra_args = settings.get("extra_args").get("azure")
        yaml_dict = decode_yaml_stream(yaml)
        append_arg_for_container_in_deployment(yaml_dict, "capz-controller-manager", "capz-system", "cluster-api-azure-controller", azure_extra_args)
        yaml = str(encode_yaml_stream(yaml_dict))
        yaml = fixup_yaml_empty_arrays(yaml)

    # Forge the build command
    ldflags = "-extldflags \"-static\" " + str(local("hack/version.sh")).rstrip("\n")
    build_env = "CGO_ENABLED=0 GOOS=linux GOARCH={arch}".format(arch = os_arch)
    build_cmd = "{build_env} go build -ldflags '{ldflags}' -o .tiltbuild/manager".format(
        build_env = build_env,
        ldflags = ldflags,
    )

    # Set up a local_resource build of the provider's manager binary.
    local_resource(
        "manager",
        cmd = "mkdir -p .tiltbuild; " + build_cmd,
        deps = ["api", "azure", "config", "controllers", "exp", "feature", "pkg", "util", "go.mod", "go.sum", "main.go"],
        labels = ["cluster-api"],
    )

    dockerfile_contents = "\n".join([
        tilt_helper_dockerfile_header,
        tilt_dockerfile_header,
    ])

    entrypoint = ["sh", "/tilt/start.sh", "/tilt/manager"]
    extra_args = settings.get("extra_args")
    if extra_args:
        entrypoint.extend(extra_args)

    # use the user REGISTRY if set, otherwise use the default
    if settings.get("kustomize_substitutions", {}).get("REGISTRY", "") != "":
        registry = settings.get("kustomize_substitutions", {}).get("REGISTRY", "")
        print("Using REGISTRY: " + registry + " from tilt-settings.yaml")
        image = registry + "/cluster-api-azure-controller"
    else:
        image = "gcr.io/cluster-api-provider-azure/cluster-api-azure-controller"

    # Set up an image build for the provider. The live update configuration syncs the output from the local_resource
    # build into the container.
    docker_build(
        ref = image,
        context = "./.tiltbuild/",
        dockerfile_contents = dockerfile_contents,
        target = "tilt",
        entrypoint = entrypoint,
        only = "manager",
        live_update = [
            sync(".tiltbuild/manager", "/tilt/manager"),
            run("sh /tilt/restart.sh"),
        ],
        ignore = ["templates"],
    )

    k8s_yaml(blob(yaml))

def create_identity_secret():
    # create secret for identity password
    local(kubectl_cmd + " delete secret cluster-identity-secret --ignore-not-found=true")

    os.putenv("AZURE_CLUSTER_IDENTITY_SECRET_NAME", "cluster-identity-secret")
    os.putenv("AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE", "default")
    os.putenv("CLUSTER_IDENTITY_NAME", "cluster-identity-ci")
    os.putenv("ASO_CREDENTIAL_SECRET_NAME", "aso-credentials")

    local("cat templates/flavors/aks-aso/credentials.yaml | " + envsubst_cmd + " | " + kubectl_cmd + " apply -f -", quiet = True, echo_off = True)

def create_crs():
    # create config maps
    local(kubectl_cmd + " delete configmaps csi-proxy-addon --ignore-not-found=true")
    local(kubectl_cmd + " create configmap csi-proxy-addon --from-file=templates/addons/windows/csi-proxy/csi-proxy.yaml")

    # need to set version for kube-proxy on windows.
    # Default to "" (not {}): the value must be a string for putenv/envsubst.
    os.putenv("KUBERNETES_VERSION", settings.get("kubernetes_version", ""))
    local(kubectl_cmd + " create configmap calico-windows-addon --from-file=templates/addons/windows/calico/ --dry-run=client -o yaml | " + envsubst_cmd + " | " + kubectl_cmd + " apply -f -")

    # set up crs
    local(kubectl_cmd + " apply -f templates/addons/windows/calico-resource-set.yaml")
    local(kubectl_cmd + " apply -f templates/addons/windows/csi-proxy/csi-proxy-resource-set.yaml")

# create flavor resources from cluster-template files in the templates directory
def flavors():
    substitutions = settings.get("kustomize_substitutions", {})

    az_key_b64_name = "AZURE_SSH_PUBLIC_KEY_B64"
    az_key_name = "AZURE_SSH_PUBLIC_KEY"
    default_key_path = "$HOME/.ssh/id_rsa.pub"

    if substitutions.get(az_key_b64_name):
        os.environ.update({az_key_b64_name: substitutions.get(az_key_b64_name)})
        os.environ.update({az_key_name: base64_decode(substitutions.get(az_key_b64_name))})
    else:
        print("{} was not specified in tilt-settings.json, attempting to load {}".format(az_key_b64_name, default_key_path))
        os.environ.update({az_key_b64_name: base64_encode_file(default_key_path)})
        os.environ.update({az_key_name: read_file_from_path(default_key_path)})

    template_list = [item for item in listdir("./templates")]
    template_list = [template for template in template_list if os.path.basename(template).endswith("yaml")]

    for template in template_list:
        deploy_worker_templates(template, substitutions)

    local_resource(
        name = "delete-all-workload-clusters",
        cmd = kubectl_cmd + " delete clusters --all --wait=false",
        auto_init = False,
        trigger_mode = TRIGGER_MODE_MANUAL,
        labels = ["flavors"],
    )

# Register a manual local_resource for one cluster template: substitute variables into
# the template and build the shell command that creates the workload cluster and
# waits for its kubeconfig and nodes.
def deploy_worker_templates(template, substitutions):
    # validate template exists
    if not os.path.exists(template):
        fail(template + " not found")

    yaml = str(read_file(template))
    flavor = os.path.basename(template).replace("cluster-template-", "").replace(".yaml", "")

    # for the base cluster-template, flavor is "default"
    flavor = os.path.basename(flavor).replace("cluster-template", "default")

    # azure account and ssh replacements
    for substitution in substitutions:
        value = substitutions[substitution]
        yaml = yaml.replace("${" + substitution + "}", value)

    # if metadata defined for worker-templates in tilt_settings
    if "worker-templates" in settings:
        # first priority replacements defined per template
        if "flavors" in settings.get("worker-templates", {}):
            substitutions = settings.get("worker-templates").get("flavors").get(flavor, {})
            for substitution in substitutions:
                value = substitutions[substitution]
                yaml = yaml.replace("${" + substitution + "}", value)

        # second priority replacements defined common to templates
        if "metadata" in settings.get("worker-templates", {}):
            substitutions = settings.get("worker-templates").get("metadata", {})
            for substitution in substitutions:
                value = substitutions[substitution]
                yaml = yaml.replace("${" + substitution + "}", value)

    # programmatically define any remaining vars
    # "windows" can not be for cluster name because it sets the dns to trademarked name during reconciliation
    substitutions = {
        "AZURE_LOCATION": settings.get("azure_location"),
        "AZURE_VNET_NAME": "${CLUSTER_NAME}-vnet",
        "AZURE_RESOURCE_GROUP": "${CLUSTER_NAME}-rg",
        "CONTROL_PLANE_MACHINE_COUNT": settings.get("control_plane_machine_count"),
        "KUBERNETES_VERSION": settings.get("kubernetes_version"),
        "AZURE_CONTROL_PLANE_MACHINE_TYPE": settings.get("az_control_plane_machine_type"),
        "WORKER_MACHINE_COUNT": settings.get("worker_machine_count"),
        "AZURE_NODE_MACHINE_TYPE": settings.get("az_node_machine_type"),
        "FLATCAR_VERSION": settings.get("flatcar_version"),
        "CLUSTER_CLASS_NAME": settings.get("cluster_class_name"),
    }

    if "aks" in flavor:
        # AKS version support is usually a bit behind CAPI version, so use an older version
        substitutions["KUBERNETES_VERSION"] = settings.get("aks_kubernetes_version")

    for substitution in substitutions:
        value = substitutions[substitution]
        yaml = yaml.replace("${" + substitution + "}", value)

    yaml = shlex.quote(yaml)
    flavor_name = os.path.basename(flavor)
    flavor_cmd = "RANDOM=$(bash -c 'echo $RANDOM'); export CLUSTER_NAME=" + flavor.replace("windows", "win") + "-$RANDOM; make generate-flavors; echo " + yaml + "> ./.tiltbuild/" + flavor + "; cat ./.tiltbuild/" + flavor + " | " + envsubst_cmd + " | " + kubectl_cmd + " apply -f -; echo \"Cluster \'$CLUSTER_NAME\' created, don't forget to delete\""

    # wait for kubeconfig to be available
    flavor_cmd += "; until " + kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig > /dev/null 2>&1; do sleep 5; done; " + kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig -o jsonpath={.data.value} | base64 --decode > ./${CLUSTER_NAME}.kubeconfig; chmod 600 ./${CLUSTER_NAME}.kubeconfig; until " + kubectl_cmd + " --kubeconfig=./${CLUSTER_NAME}.kubeconfig get nodes > /dev/null 2>&1; do sleep 5; done"

    # copy the kubeadm configmap to the calico-system namespace.
    # This is a workaround needed for the calico-node-windows daemonset to be able to run in the calico-system namespace.
    if "windows" in flavor_name:
        flavor_cmd += "; until " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig get configmap kubeadm-config --namespace=kube-system > /dev/null 2>&1; do sleep 5; done"
        flavor_cmd += "; " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig create namespace calico-system --dry-run=client -o yaml | " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig apply -f -; " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig get configmap kubeadm-config --namespace=kube-system -o yaml | sed 's/namespace: kube-system/namespace: calico-system/' | " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig apply -f -"

    flavor_cmd += get_addons(flavor_name)

    local_resource(
        name = flavor_name,
        cmd = ["sh", "-ec", flavor_cmd],
        auto_init = False,
        trigger_mode = TRIGGER_MODE_MANUAL,
        labels = ["flavors"],
        allow_parallel = True,
    )

# Return the shell-command suffix that installs cluster addons (cloud-provider-azure,
# calico or azure-cni) on the freshly created workload cluster.
def get_addons(flavor_name):
    # do not install calico and out of tree cloud provider for aks workload cluster
    if "aks" in flavor_name:
        return ""

    addon_cmd = "; export CIDRS=$(" + kubectl_cmd + " get cluster ${CLUSTER_NAME} -o jsonpath='{.spec.clusterNetwork.pods.cidrBlocks[*]}')"
    addon_cmd += "; export CIDR_LIST=$(bash -c 'echo $CIDRS' | tr ' ' ',')"
    addon_cmd += "; " + helm_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig install --repo https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo cloud-provider-azure --generate-name --set infra.clusterName=${CLUSTER_NAME} --set cloudControllerManager.clusterCIDR=${CIDR_LIST}"
    if "flatcar" in flavor_name:  # append caCertDir location to the cloud-provider-azure helm install command for flatcar flavor
        addon_cmd += " --set-string cloudControllerManager.caCertDir=/usr/share/ca-certificates"

    if "azure-cni-v1" in flavor_name:
        addon_cmd += "; " + kubectl_cmd + " apply -f ./templates/addons/azure-cni-v1.yaml --kubeconfig ./${CLUSTER_NAME}.kubeconfig"
    else:
        # install calico
        if "ipv6" in flavor_name:
            calico_values = "./templates/addons/calico-ipv6/values.yaml"
        elif "dual-stack" in flavor_name:
            calico_values = "./templates/addons/calico-dual-stack/values.yaml"
        else:
            calico_values = "./templates/addons/calico/values.yaml"
        addon_cmd += "; " + helm_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig install --repo https://docs.tigera.io/calico/charts --version ${CALICO_VERSION} calico tigera-operator -f " + calico_values + " --namespace tigera-operator --create-namespace"

    return addon_cmd

def base64_encode(to_encode):
    encode_blob = local("echo '{}' | tr -d '\n' | base64 | tr -d '\n'".format(to_encode), quiet = True, echo_off = True)
    return str(encode_blob)

def base64_encode_file(path_to_encode):
    encode_blob = local("cat {} | tr -d '\n' | base64 | tr -d '\n'".format(path_to_encode), quiet = True)
    return str(encode_blob)

def read_file_from_path(path_to_read):
    str_blob = local("cat {} | tr -d '\n'".format(path_to_read), quiet = True)
    return str(str_blob)

def base64_decode(to_decode):
    decode_blob = local("echo '{}' | base64 --decode".format(to_decode), quiet = True, echo_off = True)
    return str(decode_blob)

def kustomizesub(folder):
    yaml = local("hack/kustomize-sub.sh {}".format(folder), quiet = True)
    return yaml

def waitforsystem():
    local(kubectl_cmd + " wait --for=condition=ready --timeout=300s pod --all -n capi-kubeadm-bootstrap-system")
    local(kubectl_cmd + " wait --for=condition=ready --timeout=300s pod --all -n capi-kubeadm-control-plane-system")
    local(kubectl_cmd + " wait --for=condition=ready --timeout=300s pod --all -n capi-system")

##############################
# Actual work happens here
##############################

validate_auth()

include_user_tilt_files()

load("ext://cert_manager", "deploy_cert_manager")

if settings.get("deploy_cert_manager"):
    deploy_cert_manager(version = settings.get("cert_manager_version"))

deploy_capi()

deploy_caaph()

create_identity_secret()

capz()

observability()

waitforsystem()

create_crs()

flavors()