github.com/tilt-dev/tilt@v0.33.15-0.20240515162809-0a22ed45d8a0/internal/tiltfile/tiltfile_state.go

package tiltfile

import (
	"context"
	"fmt"
	"path/filepath"
	"strings"
	"time"

	"github.com/looplab/tarjan"
	"github.com/pkg/errors"
	"go.starlark.net/starlark"
	"go.starlark.net/syntax"
	"golang.org/x/mod/semver"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/tilt-dev/tilt/internal/controllers/apis/cmdimage"
	"github.com/tilt-dev/tilt/internal/controllers/apis/dockerimage"
	"github.com/tilt-dev/tilt/internal/controllers/apis/liveupdate"
	"github.com/tilt-dev/tilt/internal/controllers/apiset"
	"github.com/tilt-dev/tilt/internal/localexec"
	"github.com/tilt-dev/tilt/internal/tiltfile/cisettings"
	"github.com/tilt-dev/tilt/internal/tiltfile/hasher"
	"github.com/tilt-dev/tilt/internal/tiltfile/links"
	"github.com/tilt-dev/tilt/internal/tiltfile/print"
	"github.com/tilt-dev/tilt/internal/tiltfile/probe"
	"github.com/tilt-dev/tilt/internal/tiltfile/sys"
	"github.com/tilt-dev/tilt/internal/tiltfile/tiltextension"
	"github.com/tilt-dev/tilt/pkg/apis"

	"github.com/tilt-dev/tilt/internal/container"
	"github.com/tilt-dev/tilt/internal/dockercompose"
	"github.com/tilt-dev/tilt/internal/feature"
	"github.com/tilt-dev/tilt/internal/k8s"
	"github.com/tilt-dev/tilt/internal/ospath"
	"github.com/tilt-dev/tilt/internal/sliceutils"
	"github.com/tilt-dev/tilt/internal/tiltfile/analytics"
	"github.com/tilt-dev/tilt/internal/tiltfile/config"
	"github.com/tilt-dev/tilt/internal/tiltfile/dockerprune"
	"github.com/tilt-dev/tilt/internal/tiltfile/encoding"
	"github.com/tilt-dev/tilt/internal/tiltfile/git"
	"github.com/tilt-dev/tilt/internal/tiltfile/include"
	"github.com/tilt-dev/tilt/internal/tiltfile/io"
	tiltfile_k8s "github.com/tilt-dev/tilt/internal/tiltfile/k8s"
	"github.com/tilt-dev/tilt/internal/tiltfile/k8scontext"
	"github.com/tilt-dev/tilt/internal/tiltfile/loaddynamic"
	"github.com/tilt-dev/tilt/internal/tiltfile/metrics"
	"github.com/tilt-dev/tilt/internal/tiltfile/os"
	"github.com/tilt-dev/tilt/internal/tiltfile/secretsettings"
	"github.com/tilt-dev/tilt/internal/tiltfile/shlex"
	"github.com/tilt-dev/tilt/internal/tiltfile/starkit"
	"github.com/tilt-dev/tilt/internal/tiltfile/starlarkstruct"
	"github.com/tilt-dev/tilt/internal/tiltfile/telemetry"
	"github.com/tilt-dev/tilt/internal/tiltfile/updatesettings"
	tfv1alpha1 "github.com/tilt-dev/tilt/internal/tiltfile/v1alpha1"
	"github.com/tilt-dev/tilt/internal/tiltfile/version"
	"github.com/tilt-dev/tilt/internal/tiltfile/watch"
	fwatch "github.com/tilt-dev/tilt/internal/watch"
	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
	"github.com/tilt-dev/tilt/pkg/logger"
	"github.com/tilt-dev/tilt/pkg/model"
)

var unmatchedImageNoConfigsWarning = "We could not find any deployment instructions, e.g. `k8s_yaml` or `docker_compose`.\n" +
	"Skipping all image builds until we know how to deploy them."
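
// For illustration, a minimal Tiltfile like the following (the image name is
// hypothetical) would trip unmatchedImageNoConfigsWarning, since it declares
// an image build but no k8s_yaml()/docker_compose() to deploy it into:
//
//	docker_build('example-image', '.')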

var unmatchedImageAllUnresourcedWarning = "No Kubernetes configs with images found.\n" +
	"If you are using CRDs, add k8s_kind() to tell Tilt how to find images.\n" +
	"https://docs.tilt.dev/api.html#api.k8s_kind"

var pkgInitTime = time.Now()

type resourceSet struct {
	dc  []*dcResourceSet
	k8s []*k8sResource
}

type tiltfileState struct {
	// set at creation
	ctx              context.Context
	dcCli            dockercompose.DockerComposeClient
	webHost          model.WebHost
	execer           localexec.Execer
	k8sContextPlugin k8scontext.Plugin
	versionPlugin    version.Plugin
	configPlugin     *config.Plugin
	extensionPlugin  *tiltextension.Plugin
	ciSettingsPlugin cisettings.Plugin
	features         feature.FeatureSet

	// added to during execution
	buildIndex     *buildIndex
	k8sObjectIndex *tiltfile_k8s.State

	// The mutation semantics of these 3 things are a bit fuzzy.
	// Objects are moved back and forth between them in different
	// phases of tiltfile execution and post-execution assembly.
	//
	// TODO(nick): Move these into a unified k8sObjectIndex that
	// maintains consistent internal state. Right now the state
	// is duplicated.
	k8s            []*k8sResource
	k8sByName      map[string]*k8sResource
	k8sUnresourced []k8s.K8sEntity

	dc dcResourceMap

	k8sResourceOptions []k8sResourceOptions
	localResources     []*localResource
	localByName        map[string]*localResource

	// ensure that any images are pushed to/pulled from this registry, rewriting names if needed
	defaultReg *v1alpha1.RegistryHosting

	k8sKinds map[k8s.ObjectSelector]*tiltfile_k8s.KindInfo

	workloadToResourceFunction workloadToResourceFunction

	// for assembly
	usedImages map[string]bool

	// count how many times each builtin is called, for analytics
	builtinCallCounts map[string]int
	// how many times each arg is used on each builtin
	builtinArgCounts map[string]map[string]int

	// any LiveUpdate steps that have been created but not used by a LiveUpdate will cause an error, to ensure
	// that users aren't accidentally using step-creating functions incorrectly
	// stored as a map of string(declarationPosition) -> step
	// it'd be appealing to store this as a map[liveUpdateStep]bool, but then things get weird if we have two steps
	// with the same hashcode (like, all restartcontainer steps)
	unconsumedLiveUpdateSteps map[string]liveUpdateStep

	// global trigger mode -- will be the default for all manifests (though the user can still explicitly set
	// triggerMode for a specific manifest)
	triggerMode triggerMode

	// for error reporting in case it's called twice
	triggerModeCallPosition syntax.Position

	teamID string

	secretSettings model.SecretSettings

	apiObjects apiset.ObjectSet

	logger logger.Logger

	// postExecReadFiles is generally a mistake -- it means that if tiltfile execution fails,
	// these will never be read. Remove these when you can!!!
	postExecReadFiles []string

	// Temporary directory for storing generated artifacts during the lifetime of the tiltfile context.
	// The directory is recursively deleted when the context is done.
	scratchDir *fwatch.TempDir
}

func newTiltfileState(
	ctx context.Context,
	dcCli dockercompose.DockerComposeClient,
	webHost model.WebHost,
	execer localexec.Execer,
	k8sContextPlugin k8scontext.Plugin,
	versionPlugin version.Plugin,
	configPlugin *config.Plugin,
	extensionPlugin *tiltextension.Plugin,
	ciSettingsPlugin cisettings.Plugin,
	features feature.FeatureSet) *tiltfileState {
	return &tiltfileState{
		ctx:                       ctx,
		dcCli:                     dcCli,
		webHost:                   webHost,
		execer:                    execer,
		k8sContextPlugin:          k8sContextPlugin,
		versionPlugin:             versionPlugin,
		configPlugin:              configPlugin,
		extensionPlugin:           extensionPlugin,
		ciSettingsPlugin:          ciSettingsPlugin,
		buildIndex:                newBuildIndex(),
		k8sObjectIndex:            tiltfile_k8s.NewState(),
		k8sByName:                 make(map[string]*k8sResource),
		dc:                        make(map[string]*dcResourceSet),
		localByName:               make(map[string]*localResource),
		usedImages:                make(map[string]bool),
		logger:                    logger.Get(ctx),
		builtinCallCounts:         make(map[string]int),
		builtinArgCounts:          make(map[string]map[string]int),
		unconsumedLiveUpdateSteps: make(map[string]liveUpdateStep),
		localResources:            []*localResource{},
		triggerMode:               TriggerModeAuto,
		features:                  features,
		secretSettings:            model.DefaultSecretSettings(),
		apiObjects:                apiset.ObjectSet{},
		k8sKinds:                  tiltfile_k8s.InitialKinds(),
	}
}

// print() for fulfilling the starlark thread callback
func (s *tiltfileState) print(_ *starlark.Thread, msg string) {
	s.logger.Infof("%s", msg)
}

// loadManifests loads the Tiltfile at tf.Spec.Path and returns the manifests it defines.
//
// This often returns a starkit.Model even on error, because the starkit.Model
// has a record of what happened during the execution (what files were read, etc).
//
// TODO(nick): Eventually this will just return a starkit.Model, which will contain
// all the mutable state collected by execution.
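//
// A minimal sketch of a call site, for orientation (the spec type and field
// literal here are inferred from the usage below and are illustrative only):
//
//	tf := &v1alpha1.Tiltfile{Spec: v1alpha1.TiltfileSpec{Path: "/repo/Tiltfile"}}
//	manifests, result, err := state.loadManifests(tf)
//	// even when err != nil, result may still record which files were read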
func (s *tiltfileState) loadManifests(tf *v1alpha1.Tiltfile) ([]model.Manifest, starkit.Model, error) {
	s.logger.Infof("Loading Tiltfile at: %s", tf.Spec.Path)

	result, err := starkit.ExecFile(tf,
		s,
		include.IncludeFn{},
		git.NewPlugin(),
		os.NewPlugin(),
		sys.NewPlugin(),
		io.NewPlugin(),
		s.k8sContextPlugin,
		dockerprune.NewPlugin(),
		analytics.NewPlugin(),
		s.versionPlugin,
		s.configPlugin,
		starlarkstruct.NewPlugin(),
		telemetry.NewPlugin(),
		metrics.NewPlugin(),
		updatesettings.NewPlugin(),
		s.ciSettingsPlugin,
		secretsettings.NewPlugin(),
		encoding.NewPlugin(),
		shlex.NewPlugin(),
		watch.NewPlugin(),
		loaddynamic.NewPlugin(),
		s.extensionPlugin,
		links.NewPlugin(),
		print.NewPlugin(),
		probe.NewPlugin(),
		tfv1alpha1.NewPlugin(),
		hasher.NewPlugin(),
	)
	if err != nil {
		return nil, result, starkit.UnpackBacktrace(err)
	}

	resources, unresourced, err := s.assemble()
	if err != nil {
		return nil, result, err
	}

	us, err := updatesettings.GetState(result)
	if err != nil {
		return nil, result, err
	}

	err = s.assertAllImagesMatched(us)
	if err != nil {
		s.logger.Warnf("%s", err.Error())
	}

	manifests := []model.Manifest{}
	k8sContextState, err := k8scontext.GetState(result)
	if err != nil {
		return nil, result, err
	}

	if len(resources.k8s) > 0 || len(unresourced) > 0 {
		ms, err := s.translateK8s(resources.k8s, us)
		if err != nil {
			return nil, result, err
		}
		manifests = append(manifests, ms...)

		isAllowed := k8sContextState.IsAllowed(tf)
		if !isAllowed {
			kubeContext := k8sContextState.KubeContext()
			return nil, result, fmt.Errorf(`Stop! %s might be production.
If you're sure you want to deploy there, add:
allow_k8s_contexts('%s')
to your Tiltfile. Otherwise, switch k8s contexts and restart Tilt.`, kubeContext, kubeContext)
		}
	}

	if len(resources.dc) > 0 {
		if err := s.validateDockerComposeVersion(); err != nil {
			return nil, result, err
		}

		for _, dc := range resources.dc {
			ms, err := s.translateDC(dc)
			if err != nil {
				return nil, result, err
			}
			manifests = append(manifests, ms...)
		}
	}

	err = s.validateLiveUpdatesForManifests(manifests)
	if err != nil {
		return nil, result, err
	}

	err = s.checkForUnconsumedLiveUpdateSteps()
	if err != nil {
		return nil, result, err
	}

	localManifests, err := s.translateLocal()
	if err != nil {
		return nil, result, err
	}
	manifests = append(manifests, localManifests...)
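
	// Any k8s entities that no resource claimed get grouped below into a
	// single synthetic manifest (model.UnresourcedYAMLManifestName) so they
	// are still deployed; pod readiness is ignored for it because we don't
	// know what pods, if any, those entities create.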
	if len(unresourced) > 0 {
		mn := model.UnresourcedYAMLManifestName
		r := &k8sResource{
			name:             mn.String(),
			entities:         unresourced,
			podReadinessMode: model.PodReadinessIgnore,
		}
		kt, err := s.k8sDeployTarget(mn.TargetName(), r, nil, us)
		if err != nil {
			return nil, starkit.Model{}, err
		}

		yamlManifest := model.Manifest{Name: mn}.WithDeployTarget(kt)
		manifests = append(manifests, yamlManifest)
	}

	err = s.sanitizeDependencies(manifests)
	if err != nil {
		return nil, starkit.Model{}, err
	}

	for i := range manifests {
		// ensure all manifests have a label indicating they're owned
		// by the Tiltfile - some reconcilers have special handling
		l := manifests[i].Labels
		if l == nil {
			l = make(map[string]string)
		}
		manifests[i] = manifests[i].WithLabels(l)

		err := manifests[i].Validate()
		if err != nil {
			// Even on manifest validation errors, we may be able
			// to use other kinds of models (e.g., watched files)
			return manifests, result, err
		}
	}

	return manifests, result, nil
}

// Builtin functions

const (
	// build functions
	dockerBuildN     = "docker_build"
	customBuildN     = "custom_build"
	defaultRegistryN = "default_registry"

	// docker compose functions
	dockerComposeN = "docker_compose"
	dcResourceN    = "dc_resource"

	// k8s functions
	k8sYamlN                    = "k8s_yaml"
	filterYamlN                 = "filter_yaml"
	k8sResourceN                = "k8s_resource"
	portForwardN                = "port_forward"
	k8sKindN                    = "k8s_kind"
	k8sImageJSONPathN           = "k8s_image_json_path"
	workloadToResourceFunctionN = "workload_to_resource_function"
	k8sCustomDeployN            = "k8s_custom_deploy"

	// local resource functions
	localResourceN = "local_resource"
	testN          = "test" // a deprecated fork of local resource

	// file functions
	localN     = "local"
	kustomizeN = "kustomize"
	helmN      = "helm"

	// live update functions
	fallBackOnN       = "fall_back_on"
	syncN             = "sync"
	runN              = "run"
	restartContainerN = "restart_container"

	// trigger mode
	triggerModeN       = "trigger_mode"
	triggerModeAutoN   = "TRIGGER_MODE_AUTO"
	triggerModeManualN = "TRIGGER_MODE_MANUAL"

	// feature flags
	enableFeatureN  = "enable_feature"
	disableFeatureN = "disable_feature"

	disableSnapshotsN = "disable_snapshots"

	// other functions
	setTeamN = "set_team"
)

type triggerMode int

func (m triggerMode) String() string {
	switch m {
	case TriggerModeAuto:
		return triggerModeAutoN
	case TriggerModeManual:
		return triggerModeManualN
	default:
		return fmt.Sprintf("unknown trigger mode with value %d", m)
	}
}

func (t triggerMode) Type() string {
	return "TriggerMode"
}

func (t triggerMode) Freeze() {
	// noop
}

func (t triggerMode) Truth() starlark.Bool {
	return starlark.MakeInt(int(t)).Truth()
}

func (t triggerMode) Hash() (uint32, error) {
	return starlark.MakeInt(int(t)).Hash()
}

var _ starlark.Value = triggerMode(0)

const (
	TriggerModeUnset  triggerMode = iota
	TriggerModeAuto   triggerMode = iota
	TriggerModeManual triggerMode = iota
)

func (s *tiltfileState) triggerModeForResource(resourceTriggerMode triggerMode) triggerMode {
	if resourceTriggerMode != TriggerModeUnset {
		return resourceTriggerMode
	} else {
		return s.triggerMode
	}
}
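
// starlarkTriggerModeToModel maps a Tiltfile-level trigger mode plus the
// auto_init flag onto the engine's model.TriggerMode. Spelled out for quick
// reference:
//
//	(TriggerModeAuto,   autoInit=true)  -> model.TriggerModeAuto
//	(TriggerModeAuto,   autoInit=false) -> model.TriggerModeAutoWithManualInit
//	(TriggerModeManual, autoInit=true)  -> model.TriggerModeManualWithAutoInit
//	(TriggerModeManual, autoInit=false) -> model.TriggerModeManual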
func starlarkTriggerModeToModel(triggerMode triggerMode, autoInit bool) (model.TriggerMode, error) {
	switch triggerMode {
	case TriggerModeAuto:
		if !autoInit {
			return model.TriggerModeAutoWithManualInit, nil
		}
		return model.TriggerModeAuto, nil
	case TriggerModeManual:
		if autoInit {
			return model.TriggerModeManualWithAutoInit, nil
		} else {
			return model.TriggerModeManual, nil
		}
	default:
		return 0, fmt.Errorf("unknown triggerMode %v", triggerMode)
	}
}

// count how many times each builtin is called, for analytics
func (s *tiltfileState) OnBuiltinCall(name string, fn *starlark.Builtin) {
	s.builtinCallCounts[name]++
}

func (s *tiltfileState) OnExec(t *starlark.Thread, tiltfilePath string, contents []byte) error {
	return nil
}

// wrap a builtin such that it's only allowed to run when we have a known safe k8s context
// (none (e.g., docker-compose), local, or specified by `allow_k8s_contexts`)
func (s *tiltfileState) potentiallyK8sUnsafeBuiltin(f starkit.Function) starkit.Function {
	return func(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
		tf, err := starkit.StartTiltfileFromThread(thread)
		if err != nil {
			return nil, err
		}

		model, err := starkit.ModelFromThread(thread)
		if err != nil {
			return nil, err
		}

		k8sContextState, err := k8scontext.GetState(model)
		if err != nil {
			return nil, err
		}

		isAllowed := k8sContextState.IsAllowed(tf)
		if !isAllowed {
			kubeContext := k8sContextState.KubeContext()
			return nil, fmt.Errorf(`Refusing to run '%s' because %s might be a production kube context.
If you're sure you want to continue add:
allow_k8s_contexts('%s')
before this function call in your Tiltfile. Otherwise, switch k8s contexts and restart Tilt.`, fn.Name(), kubeContext, kubeContext)
		}

		return f(thread, fn, args, kwargs)
	}
}

func (s *tiltfileState) unpackArgs(fnname string, args starlark.Tuple, kwargs []starlark.Tuple, pairs ...interface{}) error {
	err := starlark.UnpackArgs(fnname, args, kwargs, pairs...)
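	// On success, additionally record which parameter names were actually
	// used (positionally and as kwargs) so builtinArgCounts can feed the
	// per-argument analytics described on the struct field above.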
	if err == nil {
		var paramNames []string
		for i, o := range pairs {
			if i%2 == 0 {
				name := strings.TrimSuffix(o.(string), "?")
				paramNames = append(paramNames, name)
			}
		}

		usedParamNames := paramNames[:args.Len()]
		for _, p := range kwargs {
			name := strings.TrimSuffix(string(p[0].(starlark.String)), "?")
			usedParamNames = append(usedParamNames, name)
		}
		_, ok := s.builtinArgCounts[fnname]
		if !ok {
			s.builtinArgCounts[fnname] = make(map[string]int)
		}
		for _, paramName := range usedParamNames {
			s.builtinArgCounts[fnname][paramName]++
		}
	}
	return err
}

// TODO(nick): Split these into separate plugins
func (s *tiltfileState) OnStart(e *starkit.Environment) error {
	e.SetArgUnpacker(s.unpackArgs)
	e.SetPrint(s.print)
	e.SetContext(s.ctx)

	for _, b := range []struct {
		name    string
		builtin starkit.Function
	}{
		{localN, s.potentiallyK8sUnsafeBuiltin(s.local)},
		{dockerBuildN, s.dockerBuild},
		{customBuildN, s.customBuild},
		{defaultRegistryN, s.defaultRegistry},
		{dockerComposeN, s.dockerCompose},
		{dcResourceN, s.dcResource},
		{k8sYamlN, s.k8sYaml},
		{filterYamlN, s.filterYaml},
		{k8sResourceN, s.k8sResource},
		{k8sCustomDeployN, s.k8sCustomDeploy},
		{localResourceN, s.localResource},
		{testN, s.localResource},
		{portForwardN, s.portForward},
		{k8sKindN, s.k8sKind},
		{k8sImageJSONPathN, s.k8sImageJsonPath},
		{workloadToResourceFunctionN, s.workloadToResourceFunctionFn},
		{kustomizeN, s.kustomize},
		{helmN, s.helm},
		{triggerModeN, s.triggerModeFn},
		{fallBackOnN, s.liveUpdateFallBackOn},
		{syncN, s.liveUpdateSync},
		{runN, s.liveUpdateRun},
		{restartContainerN, s.liveUpdateRestartContainer},
		{enableFeatureN, s.enableFeature},
		{disableFeatureN, s.disableFeature},
		{disableSnapshotsN, s.disableSnapshots},
		{setTeamN, s.setTeam},
	} {
		err := e.AddBuiltin(b.name, b.builtin)
		if err != nil {
			return err
		}
	}

	for _, v := range []struct {
		name  string
		value starlark.Value
	}{
		{triggerModeAutoN, TriggerModeAuto},
		{triggerModeManualN, TriggerModeManual},
	} {
		err := e.AddValue(v.name, v.value)
		if err != nil {
			return err
		}
	}

	return nil
}

func (s *tiltfileState) assemble() (resourceSet, []k8s.K8sEntity, error) {
	err := s.assembleImages()
	if err != nil {
		return resourceSet{}, nil, err
	}

	err = s.assembleK8s()
	if err != nil {
		return resourceSet{}, nil, err
	}

	err = s.assembleDC()
	if err != nil {
		return resourceSet{}, nil, err
	}

	dcRes := []*dcResourceSet{}
	for _, resSet := range s.dc {
		dcRes = append(dcRes, resSet)
	}

	return resourceSet{
		dc:  dcRes,
		k8s: s.k8s,
	}, s.k8sUnresourced, nil
}

// Emit an error if there are unmatched images.
//
// There are 4 mistakes people commonly make if they
// have unmatched images:
//  1. They didn't include any Kubernetes or Docker Compose configs at all.
//  2. They included Kubernetes configs, but they're custom resources
//     and Tilt can't infer the image.
//  3. They typo'd the image name, and need help finding the right name.
//  4. The tooling they're using to generate the k8s resources
//     isn't generating what they expect.
//
// This function intends to help with cases (1)-(3).
// Long-term, we want to have better tooling to help with (4),
// like being able to see k8s resources as they move through
// the build system.
func (s *tiltfileState) assertAllImagesMatched(us model.UpdateSettings) error {
	unmatchedImages := s.buildIndex.unmatchedImages()
	unmatchedImages = filterUnmatchedImages(us, unmatchedImages)
	if len(unmatchedImages) == 0 {
		return nil
	}

	dcSvcCount := s.dc.ServiceCount()

	if dcSvcCount == 0 && len(s.k8s) == 0 && len(s.k8sUnresourced) == 0 {
		return fmt.Errorf(unmatchedImageNoConfigsWarning)
	}

	if len(s.k8s) == 0 && len(s.k8sUnresourced) != 0 {
		return fmt.Errorf(unmatchedImageAllUnresourcedWarning)
	}

	configType := "Kubernetes"
	if dcSvcCount > 0 {
		configType = "Docker Compose"
	}
	return s.buildIndex.unmatchedImageWarning(unmatchedImages[0], configType)
}

func (s *tiltfileState) assembleImages() error {
	for _, imageBuilder := range s.buildIndex.images {
		if imageBuilder.dbDockerfile != "" {
			depImages, err := imageBuilder.dbDockerfile.FindImages(imageBuilder.dbBuildArgs)
			if err != nil {
				return err
			}
			for _, depImage := range depImages {
				depBuilder := s.buildIndex.findBuilderForConsumedImage(depImage)
				if depBuilder == nil {
					// Images in the Dockerfile that don't have docker_build
					// instructions are OK. We'll pull them as prebuilt images.
					continue
				}

				imageBuilder.imageMapDeps = append(imageBuilder.imageMapDeps, depBuilder.ImageMapName())
			}
		}

		for _, depImage := range imageBuilder.customImgDeps {
			depBuilder := s.buildIndex.findBuilderForConsumedImage(depImage)
			if depBuilder == nil {
				// If the user specifically said to depend on this image, there
				// must be a build instruction for it.
				return fmt.Errorf("image %q: image dep %q not found",
					imageBuilder.configurationRef.RefFamiliarString(), container.FamiliarString(depImage))
			}
			imageBuilder.imageMapDeps = append(imageBuilder.imageMapDeps, depBuilder.ImageMapName())
		}
	}
	return nil
}

func (s *tiltfileState) assembleDC() error {
	if s.dc.ServiceCount() > 0 && !container.IsEmptyRegistry(s.defaultReg) {
		return errors.New("default_registry is not supported with docker compose")
	}

	for _, resSet := range s.dc {
		for _, svcName := range resSet.serviceNames {
			svc := resSet.services[svcName]
			builder := s.buildIndex.findBuilderForConsumedImage(svc.ImageRef())
			if builder != nil {
				// there's a Tilt-managed builder (e.g. docker_build or custom_build) for this image reference, so use that
				svc.ImageMapDeps = append(svc.ImageMapDeps, builder.ImageMapName())
			} else {
				// create a DockerComposeBuild image target and consume it if this service has a build section in YAML
				err := s.maybeAddDockerComposeImageBuilder(svc)
				if err != nil {
					return err
				}
			}
		}
	}
	return nil
}

func (s *tiltfileState) maybeAddDockerComposeImageBuilder(svc *dcService) error {
	build := svc.ServiceConfig.Build
	if build == nil || build.Context == "" {
		// this Docker Compose service has no build info - it relies purely on
		// a pre-existing image (e.g. from a registry)
		return nil
	}

	buildContext := build.Context
	dfPath := build.Dockerfile
	if dfPath == "" {
		// Per Compose spec, the default is "Dockerfile" (in the context dir)
		dfPath = "Dockerfile"
	}

	// Only populate dbBuildPath if it's an absolute path, not if it's a git URL.
	dbBuildPath := ""
	if filepath.IsAbs(buildContext) {
		dbBuildPath = buildContext
	}

	if !filepath.IsAbs(dfPath) && dbBuildPath != "" {
		dfPath = filepath.Join(dbBuildPath, dfPath)
	}

	imageRef := svc.ImageRef()
	err := s.buildIndex.addImage(
		&dockerImage{
			buildType:                     DockerComposeBuild,
			configurationRef:              container.NewRefSelector(imageRef),
			dockerComposeService:          svc.ServiceName,
			dockerComposeLocalVolumePaths: svc.MountedLocalDirs,
			dbBuildPath:                   dbBuildPath,
			dbDockerfilePath:              dfPath,
		})
	if err != nil {
		return err
	}
	b := s.buildIndex.findBuilderForConsumedImage(imageRef)
	svc.ImageMapDeps = append(svc.ImageMapDeps, b.ImageMapName())
	return nil
}

func (s *tiltfileState) assembleK8s() error {
	err := s.assembleK8sByWorkload()
	if err != nil {
		return err
	}

	err = s.assembleK8sUnresourced()
	if err != nil {
		return err
	}

	resourcedEntities := []k8s.K8sEntity{}
	for _, r := range s.k8sByName {
		resourcedEntities = append(resourcedEntities, r.entities...)
	}

	allEntities := append(resourcedEntities, s.k8sUnresourced...)

	fragmentsToEntities := k8s.FragmentsToEntities(allEntities)

	fullNames := make([]string, len(allEntities))
	for i, e := range allEntities {
		fullNames[i] = fullNameFromK8sEntity(e)
	}

	for _, opts := range s.k8sResourceOptions {
		if opts.manuallyGrouped {
			r, err := s.makeK8sResource(opts.newName)
			if err != nil {
				return err
			}
			r.manuallyGrouped = true
			s.k8sByName[opts.newName] = r
		}
		if r, ok := s.k8sByName[opts.workload]; ok {
			// Options are added, so aggregate options from previous resource calls.
			r.extraPodSelectors = append(r.extraPodSelectors, opts.extraPodSelectors...)
			if opts.podReadinessMode != model.PodReadinessNone {
				r.podReadinessMode = opts.podReadinessMode
			}
			if opts.discoveryStrategy != "" {
				r.discoveryStrategy = opts.discoveryStrategy
			}
			r.portForwards = append(r.portForwards, opts.portForwards...)
			if opts.triggerMode != TriggerModeUnset {
				r.triggerMode = opts.triggerMode
			}
			if opts.autoInit.IsSet {
				r.autoInit = bool(opts.autoInit.Value)
			}
			r.resourceDeps = append(r.resourceDeps, opts.resourceDeps...)
			r.links = append(r.links, opts.links...)
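			// Labels from repeated k8s_resource calls merge key by key; a
			// later call overwrites an earlier value for the same key.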
			for k, v := range opts.labels {
				r.labels[k] = v
			}
			if opts.newName != "" && opts.newName != r.name {
				err := s.checkResourceConflict(opts.newName)
				if err != nil {
					return fmt.Errorf("%s: k8s_resource specified to rename %q to %q: %v",
						opts.tiltfilePosition.String(), r.name, opts.newName, err)
				}
				delete(s.k8sByName, r.name)
				r.name = opts.newName
				s.k8sByName[r.name] = r
			}

			selectors := make([]k8s.ObjectSelector, len(opts.objects))
			for i, o := range opts.objects {
				s, err := k8s.SelectorFromString(o)
				if err != nil {
					return errors.Wrapf(err, "Error making selector from string %q", o)
				}
				selectors[i] = s
			}

			for i, o := range opts.objects {
				entities, ok := fragmentsToEntities[strings.ToLower(o)]
				if !ok || len(entities) == 0 {
					return fmt.Errorf("No object identified by the fragment %q could be found. Possible objects are: %s", o, sliceutils.QuotedStringList(fullNames))
				}
				if len(entities) > 1 {
					matchingObjects := make([]string, len(entities))
					for i, e := range entities {
						matchingObjects[i] = fullNameFromK8sEntity(e)
					}
					return fmt.Errorf("%q is not a unique fragment. Objects that match %q are %s", o, o, sliceutils.QuotedStringList(matchingObjects))
				}

				entitiesToRemove := filterEntitiesBySelector(s.k8sUnresourced, selectors[i])
				if len(entitiesToRemove) == 0 {
					// we've already taken these entities out of unresourced
					remainingUnresourced := make([]string, len(s.k8sUnresourced))
					for i, entity := range s.k8sUnresourced {
						remainingUnresourced[i] = fullNameFromK8sEntity(entity)
					}
					return fmt.Errorf("No object identified by the fragment %q could be found in remaining YAML. Valid remaining fragments are: %s", o, sliceutils.QuotedStringList(remainingUnresourced))
				}
				if len(entitiesToRemove) > 1 {
					panic(fmt.Sprintf("Fragment %q matches %d resources. Each object fragment must match exactly 1 resource. This should NOT be possible at this point in the code, we should have already checked that this fragment was unique", o, len(entitiesToRemove)))
				}

				s.addEntityToResourceAndRemoveFromUnresourced(entitiesToRemove[0], r)
			}
		} else {
			var knownResources []string
			for name := range s.k8sByName {
				knownResources = append(knownResources, name)
			}
			return fmt.Errorf("%s: k8s_resource specified unknown resource %q. known k8s resources: %s",
				opts.tiltfilePosition.String(), opts.workload, strings.Join(knownResources, ", "))
		}
	}

	for _, r := range s.k8s {
		if err := s.validateK8s(r); err != nil {
			return err
		}
	}
	return nil
}

// NOTE(dmiller): This isn't _technically_ a full name, since it is missing "group" (core, apps, data, etc.)
// A true full name would look like "foo:secret:mynamespace:core".
// However, because we
// a) couldn't think of a concrete case where you would need to specify group, and
// b) being able to do so would make things more complicated, like in the case where you want to
// specify the group of a cluster-scoped object but are unable to specify the namespace
// (e.g. foo:clusterrole::rbac.authorization.k8s.io),
// we decided to leave it off for now. When we encounter a concrete use case for specifying group it shouldn't be too
// hard to add it here and in the docs.
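//
// Following the pattern above, a Secret named "foo" in namespace "bar" would
// render as "foo:secret:bar" (name:kind:namespace).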
func fullNameFromK8sEntity(e k8s.K8sEntity) string {
	return k8s.SelectorStringFromParts([]string{e.Name(), e.GVK().Kind, e.Namespace().String()})
}

func filterEntitiesBySelector(entities []k8s.K8sEntity, sel k8s.ObjectSelector) []k8s.K8sEntity {
	ret := []k8s.K8sEntity{}

	for _, e := range entities {
		if sel.Matches(e) {
			ret = append(ret, e)
		}
	}

	return ret
}

func (s *tiltfileState) addEntityToResourceAndRemoveFromUnresourced(e k8s.K8sEntity, r *k8sResource) {
	r.entities = append(r.entities, e)
	for i, ur := range s.k8sUnresourced {
		if ur == e {
			// delete from unresourced
			s.k8sUnresourced = append(s.k8sUnresourced[:i], s.k8sUnresourced[i+1:]...)
			return
		}
	}

	panic("Unable to find entity in unresourced YAML after checking that it was there. This should never happen")
}

func (s *tiltfileState) assembleK8sByWorkload() error {
	locators := s.k8sImageLocatorsList()

	var workloads, rest []k8s.K8sEntity
	for _, e := range s.k8sUnresourced {
		isWorkload, err := s.isWorkload(e, locators)
		if err != nil {
			return err
		}
		if isWorkload {
			workloads = append(workloads, e)
		} else {
			rest = append(rest, e)
		}
	}
	s.k8sUnresourced = rest

	resourceNames, err := s.calculateResourceNames(workloads)
	if err != nil {
		return err
	}

	for i, resourceName := range resourceNames {
		workload := workloads[i]
		res, err := s.makeK8sResource(resourceName)
		if err != nil {
			return errors.Wrapf(err, "error making resource for workload %s", newK8sObjectID(workload))
		}
		err = res.addEntities([]k8s.K8sEntity{workload}, locators, s.envVarImages())
		if err != nil {
			return err
		}

		// find any other entities that match the workload's labels (e.g., services),
		// and move them from unresourced to this resource
		match, rest, err := k8s.FilterByMatchesPodTemplateSpec(workload, s.k8sUnresourced)
		if err != nil {
			return err
		}

		err = res.addEntities(match, locators, s.envVarImages())
		if err != nil {
			return err
		}

		s.k8sUnresourced = rest
	}

	return nil
}

func (s *tiltfileState) envVarImages() []container.RefSelector {
	var r []container.RefSelector
	// explicitly don't care about order
	for _, img := range s.buildIndex.images {
		if !img.matchInEnvVars {
			continue
		}
		r = append(r, img.configurationRef)
	}
	return r
}

func (s *tiltfileState) isWorkload(e k8s.K8sEntity, locators []k8s.ImageLocator) (bool, error) {
	for sel := range s.k8sKinds {
		if sel.Matches(e) {
			return true, nil
		}
	}

	images, err := e.FindImages(locators, s.envVarImages())
	if err != nil {
		return false, errors.Wrapf(err, "finding images in %s", e.Name())
	} else {
		return len(images) > 0, nil
	}
}

// assembleK8sUnresourced makes k8sResources for all k8s entities that:
// a. are not already attached to a Tilt resource, and
// b. will result in pods,
// and stores the resulting resource(s) on the tiltfileState.
// (We smartly group pod-creating entities with some kinds of
// corresponding entities, e.g. services.)
func (s *tiltfileState) assembleK8sUnresourced() error {
	withPodSpec, allRest, err := k8s.FilterByHasPodTemplateSpec(s.k8sUnresourced)
	if err != nil {
		return err
	}
	for _, e := range withPodSpec {
		target, err := s.k8sResourceForName(e.Name())
		if err != nil {
			return err
		}
		target.entities = append(target.entities, e)

		match, rest, err := k8s.FilterByMatchesPodTemplateSpec(e, allRest)
		if err != nil {
			return err
		}
		target.entities = append(target.entities, match...)
		allRest = rest
	}

	s.k8sUnresourced = allRest

	return nil
}

func (s *tiltfileState) validateK8s(r *k8sResource) error {
	if len(r.entities) == 0 && r.customDeploy == nil {
		return fmt.Errorf("resource %q: could not associate any k8s_yaml() or k8s_custom_deploy() with this resource", r.name)
	}

	for _, ref := range r.imageRefs {
		builder := s.buildIndex.findBuilderForConsumedImage(ref)
		if builder != nil {
			r.imageMapDeps = append(r.imageMapDeps, builder.ImageMapName())
			continue
		}

		metadata, ok := r.imageDepsMetadata[ref.String()]
		if ok && metadata.required {
			return fmt.Errorf("resource %q: image build %q not found", r.name, container.FamiliarString(ref))
		}
	}

	return nil
}

// k8sResourceForName returns the k8sResource with which this name is associated
// (either an existing resource or a new one).
func (s *tiltfileState) k8sResourceForName(name string) (*k8sResource, error) {
	if r, ok := s.k8sByName[name]; ok {
		return r, nil
	}

	// otherwise, create a new resource
	return s.makeK8sResource(name)
}

// Auto-infer the readiness mode
//
// CONVO:
// jazzdan: This still feels overloaded to me
// nicks: i think whenever we define a new CRD, we need to know:
//
//   - how to find the images in it
//   - how to find any pods it deploys (if they can't be found by owner references)
//   - if it should not expect pods at all (e.g., PostgresVersion)
//   - if it should wait for the pods to be ready before building the next resource (e.g., servers)
//   - if it should wait for the pods to be complete before building the next resource (e.g., jobs)
//
// and it's complicated a bit by the fact that there are both normal CRDs where the image
// shows up in the same place each time, and more meta CRDs (like HelmRelease) where it
// might appear in different places
//
// feels like we're still doing this very ad-hoc rather than holistically
func (s *tiltfileState) inferPodReadinessMode(r *k8sResource) model.PodReadinessMode {
	// The mode set directly on the resource has highest priority.
	if r.podReadinessMode != model.PodReadinessNone {
		return r.podReadinessMode
	}

	// Next, check if any of the k8s kinds have a mode.
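	// (Kinds get a mode when they're registered via k8s_kind().) If entities
	// of several matching kinds disagree, the first match in the priority
	// order below wins: Wait, then Ignore, then Succeeded.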
	hasMode := make(map[model.PodReadinessMode]bool)
	for _, e := range r.entities {
		for sel, info := range s.k8sKinds {
			if sel.Matches(e) {
				hasMode[info.PodReadinessMode] = true
			}
		}
	}

	modes := []model.PodReadinessMode{model.PodReadinessWait, model.PodReadinessIgnore, model.PodReadinessSucceeded}
	for _, m := range modes {
		if hasMode[m] {
			return m
		}
	}

	// Auto-infer based on context
	//
	// If the resource
	// 1) was manually grouped (i.e., we didn't find any images in it),
	// 2) doesn't have pod selectors, and
	// 3) doesn't depend on images,
	// assume that it will never create pods.
	if r.manuallyGrouped && len(r.extraPodSelectors) == 0 && len(r.imageMapDeps) == 0 {
		return model.PodReadinessIgnore
	}

	return model.PodReadinessWait
}

func (s *tiltfileState) translateK8s(resources []*k8sResource, updateSettings model.UpdateSettings) ([]model.Manifest, error) {
	var result []model.Manifest
	for _, r := range resources {
		mn := model.ManifestName(r.name)
		tm, err := starlarkTriggerModeToModel(s.triggerModeForResource(r.triggerMode), r.autoInit)
		if err != nil {
			return nil, errors.Wrapf(err, "error in resource %s options", mn)
		}

		var mds []model.ManifestName
		for _, md := range r.resourceDeps {
			mds = append(mds, model.ManifestName(md))
		}
		m := model.Manifest{
			Name:                 mn,
			TriggerMode:          tm,
			ResourceDependencies: mds,
		}

		m = m.WithLabels(r.labels)

		iTargets, err := s.imgTargetsForDeps(mn, r.imageMapDeps)
		if err != nil {
			return nil, errors.Wrapf(err, "getting image build info for %s", r.name)
		}

		for i, iTarget := range iTargets {
			if liveupdate.IsEmptySpec(iTarget.LiveUpdateSpec) {
				continue
			}
			iTarget.LiveUpdateReconciler = true
			iTargets[i] = iTarget
		}

		m = m.WithImageTargets(iTargets)

		k8sTarget, err := s.k8sDeployTarget(mn.TargetName(), r, iTargets, updateSettings)
		if err != nil {
			return nil, errors.Wrapf(err, "creating K8s deploy target for %s", r.name)
		}

		m = m.WithDeployTarget(k8sTarget)
		result = append(result, m)
	}

	err := maybeRestartContainerDeprecationError(result)
	if err != nil {
		return nil, err
	}

	return result, nil
}

func (s *tiltfileState) k8sDeployTarget(targetName model.TargetName, r *k8sResource, imageTargets []model.ImageTarget, updateSettings model.UpdateSettings) (model.K8sTarget, error) {
	var kdTemplateSpec *v1alpha1.KubernetesDiscoveryTemplateSpec
	if len(r.extraPodSelectors) != 0 {
		kdTemplateSpec = &v1alpha1.KubernetesDiscoveryTemplateSpec{
			ExtraSelectors: k8s.SetsAsLabelSelectors(r.extraPodSelectors),
		}
	}

	sinceTime := apis.NewTime(pkgInitTime)
	applySpec := v1alpha1.KubernetesApplySpec{
		Cluster:                         v1alpha1.ClusterNameDefault,
		Timeout:                         metav1.Duration{Duration: updateSettings.K8sUpsertTimeout()},
		PortForwardTemplateSpec:         k8s.PortForwardTemplateSpec(s.defaultedPortForwards(r.portForwards)),
		DiscoveryStrategy:               r.discoveryStrategy,
		KubernetesDiscoveryTemplateSpec: kdTemplateSpec,
		PodLogStreamTemplateSpec: &v1alpha1.PodLogStreamTemplateSpec{
			SinceTime: &sinceTime,
			IgnoreContainers: []string{
				string(container.IstioInitContainerName),
				string(container.IstioSidecarContainerName),
				string(container.LinkerdSidecarContainerName),
				string(container.LinkerdInitContainerName),
			},
		},
	}

	var deps []string
	var ignores []v1alpha1.IgnoreDef
	if r.customDeploy != nil {
		deps = r.customDeploy.deps
		ignores = append(ignores, model.DockerignoresToIgnores(r.customDeploy.ignores)...)
		applySpec.ApplyCmd = toKubernetesApplyCmd(r.customDeploy.applyCmd)
		applySpec.DeleteCmd = toKubernetesApplyCmd(r.customDeploy.deleteCmd)
		applySpec.RestartOn = &v1alpha1.RestartOnSpec{
			FileWatches: []string{apis.SanitizeName(fmt.Sprintf("%s:apply", targetName.String()))},
		}
	} else {
		entities := k8s.SortedEntities(r.entities)
		var err error
		applySpec.YAML, err = k8s.SerializeSpecYAML(entities)
		if err != nil {
			return model.K8sTarget{}, err
		}

		for _, locator := range s.k8sImageLocatorsList() {
			if k8s.LocatorMatchesOne(locator, entities) {
				applySpec.ImageLocators = append(applySpec.ImageLocators, locator.ToSpec())
			}
		}
	}

	ignores = append(ignores, repoIgnoresForPaths(deps)...)

	t, err := k8s.NewTarget(targetName, applySpec, s.inferPodReadinessMode(r), r.links)
	if err != nil {
		return model.K8sTarget{}, err
	}

	t = t.WithImageDependencies(model.FilterLiveUpdateOnly(r.imageMapDeps, imageTargets)).
		WithRefInjectCounts(r.imageRefInjectCounts()).
		WithPathDependencies(deps).
		WithIgnores(ignores)

	return t, nil
}

// Fill in default values in port-forwarding.
//
// In Kubernetes, "defaulted" is used as a verb to say "if a YAML value of a specification
// was left blank, the API server should fill in the value with a default". See:
//
// https://kubernetes.io/docs/tasks/manage-kubernetes-objects/declarative-config/#default-field-values
//
// In Tilt, we typically do this in the Tiltfile loader post-execution.
// Here, we default the port-forward Host to the WebHost.
//
// TODO(nick): I think the "right" way to do this is to give the starkit plugin system
// a "default"-ing hook that runs post-execution.
func (s *tiltfileState) defaultedPortForwards(pfs []model.PortForward) []model.PortForward {
	result := make([]model.PortForward, 0, len(pfs))
	for _, pf := range pfs {
		if pf.Host == "" {
			pf.Host = string(s.webHost)
		}
		result = append(result, pf)
	}
	return result
}

func (s *tiltfileState) validateLiveUpdatesForManifests(manifests []model.Manifest) error {
	for _, m := range manifests {
		err := s.validateLiveUpdatesForManifest(m)
		if err != nil {
			return err
		}
	}
	return nil
}

// validateLiveUpdatesForManifest checks that the image targets on the
// given manifest don't contain any illegal LiveUpdates.
func (s *tiltfileState) validateLiveUpdatesForManifest(m model.Manifest) error {
	g, err := model.NewTargetGraph(m.TargetSpecs())
	if err != nil {
		return err
	}

	for _, iTarg := range m.ImageTargets {
		isDeployed := m.IsImageDeployed(iTarg)

		// This check only applies to images with live updates.
		if liveupdate.IsEmptySpec(iTarg.LiveUpdateSpec) {
			continue
		}

		// TODO(nick): If an undeployed base image has a live-update component, we
		// should probably emit a different kind of warning.
		if !isDeployed {
			continue
		}

		err = s.validateLiveUpdate(iTarg, g)
		if err != nil {
			return err
		}
	}
	return nil
}

func (s *tiltfileState) validateLiveUpdate(iTarget model.ImageTarget, g model.TargetGraph) error {
	luSpec := iTarget.LiveUpdateSpec
	if liveupdate.IsEmptySpec(luSpec) {
		return nil
	}

	var watchedPaths []string
	err := g.VisitTree(iTarget, func(t model.TargetSpec) error {
		current, ok := t.(model.ImageTarget)
		if !ok {
			return nil
		}

		watchedPaths = append(watchedPaths, current.Dependencies()...)
		return nil
	})
	if err != nil {
		return err
	}

	// Verify that a) every sync step's src and b) every fall_back_on file is a child of a watched path.
	// (If not, we'll never even get "file changed" events for them--they're nonsensical input, throw an error.)
	for _, sync := range liveupdate.SyncSteps(luSpec) {
		if !ospath.IsChildOfOne(watchedPaths, sync.LocalPath) {
			return fmt.Errorf("sync step source '%s' is not a child of any watched filepaths (%v)",
				sync.LocalPath, watchedPaths)
		}
	}

	pathSet := liveupdate.FallBackOnFiles(luSpec)
	for _, path := range pathSet.Paths {
		resolved := path
		if !filepath.IsAbs(resolved) {
			resolved = filepath.Join(pathSet.BaseDirectory, path)
		}
		if !ospath.IsChildOfOne(watchedPaths, resolved) {
			return fmt.Errorf("fall_back_on path '%s' is not a child of any watched filepaths (%v)",
				resolved, watchedPaths)
		}
	}

	return nil
}

func (s *tiltfileState) validateDockerComposeVersion() error {
	const minimumDockerComposeVersion = "v1.28.3"

	dcVersion, _, err := s.dcCli.Version(s.ctx)
	if err != nil {
		logger.Get(s.ctx).Debugf("Failed to determine Docker Compose version: %v", err)
	} else if semver.Compare(dcVersion, minimumDockerComposeVersion) == -1 {
		return fmt.Errorf(
			"Tilt requires Docker Compose %s+ (you have %s). Please upgrade and re-launch Tilt.",
			minimumDockerComposeVersion,
			dcVersion)
	} else if semver.Major(dcVersion) == "v2" && semver.Compare(dcVersion, "v2.2") < 0 {
		logger.Get(s.ctx).Warnf("Using Docker Compose %s (version < 2.2) may result in errors or broken functionality.\n"+
			"For best results, we recommend upgrading to Docker Compose >= v2.2.0.", dcVersion)
	}
	return nil
}

func maybeRestartContainerDeprecationError(manifests []model.Manifest) error {
	var needsError []model.ManifestName
	for _, m := range manifests {
		if needsRestartContainerDeprecationError(m) {
			needsError = append(needsError, m.Name)
		}
	}

	if len(needsError) > 0 {
		return fmt.Errorf("%s", restartContainerDeprecationError(needsError))
	}
	return nil
}

func needsRestartContainerDeprecationError(m model.Manifest) bool {
	// 7/2/20: we've deprecated restart_container() in favor of the restart_process plugin.
	// If this is a k8s resource with a restart_container step, throw a deprecation error.
	// (restart_container is still allowed for Docker Compose resources)
	if !m.IsK8s() {
		return false
	}

	for _, iTarg := range m.ImageTargets {
		if liveupdate.ShouldRestart(iTarg.LiveUpdateSpec) {
			return true
		}
	}

	return false
}

// Grabs all image targets for the given references,
// as well as any of their transitive dependencies.
func (s *tiltfileState) imgTargetsForDeps(mn model.ManifestName, imageMapDeps []string) ([]model.ImageTarget, error) {
	claimStatus := make(map[string]claim, len(imageMapDeps))
	return s.imgTargetsForDepsHelper(mn, imageMapDeps, claimStatus)
}

func (s *tiltfileState) imgTargetsForDepsHelper(mn model.ManifestName, imageMapDeps []string, claimStatus map[string]claim) ([]model.ImageTarget, error) {
	iTargets := make([]model.ImageTarget, 0, len(imageMapDeps))
	for _, imName := range imageMapDeps {
		image := s.buildIndex.findBuilderByImageMapName(imName)
		if image == nil {
			return nil, fmt.Errorf("Internal error: no image builder found for id %s", imName)
		}

		claim := claimStatus[imName]
		if claim == claimFinished {
			// Skip this target, an earlier call has already built it
			continue
		} else if claim == claimPending {
			return nil, fmt.Errorf("Image dependency cycle: %s", image.configurationRef)
		}
		claimStatus[imName] = claimPending

		var overrideCommand *v1alpha1.ImageMapOverrideCommand
		if !image.entrypoint.Empty() {
			overrideCommand = &v1alpha1.ImageMapOverrideCommand{
				Command: image.entrypoint.Argv,
			}
		}

		iTarget := model.ImageTarget{
			ImageMapSpec: v1alpha1.ImageMapSpec{
				Selector:        image.configurationRef.RefFamiliarString(),
				MatchInEnvVars:  image.matchInEnvVars,
				MatchExact:      image.configurationRef.MatchExact(),
				OverrideCommand: overrideCommand,
				OverrideArgs:    image.overrideArgs,
			},
			LiveUpdateSpec: image.liveUpdate,
		}
		if !liveupdate.IsEmptySpec(image.liveUpdate) {
			iTarget.LiveUpdateName = liveupdate.GetName(mn, iTarget.ID())
		}

		contextIgnores, fileWatchIgnores, err := s.ignoresForImage(image)
		if err != nil {
			return nil, err
		}

		switch image.Type() {
		case DockerBuild:
			iTarget.DockerImageName = dockerimage.GetName(mn, iTarget.ID())

			spec := v1alpha1.DockerImageSpec{
				DockerfileContents: image.dbDockerfile.String(),
				Context:            image.dbBuildPath,
				Args:               image.dbBuildArgs,
				Target:             image.targetStage,
				SSHAgentConfigs:    image.sshSpecs,
				Secrets:            image.secretSpecs,
				Network:            image.network,
				CacheFrom:          image.cacheFrom,
				Pull:               image.pullParent,
				Platform:           image.platform,
				ExtraTags:          image.extraTags,
				ContextIgnores:     contextIgnores,
				ExtraHosts:         image.extraHosts,
			}
			iTarget = iTarget.WithBuildDetails(model.DockerBuild{DockerImageSpec: spec})
		case CustomBuild:
			iTarget.CmdImageName = cmdimage.GetName(mn, iTarget.ID())

			spec := v1alpha1.CmdImageSpec{
				Args:              image.customCommand.Argv,
				Dir:               image.customCommand.Dir,
				Env:               image.customCommand.Env,
				OutputTag:         image.customTag,
				OutputsImageRefTo: image.outputsImageRefTo,
			}
			if image.skipsLocalDocker {
				spec.OutputMode = v1alpha1.CmdImageOutputRemote
			} else if image.disablePush {
				spec.OutputMode = v1alpha1.CmdImageOutputLocalDockerAndRemote
			} else {
				spec.OutputMode = v1alpha1.CmdImageOutputLocalDocker
			}
			r := model.CustomBuild{
				CmdImageSpec: spec,
				Deps:         image.customDeps,
			}
			iTarget = iTarget.WithBuildDetails(r)
		case DockerComposeBuild:
			bd := model.DockerComposeBuild{
				Service: image.dockerComposeService,
				Context: image.dbBuildPath,
			}
			iTarget = iTarget.WithBuildDetails(bd)
		case UnknownBuild:
			return nil, fmt.Errorf("no build info for image %s", image.configurationRef.RefFamiliarString())
		}

		iTarget = iTarget.WithImageMapDeps(image.imageMapDeps).
			WithFileWatchIgnores(fileWatchIgnores)

		depTargets, err := s.imgTargetsForDepsHelper(mn, image.imageMapDeps, claimStatus)
		if err != nil {
			return nil, err
		}

		iTargets = append(iTargets, depTargets...)
		iTargets = append(iTargets, iTarget)

		claimStatus[imName] = claimFinished
	}
	return iTargets, nil
}

func (s *tiltfileState) translateDC(dc *dcResourceSet) ([]model.Manifest, error) {
	var result []model.Manifest

	for _, name := range dc.serviceNames {
		svc := dc.services[name]
		iTargets, err := s.imgTargetsForDeps(model.ManifestName(svc.Name), svc.ImageMapDeps)
		if err != nil {
			return nil, errors.Wrapf(err, "getting image build info for %s", svc.Name)
		}

		for _, iTarg := range iTargets {
			if iTarg.OverrideCommand != nil {
				return nil, fmt.Errorf("docker_build/custom_build.entrypoint not supported for Docker Compose resources")
			}
		}

		m, err := s.dcServiceToManifest(svc, dc, iTargets)
		if err != nil {
			return nil, err
		}

		result = append(result, m)
	}

	return result, nil
}

type claim int

const (
	claimNone claim = iota
	claimPending
	claimFinished
)

var _ claim = claimNone

func (s *tiltfileState) triggerModeFn(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
	var triggerMode triggerMode
	err := s.unpackArgs(fn.Name(), args, kwargs, "trigger_mode", &triggerMode)
	if err != nil {
		return nil, err
	}

	if s.triggerModeCallPosition.IsValid() {
		return starlark.None, fmt.Errorf("%s can only be called once. It was already called at %s", fn.Name(), s.triggerModeCallPosition.String())
	}

	s.triggerMode = triggerMode
	s.triggerModeCallPosition = thread.CallFrame(1).Pos

	return starlark.None, nil
}

func (s *tiltfileState) setTeam(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
	var teamID string
	err := s.unpackArgs(fn.Name(), args, kwargs, "team_id", &teamID)
	if err != nil {
		return nil, err
	}

	if len(teamID) == 0 {
		return nil, errors.New("team_id cannot be empty")
	}

	if s.teamID != "" {
		return nil, fmt.Errorf("team_id set multiple times (to '%s' and '%s')", s.teamID, teamID)
	}

	s.teamID = teamID

	return starlark.None, nil
}

func (s *tiltfileState) translateLocal() ([]model.Manifest, error) {
	var result []model.Manifest

	for _, r := range s.localResources {
		mn := model.ManifestName(r.name)
		tm, err := starlarkTriggerModeToModel(s.triggerModeForResource(r.triggerMode), r.autoInit)
		if err != nil {
			return nil, errors.Wrapf(err, "error in resource %s options", mn)
		}

		paths := append([]string{}, r.deps...)
		paths = append(paths, r.threadDir)

		ignores := repoIgnoresForPaths(paths)
		if len(r.ignores) != 0 {
			ignores = append(ignores, v1alpha1.IgnoreDef{
				BasePath: r.threadDir,
				Patterns: r.ignores,
			})
		}

		lt := model.NewLocalTarget(model.TargetName(r.name), r.updateCmd, r.serveCmd, r.deps).
			WithAllowParallel(r.allowParallel || r.updateCmd.Empty()).
			WithLinks(r.links).
			WithReadinessProbe(r.readinessProbe)
		lt.FileWatchIgnores = ignores

		var mds []model.ManifestName
		for _, md := range r.resourceDeps {
			mds = append(mds, model.ManifestName(md))
		}
		m := model.Manifest{
			Name:                 mn,
			TriggerMode:          tm,
			ResourceDependencies: mds,
		}.WithDeployTarget(lt)

		m = m.WithLabels(r.labels)

		result = append(result, m)
	}

	return result, nil
}

func (s *tiltfileState) tempDir() (*fwatch.TempDir, error) {
	if s.scratchDir == nil {
		dir, err := fwatch.NewDir("tiltfile")
		if err != nil {
			return dir, err
		}
		s.scratchDir = dir
		go func() {
			<-s.ctx.Done()
			_ = s.scratchDir.TearDown()
		}()
	}
	return s.scratchDir, nil
}

func (s *tiltfileState) sanitizeDependencies(ms []model.Manifest) error {
	// warn + delete resource deps that don't exist
	// error if resource deps are not a DAG

	knownResources := make(map[model.ManifestName]bool)
	for _, m := range ms {
		knownResources[m.Name] = true
	}

	// construct the graph and make sure all edges are valid
	edges := make(map[interface{}][]interface{})
	for i, m := range ms {
		var sanitizedDeps []model.ManifestName
		for _, b := range m.ResourceDependencies {
			if m.Name == b {
				return fmt.Errorf("resource %s specified a dependency on itself", m.Name)
			}
			if _, ok := knownResources[b]; !ok {
				logger.Get(s.ctx).Warnf("resource %s specified a dependency on unknown resource %s - dependency ignored", m.Name, b)
				continue
			}
			edges[m.Name] = append(edges[m.Name], b)
			sanitizedDeps = append(sanitizedDeps, b)
		}

		m.ResourceDependencies = sanitizedDeps
		ms[i] = m
	}

	// check for cycles
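	// (tarjan.Connections computes the strongly connected components of the
	// graph; any component with more than one node is a dependency cycle.)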
	connections := tarjan.Connections(edges)
	for _, g := range connections {
		if len(g) > 1 {
			var nodes []string
			for i := range g {
				nodes = append(nodes, string(g[len(g)-i-1].(model.ManifestName)))
			}
			nodes = append(nodes, string(g[len(g)-1].(model.ManifestName)))
			return fmt.Errorf("cycle detected in resource dependency graph: %s", strings.Join(nodes, " -> "))
		}
	}

	return nil
}

func toKubernetesApplyCmd(cmd model.Cmd) *v1alpha1.KubernetesApplyCmd {
	if cmd.Empty() {
		return nil
	}
	return &v1alpha1.KubernetesApplyCmd{
		Args: cmd.Argv,
		Dir:  cmd.Dir,
		Env:  cmd.Env,
	}
}

func (s *tiltfileState) ignoresForImage(image *dockerImage) (contextIgnores []v1alpha1.IgnoreDef, fileWatchIgnores []v1alpha1.IgnoreDef, err error) {
	dockerignores, err := s.dockerignoresForImage(image)
	if err != nil {
		return nil, nil, fmt.Errorf("reading dockerignore for %s: %v", image.configurationRef.RefFamiliarString(), err)
	}
	if image.tiltfilePath != "" {
		contextIgnores = append(contextIgnores, v1alpha1.IgnoreDef{BasePath: image.tiltfilePath})
	}
	contextIgnores = append(contextIgnores, s.repoIgnoresForImage(image)...)
	contextIgnores = append(contextIgnores, model.DockerignoresToIgnores(dockerignores)...)

	for i := range contextIgnores {
		fileWatchIgnores = append(fileWatchIgnores, *contextIgnores[i].DeepCopy())
	}
	if image.dbDockerfilePath != "" {
		// While this might seem unusual, we actually do NOT want the
		// ImageTarget to watch the Dockerfile itself, because the image
		// builder does not use the Dockerfile on disk! Instead, the
		// Tiltfile watches the Dockerfile and always reads it in as part
		// of execution, storing the full contents in the ImageTarget so
		// that we can rewrite it in memory to inject image references
		// and more.
		// As a result, if BOTH the Tiltfile and the ImageTarget watched
		// the Dockerfile, we'd have a race condition: the ImageTarget
		// build might see the change first and re-execute _before_ the
		// Tiltfile, meaning it would run with a stale version of the
		// Dockerfile.
		fileWatchIgnores = append(fileWatchIgnores, v1alpha1.IgnoreDef{BasePath: image.dbDockerfilePath})
	}

	if image.Type() == DockerComposeBuild {
		// Docker Compose local volumes are mounted into the running container,
		// so we don't want to watch these paths, as that'd trigger rebuilds
		// instead of the desired Live Update-ish behavior.
		// Note that they ARE eligible for usage within the Docker context, as
		// it's a common pattern to include some files (e.g. config) in the
		// image but then mount a local volume on top of it for local dev.
		for _, p := range image.dockerComposeLocalVolumePaths {
			fileWatchIgnores = append(fileWatchIgnores, v1alpha1.IgnoreDef{BasePath: p})
		}
	}

	return
}

var _ starkit.Plugin = &tiltfileState{}
var _ starkit.OnExecPlugin = &tiltfileState{}
var _ starkit.OnBuiltinCallPlugin = &tiltfileState{}