github.com/tilt-dev/tilt@v0.36.0/internal/engine/buildcontrol/build_control.go (about)

     1  package buildcontrol
     2  
     3  import (
     4  	"time"
     5  
     6  	v1 "k8s.io/api/core/v1"
     7  
     8  	"github.com/tilt-dev/tilt/internal/build"
     9  	"github.com/tilt-dev/tilt/internal/controllers/apis/liveupdate"
    10  	"github.com/tilt-dev/tilt/internal/store"
    11  	"github.com/tilt-dev/tilt/internal/store/k8sconv"
    12  	"github.com/tilt-dev/tilt/internal/store/liveupdates"
    13  	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
    14  	"github.com/tilt-dev/tilt/pkg/model"
    15  )
    16  
// NextTargetToBuild chooses the next manifest to build, or nil if nothing
// is currently eligible.
//
// The returned HoldSet records, for every target that is NOT chosen, why it
// is waiting; the UI displays these reasons. The ordering of checks below is
// significant: each stage either returns early or adds holds that narrow the
// candidate set for later stages.
func NextTargetToBuild(state store.EngineState) (*store.ManifestTarget, HoldSet) {
	holds := HoldSet{}

	// Only grab the targets that need any builds at all,
	// so that we don't put holds on builds that aren't even eligible.
	targets := FindTargetsNeedingAnyBuild(state)

	// Don't build anything if there are pending config file changes.
	// We want the Tiltfile to re-run first.
	for _, ms := range state.GetTiltfileStates() {
		tiltfileHasPendingChanges, _ := ms.HasPendingChanges()
		if tiltfileHasPendingChanges {
			holds.Fill(targets, store.Hold{
				Reason: store.HoldReasonTiltfileReload,
				HoldOn: []model.TargetID{ms.TargetID()},
			})
			return nil, holds
		}
	}

	// We do not know whether targets are enabled or disabled until their configmaps + uiresources are synced
	// and reconciled. This happens very quickly after the first Tiltfile execution.
	// If any targets have an unknown EnableStatus, then we don't have enough information to schedule builds:
	// - If we treat an unknown as disabled but it is actually enabled, then we break our heuristic prioritization
	//   (e.g., we might schedule k8s resources before local resources).
	// - If we treat an unknown as enabled but it is actually disabled, then we start logging + side-effecting
	//   a build that might immediately be canceled.
	if pending := TargetsWithPendingEnableStatus(targets); len(pending) > 0 {
		holds.Fill(targets, store.Hold{
			Reason: store.HoldReasonTiltfileReload,
			HoldOn: pending,
		})
		return nil, holds
	}

	// If we're already building an unparallelizable local target, bail immediately.
	if mn, _, building := IsBuildingUnparallelizableLocalTarget(state); building {
		holds.Fill(targets, store.Hold{
			Reason: store.HoldReasonWaitingForUnparallelizableTarget,
			HoldOn: []model.TargetID{mn.TargetID()},
		})
		return nil, holds
	}

	if IsBuildingAnything(state) {
		// If we're building a target already, remove anything that's not parallelizable
		// with what's currently building.
		HoldUnparallelizableLocalTargets(targets, holds)
	}

	// Uncategorized YAML might contain namespaces or volumes that
	// we don't want to parallelize.
	//
	// TODO(nick): Long-term, we should try to infer dependencies between Kubernetes
	// resources. A general library might make sense.
	if IsBuildingUncategorizedYAML(state) {
		HoldK8sTargets(targets, holds)
	}

	HoldTargetsWithBuildingComponents(state, targets, holds)
	HoldTargetsWaitingOnDependencies(state, targets, holds)
	HoldTargetsWaitingOnCluster(state, targets, holds)

	// If any of the manifest targets haven't been built yet, build them now.
	targets = holds.RemoveIneligibleTargets(targets)
	unbuilt := FindTargetsNeedingInitialBuild(targets)

	if len(unbuilt) > 0 {
		return NextUnbuiltTargetToBuild(unbuilt), holds
	}

	// Check to see if any targets are currently being successfully reconciled,
	// so that a full rebuild should be held back. This takes manual triggers into account.
	HoldLiveUpdateTargetsHandledByReconciler(state, targets, holds)

	// Next prioritize builds that have been manually triggered.
	for _, mn := range state.TriggerQueue {
		mt, ok := state.ManifestTargets[mn]
		if ok && holds.IsEligible(mt) {
			return mt, holds
		}
	}

	// Check to see if any targets
	//
	// 1) Have live updates
	// 2) All the pending file changes are completely captured by the live updates
	// 3) The runtime is in a pending state
	//
	// This will ensure that a file change doesn't accidentally overwrite
	// a pending pod.
	//
	// https://github.com/tilt-dev/tilt/issues/3759
	HoldLiveUpdateTargetsWaitingOnDeploy(state, targets, holds)

	targets = holds.RemoveIneligibleTargets(targets)

	return EarliestPendingAutoTriggerTarget(targets), holds
}
   119  
   120  func NextManifestNameToBuild(state store.EngineState) model.ManifestName {
   121  	mt, _ := NextTargetToBuild(state)
   122  	if mt == nil {
   123  		return ""
   124  	}
   125  	return mt.Manifest.Name
   126  }
   127  
   128  func waitingOnDependencies(state store.EngineState, mt *store.ManifestTarget) []model.TargetID {
   129  	// dependencies only block the first build, so if this manifest has ever built, ignore dependencies
   130  	if mt.State.StartedFirstBuild() {
   131  		return nil
   132  	}
   133  
   134  	var waitingOn []model.TargetID
   135  	for _, mn := range mt.Manifest.ResourceDependencies {
   136  		ms, ok := state.ManifestState(mn)
   137  		if !ok || ms == nil || ms.RuntimeState == nil || !ms.RuntimeState.HasEverBeenReadyOrSucceeded() {
   138  			waitingOn = append(waitingOn, mn.TargetID())
   139  		}
   140  	}
   141  
   142  	return waitingOn
   143  }
   144  
   145  // Check to see if this is an ImageTarget where the built image
   146  // can be potentially reused.
   147  //
   148  // Note that this is a quick heuristic check for making parallelization decisions.
   149  //
   150  // The "correct" decision about whether an image can be re-used is more complex
   151  // and expensive, and includes:
   152  //
   153  // 1) Checks of dependent images
   154  // 2) Live-update sync checks
   155  // 3) Checks that the image still exists on the image store
   156  //
   157  // But in this particular context, we can cheat a bit.
   158  func canReuseImageTargetHeuristic(spec model.TargetSpec, status *store.BuildStatus) bool {
   159  	id := spec.ID()
   160  	if id.Type != model.TargetTypeImage {
   161  		return false
   162  	}
   163  
   164  	// NOTE(nick): A more accurate check might see if the pending file changes
   165  	// are potentially live-updatable, but this is OK for the case of a base image.
   166  	if status.HasPendingFileChanges() || status.HasPendingDependencyChanges() {
   167  		return false
   168  	}
   169  
   170  	result := status.LastResult
   171  	if result == nil {
   172  		return false
   173  	}
   174  
   175  	switch result.(type) {
   176  	case store.ImageBuildResult:
   177  		return true
   178  	}
   179  	return false
   180  }
   181  
   182  func HoldTargetsWithBuildingComponents(state store.EngineState, mts []*store.ManifestTarget, holds HoldSet) {
   183  	building := make(map[model.TargetID]bool)
   184  
   185  	for _, mt := range state.Targets() {
   186  		if mt.State.IsBuilding() {
   187  			building[mt.Manifest.ID()] = true
   188  
   189  			for _, spec := range mt.Manifest.TargetSpecs() {
   190  				bs, ok := mt.State.BuildStatus(spec.ID())
   191  				if !ok {
   192  					continue
   193  				}
   194  
   195  				if canReuseImageTargetHeuristic(spec, bs) {
   196  					continue
   197  				}
   198  
   199  				building[spec.ID()] = true
   200  			}
   201  		}
   202  	}
   203  
   204  	hasBuildingComponent := func(mt *store.ManifestTarget) ([]model.TargetID, bool) {
   205  		var targetIDs []model.TargetID
   206  		var shouldHold bool
   207  
   208  		m := mt.Manifest
   209  		if building[m.ID()] {
   210  			// mark as holding but don't add self as a dependency
   211  			shouldHold = true
   212  		}
   213  
   214  		for _, spec := range m.TargetSpecs() {
   215  			bs, ok := mt.State.BuildStatus(spec.ID())
   216  			if !ok {
   217  				continue
   218  			}
   219  
   220  			if canReuseImageTargetHeuristic(spec, bs) {
   221  				continue
   222  			}
   223  
   224  			if building[spec.ID()] {
   225  				targetIDs = append(targetIDs, spec.ID())
   226  				shouldHold = true
   227  			}
   228  		}
   229  		return targetIDs, shouldHold
   230  	}
   231  
   232  	for _, mt := range mts {
   233  		if waitingOn, shouldHold := hasBuildingComponent(mt); shouldHold {
   234  			holds.AddHold(mt, store.Hold{
   235  				Reason: store.HoldReasonBuildingComponent,
   236  				HoldOn: waitingOn,
   237  			})
   238  		}
   239  	}
   240  }
   241  
   242  func targetsByCluster(mts []*store.ManifestTarget) map[string][]*store.ManifestTarget {
   243  	clusters := make(map[string][]*store.ManifestTarget)
   244  	for _, mt := range mts {
   245  		clusterName := mt.Manifest.ClusterName()
   246  		if clusterName == "" {
   247  			continue
   248  		}
   249  
   250  		targets, ok := clusters[clusterName]
   251  		if !ok {
   252  			targets = []*store.ManifestTarget{}
   253  		}
   254  		clusters[clusterName] = append(targets, mt)
   255  	}
   256  	return clusters
   257  }
   258  
   259  // We use the cluster to detect what architecture we're building for.
   260  // Until the cluster connection has been established, we block any
   261  // image builds.
   262  func HoldTargetsWaitingOnCluster(state store.EngineState, mts []*store.ManifestTarget, holds HoldSet) {
   263  	for clusterName, targets := range targetsByCluster(mts) {
   264  		cluster, ok := state.Clusters[clusterName]
   265  		isClusterOK := ok && cluster.Status.Error == "" && cluster.Status.Arch != ""
   266  		if isClusterOK {
   267  			continue
   268  		}
   269  
   270  		gvk := v1alpha1.SchemeGroupVersion.WithKind("Cluster")
   271  		for _, mt := range targets {
   272  			holds.AddHold(mt, store.Hold{
   273  				Reason: store.HoldReasonCluster,
   274  				OnRefs: []v1alpha1.UIResourceStateWaitingOnRef{{
   275  					Group:      gvk.Group,
   276  					APIVersion: gvk.Version,
   277  					Kind:       gvk.Kind,
   278  					Name:       clusterName,
   279  				}},
   280  			})
   281  		}
   282  	}
   283  }
   284  
   285  func HoldTargetsWaitingOnDependencies(state store.EngineState, mts []*store.ManifestTarget, holds HoldSet) {
   286  	for _, mt := range mts {
   287  		if waitingOn := waitingOnDependencies(state, mt); len(waitingOn) != 0 {
   288  			holds.AddHold(mt, store.Hold{
   289  				Reason: store.HoldReasonWaitingForDep,
   290  				HoldOn: waitingOn,
   291  			})
   292  		}
   293  	}
   294  }
   295  
   296  // Helper function for ordering targets that have never been built before.
   297  func NextUnbuiltTargetToBuild(unbuilt []*store.ManifestTarget) *store.ManifestTarget {
   298  	// Local resources come before all cluster resources, because they
   299  	// can't be parallelized. (LR's may change things on disk that cluster
   300  	// resources then pull in).
   301  	localTargets := FindLocalTargets(unbuilt)
   302  	if len(localTargets) > 0 {
   303  		return localTargets[0]
   304  	}
   305  
   306  	// unresourced YAML goes next
   307  	unresourced := FindUnresourcedYAML(unbuilt)
   308  	if unresourced != nil {
   309  		return unresourced
   310  	}
   311  
   312  	// If this is Kubernetes, unbuilt resources go first.
   313  	// (If this is Docker Compose, we want to trust the ordering
   314  	// that docker-compose put things in.)
   315  	deployOnlyK8sTargets := FindDeployOnlyK8sManifestTargets(unbuilt)
   316  	if len(deployOnlyK8sTargets) > 0 {
   317  		return deployOnlyK8sTargets[0]
   318  	}
   319  
   320  	return unbuilt[0]
   321  }
   322  
   323  func FindUnresourcedYAML(targets []*store.ManifestTarget) *store.ManifestTarget {
   324  	for _, target := range targets {
   325  		if target.Manifest.ManifestName() == model.UnresourcedYAMLManifestName {
   326  			return target
   327  		}
   328  	}
   329  	return nil
   330  }
   331  
   332  func FindDeployOnlyK8sManifestTargets(targets []*store.ManifestTarget) []*store.ManifestTarget {
   333  	result := []*store.ManifestTarget{}
   334  	for _, target := range targets {
   335  		if target.Manifest.IsK8s() && len(target.Manifest.ImageTargets) == 0 {
   336  			result = append(result, target)
   337  		}
   338  	}
   339  	return result
   340  }
   341  
   342  func FindLocalTargets(targets []*store.ManifestTarget) []*store.ManifestTarget {
   343  	result := []*store.ManifestTarget{}
   344  	for _, target := range targets {
   345  		if target.Manifest.IsLocal() {
   346  			result = append(result, target)
   347  		}
   348  	}
   349  	return result
   350  }
   351  
   352  func HoldUnparallelizableLocalTargets(targets []*store.ManifestTarget, holds map[model.ManifestName]store.Hold) {
   353  	for _, target := range targets {
   354  		if target.Manifest.IsLocal() && !target.Manifest.LocalTarget().AllowParallel {
   355  			holds[target.Manifest.Name] = store.Hold{Reason: store.HoldReasonIsUnparallelizableTarget}
   356  		}
   357  	}
   358  }
   359  
   360  func HoldK8sTargets(targets []*store.ManifestTarget, holds HoldSet) {
   361  	for _, target := range targets {
   362  		if target.Manifest.IsK8s() {
   363  			holds.AddHold(target, store.Hold{
   364  				Reason: store.HoldReasonWaitingForUncategorized,
   365  				HoldOn: []model.TargetID{model.UnresourcedYAMLManifestName.TargetID()},
   366  			})
   367  		}
   368  	}
   369  }
   370  
   371  func TargetsWithPendingEnableStatus(targets []*store.ManifestTarget) []model.TargetID {
   372  	var result []model.TargetID
   373  	for _, target := range targets {
   374  		if target.State.DisableState == v1alpha1.DisableStatePending {
   375  			result = append(result, target.Spec().ID())
   376  		}
   377  	}
   378  	return result
   379  }
   380  
   381  func IsBuildingAnything(state store.EngineState) bool {
   382  	mts := state.Targets()
   383  	for _, mt := range mts {
   384  		if mt.State.IsBuilding() {
   385  			return true
   386  		}
   387  	}
   388  	return false
   389  }
   390  
   391  func IsBuildingUnparallelizableLocalTarget(state store.EngineState) (model.ManifestName, model.TargetName, bool) {
   392  	mts := state.Targets()
   393  	for _, mt := range mts {
   394  		if mt.State.IsBuilding() && mt.Manifest.IsLocal() &&
   395  			!mt.Manifest.LocalTarget().AllowParallel {
   396  			return mt.Manifest.Name, mt.Manifest.LocalTarget().Name, true
   397  		}
   398  	}
   399  	return "", "", false
   400  }
   401  
   402  func IsBuildingUncategorizedYAML(state store.EngineState) bool {
   403  	mts := state.Targets()
   404  	for _, mt := range mts {
   405  		if mt.State.IsBuilding() && mt.Manifest.Name == model.UnresourcedYAMLManifestName {
   406  			return true
   407  		}
   408  	}
   409  	return false
   410  }
   411  
   412  // Go through all the manifests, and check:
   413  // 1) all pending file changes
   414  // 2) all pending dependency changes (where an image has been rebuilt by another manifest), and
   415  // 3) all pending manifest changes
   416  // The earliest one is the one we want.
   417  //
   418  // If no targets are pending, return nil
   419  func EarliestPendingAutoTriggerTarget(targets []*store.ManifestTarget) *store.ManifestTarget {
   420  	var choice *store.ManifestTarget
   421  	earliest := time.Now()
   422  
   423  	for _, mt := range targets {
   424  		ok, newTime := mt.State.HasPendingChangesBeforeOrEqual(earliest)
   425  		if ok {
   426  			if !mt.Manifest.TriggerMode.AutoOnChange() {
   427  				// Don't trigger update of a manual manifest just b/c if has
   428  				// pending changes; must come through the TriggerQueue, above.
   429  				continue
   430  			}
   431  			if choice != nil && newTime.Equal(earliest) {
   432  				// If two choices are equal, use the first one in target order.
   433  				continue
   434  			}
   435  			choice = mt
   436  			earliest = newTime
   437  		}
   438  	}
   439  
   440  	return choice
   441  }
   442  
   443  // Grab all the targets that are build-eligible from
   444  // the engine state.
   445  //
   446  // We apply this filter first, then layer on individual build decisions about
   447  // what to build next. This MUST be the union of all checks in all downstream
   448  // build decisions in NextTargetToBuild.
   449  func FindTargetsNeedingAnyBuild(state store.EngineState) []*store.ManifestTarget {
   450  	queue := make(map[model.ManifestName]bool, len(state.TriggerQueue))
   451  	for _, mn := range state.TriggerQueue {
   452  		queue[mn] = true
   453  	}
   454  
   455  	result := []*store.ManifestTarget{}
   456  	for _, target := range state.Targets() {
   457  		// Skip disabled targets.
   458  		if target.State.DisableState == v1alpha1.DisableStateDisabled {
   459  			continue
   460  		}
   461  
   462  		if !target.State.StartedFirstBuild() && target.Manifest.TriggerMode.AutoInitial() {
   463  			result = append(result, target)
   464  			continue
   465  		}
   466  
   467  		if queue[target.Manifest.Name] {
   468  			result = append(result, target)
   469  			continue
   470  		}
   471  
   472  		hasPendingChanges, _ := target.State.HasPendingChanges()
   473  		if hasPendingChanges && target.Manifest.TriggerMode.AutoOnChange() {
   474  			result = append(result, target)
   475  			continue
   476  		}
   477  	}
   478  
   479  	return result
   480  }
   481  
   482  func FindTargetsNeedingInitialBuild(targets []*store.ManifestTarget) []*store.ManifestTarget {
   483  	result := []*store.ManifestTarget{}
   484  	for _, target := range targets {
   485  		if !target.State.StartedFirstBuild() && target.Manifest.TriggerMode.AutoInitial() {
   486  			result = append(result, target)
   487  		}
   488  	}
   489  	return result
   490  }
   491  
   492  func HoldLiveUpdateTargetsWaitingOnDeploy(state store.EngineState, mts []*store.ManifestTarget, holds HoldSet) {
   493  	for _, mt := range mts {
   494  		if IsLiveUpdateTargetWaitingOnDeploy(state, mt) {
   495  			holds.AddHold(mt, store.Hold{Reason: store.HoldReasonWaitingForDeploy})
   496  		}
   497  	}
   498  }
   499  
// IsLiveUpdateTargetWaitingOnDeploy reports whether mt's pending file
// changes should be held back because they are fully live-updatable and the
// runtime object they would update is still pending (no running containers
// yet). Holding prevents a file change from accidentally overwriting a
// pending pod (https://github.com/tilt-dev/tilt/issues/3759).
func IsLiveUpdateTargetWaitingOnDeploy(state store.EngineState, mt *store.ManifestTarget) bool {
	// We only care about targets where file changes are the ONLY build reason.
	if mt.NextBuildReason() != model.BuildReasonFlagChangedFiles {
		return false
	}

	// Make sure the last build succeeded.
	if mt.State.LastBuild().Empty() || mt.State.LastBuild().Error != nil {
		return false
	}

	// Never hold back a deploy in an error state.
	if mt.State.RuntimeState.RuntimeStatus() == v1alpha1.RuntimeStatusError {
		return false
	}

	// Go through all the files, and make sure they're live-update-able.
	for id, status := range mt.State.BuildStatuses {
		if !status.HasPendingFileChanges() {
			continue
		}

		// We have an image target with changes!
		// First, make sure that all the changes match a sync.
		files := status.PendingFileChangesList()
		iTarget := mt.Manifest.ImageTargetWithID(id)
		luSpec := iTarget.LiveUpdateSpec
		_, pathsMatchingNoSync, err := build.FilesToPathMappings(files, liveupdate.SyncSteps(luSpec))
		if err != nil || len(pathsMatchingNoSync) > 0 {
			return false
		}

		// If any changed files match a FallBackOn file, fall back to next BuildAndDeployer
		anyMatch, _, err := liveupdate.FallBackOnFiles(luSpec).AnyMatch(files)
		if err != nil || anyMatch {
			return false
		}

		// All changes match a sync!
		//
		// We only care about targets where there are 0 running containers for the current build.
		// This is the mechanism that live update uses to determine if the container to live-update
		// is still pending.
		if mt.Manifest.IsK8s() && iTarget.LiveUpdateSpec.Selector.Kubernetes != nil {
			kResource := state.KubernetesResources[mt.Manifest.Name.String()]
			if kResource == nil {
				return true // Wait for the k8s resource to appear.
			}

			cInfos, err := liveupdates.RunningContainersForOnePod(
				iTarget.LiveUpdateSpec.Selector.Kubernetes,
				kResource,
				state.ImageMaps[iTarget.ImageMapName()],
			)
			if err != nil {
				return false
			}

			// Containers are already running: nothing to wait for.
			if len(cInfos) != 0 {
				return false
			}

			// If the container in this pod is in a crash loop, then don't hold back
			// updates until the deploy finishes -- this is a pretty good signal
			// that it might not become healthy.
			pod := k8sconv.MostRecentPod(kResource.FilteredPods)
			for _, c := range pod.Containers {
				if c.Restarts > 0 {
					return false
				}
			}

			// If the pod is in a finished state, then the containers
			// may never re-enter Running.
			if pod.Phase == string(v1.PodSucceeded) || pod.Phase == string(v1.PodFailed) {
				return false
			}

		} else if mt.Manifest.IsDC() {
			// Docker Compose: only keep waiting while the service has no
			// running containers yet.
			dcs := state.DockerComposeServices[mt.Manifest.Name.String()]
			cInfos := liveupdates.RunningContainersForDC(dcs)
			if len(cInfos) != 0 {
				return false
			}
		} else {
			// Not a runtime we know how to live-update; don't hold.
			return false
		}
	}

	// If we've gotten this far, that means we should wait until this deploy
	// finishes before processing these file changes.
	return true
}
   593  
// HoldLiveUpdateTargetsHandledByReconciler holds back live update targets
// whose pending changes are being successfully handled by a LiveUpdate
// reconciler, so a full rebuild isn't needed.
func HoldLiveUpdateTargetsHandledByReconciler(state store.EngineState, mts []*store.ManifestTarget, holds HoldSet) {
	for _, mt := range mts {
		// Most types of build reasons trigger a full rebuild. The two exceptions are:
		// - File-change only
		// - Live-update eligible manual triggers
		reason := mt.NextBuildReason()
		isLiveUpdateEligible := reason == model.BuildReasonFlagChangedFiles
		if reason.HasTrigger() {
			isLiveUpdateEligible = IsLiveUpdateEligibleTrigger(mt.Manifest, reason)
		}

		if !isLiveUpdateEligible {
			continue
		}

		// Changes to the deploy target can't be live-updated.
		if mt.Manifest.DeployTarget != nil {
			bs, hasBuildStatus := mt.State.BuildStatuses[mt.Manifest.DeployTarget.ID()]
			hasPendingChanges := hasBuildStatus && bs.HasPendingFileChanges()
			if hasPendingChanges {
				continue
			}
		}

		// Every image target with pending changes must have at least one
		// non-failing LiveUpdate reconciler handling it; otherwise a full
		// rebuild is required and we must not hold.
		allHandledByLiveUpdate := true
		iTargets := mt.Manifest.ImageTargets
		for _, iTarget := range iTargets {
			bs, hasBuildStatus := mt.State.BuildStatuses[iTarget.ID()]
			hasPendingChanges := hasBuildStatus && bs.HasPendingFileChanges()
			if !hasPendingChanges {
				continue
			}

			handlers := findLiveUpdateHandlers(iTarget, mt, &state)
			if len(handlers) == 0 {
				allHandledByLiveUpdate = false
			}

			for _, lu := range handlers {
				isFailing := lu.Status.Failed != nil
				if isFailing {
					allHandledByLiveUpdate = false
				}
			}

			if !allHandledByLiveUpdate {
				break
			}
		}

		if allHandledByLiveUpdate {
			holds.AddHold(mt, store.Hold{Reason: store.HoldReasonReconciling})
		}
	}
}
   651  
   652  // Find all the live update objects responsible for syncing this image.
   653  //
   654  // Base image live updates are modeled with a LiveUpdate object attached to
   655  // each deploy image.
   656  //
   657  // The LiveUpdate watches:
   658  // - The Deploy image's container
   659  // - The Base image's filewatch
   660  //
   661  // The Tiltfile assembler will guarantee that there will be one LiveUpdate
   662  // object for each deployed image, and they will all sync in the same way.
   663  func findLiveUpdateHandlers(changedImage model.ImageTarget, mt *store.ManifestTarget, state *store.EngineState) []*v1alpha1.LiveUpdate {
   664  	result := []*v1alpha1.LiveUpdate{}
   665  
   666  	for _, candidate := range mt.Manifest.ImageTargets {
   667  		isHandledByReconciler := !liveupdate.IsEmptySpec(candidate.LiveUpdateSpec) &&
   668  			candidate.LiveUpdateReconciler
   669  		if !isHandledByReconciler {
   670  			continue
   671  		}
   672  
   673  		lu := state.LiveUpdates[candidate.LiveUpdateName]
   674  		if lu == nil {
   675  			continue
   676  		}
   677  
   678  		isHandled := false
   679  		for _, source := range lu.Spec.Sources {
   680  			// Relies on the assumption that image targets create filewatches
   681  			// with the same name.
   682  			if source.FileWatch == changedImage.ID().String() {
   683  				isHandled = true
   684  				break
   685  			}
   686  		}
   687  
   688  		if isHandled {
   689  			result = append(result, lu)
   690  		}
   691  	}
   692  
   693  	return result
   694  }
   695  
   696  // In automatic trigger mode:
   697  // - Clicking the trigger button always triggers a full rebuild.
   698  //
   699  // In manual trigger mode:
   700  // - If there are no pending changes, clicking the trigger button triggers a full rebuild.
   701  // - If there are only pending changes, clicking the trigger button triggers a live-update.
   702  func IsLiveUpdateEligibleTrigger(manifest model.Manifest, reason model.BuildReason) bool {
   703  	return reason.HasTrigger() &&
   704  		reason.WithoutTriggers() == model.BuildReasonFlagChangedFiles &&
   705  		!manifest.TriggerMode.AutoOnChange()
   706  }