github.com/tilt-dev/tilt@v0.33.15-0.20240515162809-0a22ed45d8a0/internal/engine/buildcontrol/build_control.go

package buildcontrol

import (
	"time"

	v1 "k8s.io/api/core/v1"

	"github.com/tilt-dev/tilt/internal/build"
	"github.com/tilt-dev/tilt/internal/controllers/apis/liveupdate"
	"github.com/tilt-dev/tilt/internal/store"
	"github.com/tilt-dev/tilt/internal/store/k8sconv"
	"github.com/tilt-dev/tilt/internal/store/liveupdates"
	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
	"github.com/tilt-dev/tilt/pkg/model"
)

// NextTargetToBuild chooses the manifest to build next.
//
// The returned HoldSet is used in the UI to explain why a resource is waiting.
func NextTargetToBuild(state store.EngineState) (*store.ManifestTarget, HoldSet) {
	holds := HoldSet{}

	// Only grab the targets that need any builds at all,
	// so that we don't put holds on builds that aren't even eligible.
	targets := FindTargetsNeedingAnyBuild(state)

	// Don't build anything if there are pending config file changes.
	// We want the Tiltfile to re-run first.
	for _, ms := range state.GetTiltfileStates() {
		tiltfileHasPendingChanges, _ := ms.HasPendingChanges()
		if tiltfileHasPendingChanges {
			holds.Fill(targets, store.Hold{
				Reason: store.HoldReasonTiltfileReload,
				HoldOn: []model.TargetID{ms.TargetID()},
			})
			return nil, holds
		}
	}

	// We do not know whether targets are enabled or disabled until their configmaps + uiresources are synced
	// and reconciled. This happens very quickly after the first Tiltfile execution.
	// If any targets have an unknown EnableStatus, then we don't have enough information to schedule builds:
	// - If we treat an unknown as disabled but it is actually enabled, then we break our heuristic prioritization
	//   (e.g., we might schedule k8s resources before local resources).
	// - If we treat an unknown as enabled but it is actually disabled, then we start logging + side-effecting
	//   a build that might immediately be canceled.
	if pending := TargetsWithPendingEnableStatus(targets); len(pending) > 0 {
		holds.Fill(targets, store.Hold{
			Reason: store.HoldReasonTiltfileReload,
			HoldOn: pending,
		})
		return nil, holds
	}

	// If we're already building an unparallelizable local target, bail immediately.
	if mn, _, building := IsBuildingUnparallelizableLocalTarget(state); building {
		holds.Fill(targets, store.Hold{
			Reason: store.HoldReasonWaitingForUnparallelizableTarget,
			HoldOn: []model.TargetID{mn.TargetID()},
		})
		return nil, holds
	}

	if IsBuildingAnything(state) {
		// If we're building a target already, remove anything that's not parallelizable
		// with what's currently building.
		HoldUnparallelizableLocalTargets(targets, holds)
	}

	// Uncategorized YAML might contain namespaces or volumes that
	// we don't want to parallelize.
	//
	// TODO(nick): Long-term, we should try to infer dependencies between Kubernetes
	// resources. A general library might make sense.
	if IsBuildingUncategorizedYAML(state) {
		HoldK8sTargets(targets, holds)
	}

	HoldTargetsWithBuildingComponents(state, targets, holds)
	HoldTargetsWaitingOnDependencies(state, targets, holds)
	HoldTargetsWaitingOnCluster(state, targets, holds)

	// If any of the manifest targets haven't been built yet, build them now.
	targets = holds.RemoveIneligibleTargets(targets)
	unbuilt := FindTargetsNeedingInitialBuild(targets)

	if len(unbuilt) > 0 {
		return NextUnbuiltTargetToBuild(unbuilt), holds
	}

	// Check to see if any targets are currently being successfully reconciled,
	// and so full rebuilds should be held back. This takes manual triggers into account.
	HoldLiveUpdateTargetsHandledByReconciler(state, targets, holds)

	// Next, prioritize builds that have been manually triggered.
	for _, mn := range state.TriggerQueue {
		mt, ok := state.ManifestTargets[mn]
		if ok && holds.IsEligible(mt) {
			return mt, holds
		}
	}

	// Check to see if any targets:
	//
	// 1) Have live updates
	// 2) Have all their pending file changes completely captured by the live updates
	// 3) Have a runtime in a pending state
	//
	// This ensures that a file change doesn't accidentally overwrite
	// a pending pod.
	//
	// https://github.com/tilt-dev/tilt/issues/3759
	HoldLiveUpdateTargetsWaitingOnDeploy(state, targets, holds)

	targets = holds.RemoveIneligibleTargets(targets)

	return EarliestPendingAutoTriggerTarget(targets), holds
}
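
// Illustrative consumer (a sketch, not part of this package's API): a build
// controller loop might use NextTargetToBuild along these lines, where
// dispatchBuild is a hypothetical helper.
//
//	mt, holds := NextTargetToBuild(state)
//	if mt == nil {
//		// Nothing is buildable right now; the HoldSet explains each
//		// waiting resource to the UI.
//		return
//	}
//	dispatchBuild(mt) // hypothetical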

func NextManifestNameToBuild(state store.EngineState) model.ManifestName {
	mt, _ := NextTargetToBuild(state)
	if mt == nil {
		return ""
	}
	return mt.Manifest.Name
}

func waitingOnDependencies(state store.EngineState, mt *store.ManifestTarget) []model.TargetID {
	// dependencies only block the first build, so if this manifest has ever built, ignore dependencies
	if mt.State.StartedFirstBuild() {
		return nil
	}

	var waitingOn []model.TargetID
	for _, mn := range mt.Manifest.ResourceDependencies {
		ms, ok := state.ManifestState(mn)
		if !ok || ms == nil || ms.RuntimeState == nil || !ms.RuntimeState.HasEverBeenReadyOrSucceeded() {
			waitingOn = append(waitingOn, mn.TargetID())
		}
	}

	return waitingOn
}
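
// For context, the ResourceDependencies above come from Tiltfile declarations
// such as (illustrative names):
//
//	local_resource('seed-db', 'make seed', resource_deps=['postgres'])
//
// where 'seed-db' is held until 'postgres' has been ready or succeeded at
// least once.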

// canReuseImageTargetHeuristic checks whether this is an ImageTarget whose
// built image can potentially be reused.
//
// Note that this is a quick heuristic check for making parallelization decisions.
//
// The "correct" decision about whether an image can be re-used is more complex
// and expensive, and includes:
//
// 1) Checks of dependent images
// 2) Live-update sync checks
// 3) Checks that the image still exists on the image store
//
// But in this particular context, we can cheat a bit.
func canReuseImageTargetHeuristic(spec model.TargetSpec, status store.BuildStatus) bool {
	id := spec.ID()
	if id.Type != model.TargetTypeImage {
		return false
	}

	// NOTE(nick): A more accurate check might see if the pending file changes
	// are potentially live-updatable, but this is OK for the case of a base image.
	if len(status.PendingFileChanges) > 0 || len(status.PendingDependencyChanges) > 0 {
		return false
	}

	result := status.LastResult
	if result == nil {
		return false
	}

	_, isImageBuild := result.(store.ImageBuildResult)
	return isImageBuild
}
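
// For example (hypothetical scenario): a shared base image whose LastResult is
// an ImageBuildResult and which has no pending file or dependency changes
// passes this heuristic, so HoldTargetsWithBuildingComponents below lets
// manifests that share it build in parallel instead of holding them.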

func HoldTargetsWithBuildingComponents(state store.EngineState, mts []*store.ManifestTarget, holds HoldSet) {
	building := make(map[model.TargetID]bool)

	for _, mt := range state.Targets() {
		if mt.State.IsBuilding() {
			building[mt.Manifest.ID()] = true

			for _, spec := range mt.Manifest.TargetSpecs() {
				if canReuseImageTargetHeuristic(spec, mt.State.BuildStatus(spec.ID())) {
					continue
				}

				building[spec.ID()] = true
			}
		}
	}

	hasBuildingComponent := func(mt *store.ManifestTarget) ([]model.TargetID, bool) {
		var targetIDs []model.TargetID
		var shouldHold bool

		m := mt.Manifest
		if building[m.ID()] {
			// mark as holding but don't add self as a dependency
			shouldHold = true
		}

		for _, spec := range m.TargetSpecs() {
			if canReuseImageTargetHeuristic(spec, mt.State.BuildStatus(spec.ID())) {
				continue
			}

			if building[spec.ID()] {
				targetIDs = append(targetIDs, spec.ID())
				shouldHold = true
			}
		}
		return targetIDs, shouldHold
	}

	for _, mt := range mts {
		if waitingOn, shouldHold := hasBuildingComponent(mt); shouldHold {
			holds.AddHold(mt, store.Hold{
				Reason: store.HoldReasonBuildingComponent,
				HoldOn: waitingOn,
			})
		}
	}
}
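
// Concretely (hypothetical names): if manifest 'worker' shares an image target
// with a currently-building 'api' and that target fails the reuse heuristic,
// 'worker' gets a HoldReasonBuildingComponent hold whose HoldOn lists the
// shared target's ID, which the UI surfaces as the reason it's waiting.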

func targetsByCluster(mts []*store.ManifestTarget) map[string][]*store.ManifestTarget {
	clusters := make(map[string][]*store.ManifestTarget)
	for _, mt := range mts {
		clusterName := mt.Manifest.ClusterName()
		if clusterName == "" {
			continue
		}

		clusters[clusterName] = append(clusters[clusterName], mt)
	}
	return clusters
}

// We use the cluster to detect what architecture we're building for.
// Until the cluster connection has been established, we block any
// image builds.
func HoldTargetsWaitingOnCluster(state store.EngineState, mts []*store.ManifestTarget, holds HoldSet) {
	for clusterName, targets := range targetsByCluster(mts) {
		cluster, ok := state.Clusters[clusterName]
		isClusterOK := ok && cluster.Status.Error == "" && cluster.Status.Arch != ""
		if isClusterOK {
			// This cluster is ready; move on to the next one. (Returning here
			// would let one healthy cluster skip the holds for targets on a
			// cluster that isn't connected yet.)
			continue
		}

		gvk := v1alpha1.SchemeGroupVersion.WithKind("Cluster")
		for _, mt := range targets {
			holds.AddHold(mt, store.Hold{
				Reason: store.HoldReasonCluster,
				OnRefs: []v1alpha1.UIResourceStateWaitingOnRef{{
					Group:      gvk.Group,
					APIVersion: gvk.Version,
					Kind:       gvk.Kind,
					Name:       clusterName,
				}},
			})
		}
	}
}
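
// Note that this hold uses OnRefs rather than HoldOn: the blocker is a Cluster
// API object (typically the one named "default"), not another build target, so
// the UI shows a waiting-on reference to that object instead of a target ID.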

func HoldTargetsWaitingOnDependencies(state store.EngineState, mts []*store.ManifestTarget, holds HoldSet) {
	for _, mt := range mts {
		if waitingOn := waitingOnDependencies(state, mt); len(waitingOn) != 0 {
			holds.AddHold(mt, store.Hold{
				Reason: store.HoldReasonWaitingForDep,
				HoldOn: waitingOn,
			})
		}
	}
}

// NextUnbuiltTargetToBuild is a helper for ordering targets that have never
// been built before.
func NextUnbuiltTargetToBuild(unbuilt []*store.ManifestTarget) *store.ManifestTarget {
	// Local resources come before all cluster resources, because they
	// can't be parallelized. (Local resources may change things on disk that
	// cluster resources then pull in.)
	localTargets := FindLocalTargets(unbuilt)
	if len(localTargets) > 0 {
		return localTargets[0]
	}

	// Unresourced YAML goes next.
	unresourced := FindUnresourcedYAML(unbuilt)
	if unresourced != nil {
		return unresourced
	}

	// If this is Kubernetes, deploy-only resources (no images to build) go first.
	// (If this is Docker Compose, we want to trust the ordering
	// that docker-compose put things in.)
	deployOnlyK8sTargets := FindDeployOnlyK8sManifestTargets(unbuilt)
	if len(deployOnlyK8sTargets) > 0 {
		return deployOnlyK8sTargets[0]
	}

	return unbuilt[0]
}

func FindUnresourcedYAML(targets []*store.ManifestTarget) *store.ManifestTarget {
	for _, target := range targets {
		if target.Manifest.ManifestName() == model.UnresourcedYAMLManifestName {
			return target
		}
	}
	return nil
}

func FindDeployOnlyK8sManifestTargets(targets []*store.ManifestTarget) []*store.ManifestTarget {
	result := []*store.ManifestTarget{}
	for _, target := range targets {
		if target.Manifest.IsK8s() && len(target.Manifest.ImageTargets) == 0 {
			result = append(result, target)
		}
	}
	return result
}

func FindLocalTargets(targets []*store.ManifestTarget) []*store.ManifestTarget {
	result := []*store.ManifestTarget{}
	for _, target := range targets {
		if target.Manifest.IsLocal() {
			result = append(result, target)
		}
	}
	return result
}

func HoldUnparallelizableLocalTargets(targets []*store.ManifestTarget, holds map[model.ManifestName]store.Hold) {
	for _, target := range targets {
		if target.Manifest.IsLocal() && !target.Manifest.LocalTarget().AllowParallel {
			holds[target.Manifest.Name] = store.Hold{Reason: store.HoldReasonIsUnparallelizableTarget}
		}
	}
}

func HoldK8sTargets(targets []*store.ManifestTarget, holds HoldSet) {
	for _, target := range targets {
		if target.Manifest.IsK8s() {
			holds.AddHold(target, store.Hold{
				Reason: store.HoldReasonWaitingForUncategorized,
				HoldOn: []model.TargetID{model.UnresourcedYAMLManifestName.TargetID()},
			})
		}
	}
}

func TargetsWithPendingEnableStatus(targets []*store.ManifestTarget) []model.TargetID {
	var result []model.TargetID
	for _, target := range targets {
		if target.State.DisableState == v1alpha1.DisableStatePending {
			result = append(result, target.Spec().ID())
		}
	}
	return result
}

func IsBuildingAnything(state store.EngineState) bool {
	mts := state.Targets()
	for _, mt := range mts {
		if mt.State.IsBuilding() {
			return true
		}
	}
	return false
}

func IsBuildingUnparallelizableLocalTarget(state store.EngineState) (model.ManifestName, model.TargetName, bool) {
	mts := state.Targets()
	for _, mt := range mts {
		if mt.State.IsBuilding() && mt.Manifest.IsLocal() &&
			!mt.Manifest.LocalTarget().AllowParallel {
			return mt.Manifest.Name, mt.Manifest.LocalTarget().Name, true
		}
	}
	return "", "", false
}

func IsBuildingUncategorizedYAML(state store.EngineState) bool {
	mts := state.Targets()
	for _, mt := range mts {
		if mt.State.IsBuilding() && mt.Manifest.Name == model.UnresourcedYAMLManifestName {
			return true
		}
	}
	return false
}

// EarliestPendingAutoTriggerTarget goes through all the manifests and checks:
// 1) all pending file changes
// 2) all pending dependency changes (where an image has been rebuilt by another manifest), and
// 3) all pending manifest changes
// The earliest one is the one we want.
//
// If no targets are pending, returns nil.
func EarliestPendingAutoTriggerTarget(targets []*store.ManifestTarget) *store.ManifestTarget {
	var choice *store.ManifestTarget
	earliest := time.Now()

	for _, mt := range targets {
		ok, newTime := mt.State.HasPendingChangesBeforeOrEqual(earliest)
		if ok {
			if !mt.Manifest.TriggerMode.AutoOnChange() {
				// Don't trigger an update of a manual manifest just because it
				// has pending changes; it must come through the TriggerQueue, above.
				continue
			}
			if choice != nil && newTime.Equal(earliest) {
				// If two choices are equal, use the first one in target order.
				continue
			}
			choice = mt
			earliest = newTime
		}
	}

	return choice
}
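
// For example (hypothetical times): if 'frontend' has a change pending since
// 10:00:01 and 'backend' since 10:00:02, 'frontend' is chosen; if both changed
// at the same instant, the tie-break above keeps whichever comes first in
// target order.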

// FindTargetsNeedingAnyBuild grabs all the targets that are build-eligible
// from the engine state.
//
// We apply this filter first, then layer on individual build decisions about
// what to build next. This MUST be the union of all checks in all downstream
// build decisions in NextTargetToBuild.
func FindTargetsNeedingAnyBuild(state store.EngineState) []*store.ManifestTarget {
	queue := make(map[model.ManifestName]bool, len(state.TriggerQueue))
	for _, mn := range state.TriggerQueue {
		queue[mn] = true
	}

	result := []*store.ManifestTarget{}
	for _, target := range state.Targets() {
		// Skip disabled targets.
		if target.State.DisableState == v1alpha1.DisableStateDisabled {
			continue
		}

		if !target.State.StartedFirstBuild() && target.Manifest.TriggerMode.AutoInitial() {
			result = append(result, target)
			continue
		}

		if queue[target.Manifest.Name] {
			result = append(result, target)
			continue
		}

		hasPendingChanges, _ := target.State.HasPendingChanges()
		if hasPendingChanges && target.Manifest.TriggerMode.AutoOnChange() {
			result = append(result, target)
			continue
		}
	}

	return result
}
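
// The three branches above are meant to mirror the downstream scheduling
// checks: targets needing an initial build (auto_init), targets sitting in
// the manual trigger queue, and targets with pending changes under an
// auto-on-change trigger mode.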

func FindTargetsNeedingInitialBuild(targets []*store.ManifestTarget) []*store.ManifestTarget {
	result := []*store.ManifestTarget{}
	for _, target := range targets {
		if !target.State.StartedFirstBuild() && target.Manifest.TriggerMode.AutoInitial() {
			result = append(result, target)
		}
	}
	return result
}

func HoldLiveUpdateTargetsWaitingOnDeploy(state store.EngineState, mts []*store.ManifestTarget, holds HoldSet) {
	for _, mt := range mts {
		if IsLiveUpdateTargetWaitingOnDeploy(state, mt) {
			holds.AddHold(mt, store.Hold{Reason: store.HoldReasonWaitingForDeploy})
		}
	}
}

func IsLiveUpdateTargetWaitingOnDeploy(state store.EngineState, mt *store.ManifestTarget) bool {
	// We only care about targets where file changes are the ONLY build reason.
	if mt.NextBuildReason() != model.BuildReasonFlagChangedFiles {
		return false
	}

	// Make sure the last build succeeded.
	if mt.State.LastBuild().Empty() || mt.State.LastBuild().Error != nil {
		return false
	}

	// Never hold back a deploy in an error state.
	if mt.State.RuntimeState.RuntimeStatus() == v1alpha1.RuntimeStatusError {
		return false
	}

	// Go through all the files, and make sure they're live-update-able.
	for id, status := range mt.State.BuildStatuses {
		if len(status.PendingFileChanges) == 0 {
			continue
		}

		// We have an image target with changes!
		// First, make sure that all the changes match a sync.
		files := make([]string, 0, len(status.PendingFileChanges))
		for f := range status.PendingFileChanges {
			files = append(files, f)
		}

		iTarget := mt.Manifest.ImageTargetWithID(id)
		luSpec := iTarget.LiveUpdateSpec
		_, pathsMatchingNoSync, err := build.FilesToPathMappings(files, liveupdate.SyncSteps(luSpec))
		if err != nil || len(pathsMatchingNoSync) > 0 {
			return false
		}

		// If any changed files match a FallBackOn file, fall back to the next BuildAndDeployer.
		anyMatch, _, err := liveupdate.FallBackOnFiles(luSpec).AnyMatch(files)
		if err != nil || anyMatch {
			return false
		}

		// All changes match a sync!
		//
		// We only care about targets where there are 0 running containers for the current build.
		// This is the mechanism that live update uses to determine if the container to live-update
		// is still pending.
		if mt.Manifest.IsK8s() && iTarget.LiveUpdateSpec.Selector.Kubernetes != nil {
			kResource := state.KubernetesResources[mt.Manifest.Name.String()]
			if kResource == nil {
				return true // Wait for the k8s resource to appear.
			}

			cInfos, err := liveupdates.RunningContainersForOnePod(
				iTarget.LiveUpdateSpec.Selector.Kubernetes,
				kResource,
				state.ImageMaps[iTarget.ImageMapName()],
			)
			if err != nil {
				return false
			}

			if len(cInfos) != 0 {
				return false
			}

			// If the container in this pod is in a crash loop, then don't hold back
			// updates until the deploy finishes -- this is a pretty good signal
			// that it might not become healthy.
			pod := k8sconv.MostRecentPod(kResource.FilteredPods)
			for _, c := range pod.Containers {
				if c.Restarts > 0 {
					return false
				}
			}

			// If the pod is in a finished state, then the containers
			// may never re-enter Running.
			if pod.Phase == string(v1.PodSucceeded) || pod.Phase == string(v1.PodFailed) {
				return false
			}

		} else if mt.Manifest.IsDC() {
			dcs := state.DockerComposeServices[mt.Manifest.Name.String()]
			cInfos := liveupdates.RunningContainersForDC(dcs)
			if len(cInfos) != 0 {
				return false
			}
		} else {
			return false
		}
	}

	// If we've gotten this far, that means we should wait until this deploy
	// finishes before processing these file changes.
	return true
}
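
// In short, this function asks: "would rebuilding now clobber a pod or
// container that live update is still waiting to attach to?" It returns true
// (hold the build) only when every pending change matches a sync, nothing
// matches a fall_back_on pattern, and the workload has no running container
// yet.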

// HoldLiveUpdateTargetsHandledByReconciler holds back live-update targets
// that are being successfully handled by a reconciler.
func HoldLiveUpdateTargetsHandledByReconciler(state store.EngineState, mts []*store.ManifestTarget, holds HoldSet) {
	for _, mt := range mts {
		// Most types of build reasons trigger a full rebuild. The two exceptions are:
		// - File-change-only builds
		// - Live-update-eligible manual triggers
		reason := mt.NextBuildReason()
		isLiveUpdateEligible := reason == model.BuildReasonFlagChangedFiles
		if reason.HasTrigger() {
			isLiveUpdateEligible = IsLiveUpdateEligibleTrigger(mt.Manifest, reason)
		}

		if !isLiveUpdateEligible {
			continue
		}

		// Changes to the deploy target can't be live-updated.
		if mt.Manifest.DeployTarget != nil {
			bs, hasBuildStatus := mt.State.BuildStatuses[mt.Manifest.DeployTarget.ID()]
			hasPendingChanges := hasBuildStatus && len(bs.PendingFileChanges) > 0
			if hasPendingChanges {
				continue
			}
		}

		allHandledByLiveUpdate := true
		iTargets := mt.Manifest.ImageTargets
		for _, iTarget := range iTargets {
			bs, hasBuildStatus := mt.State.BuildStatuses[iTarget.ID()]
			hasPendingChanges := hasBuildStatus && len(bs.PendingFileChanges) > 0
			if !hasPendingChanges {
				continue
			}

			handlers := findLiveUpdateHandlers(iTarget, mt, &state)
			if len(handlers) == 0 {
				allHandledByLiveUpdate = false
			}

			for _, lu := range handlers {
				if lu.Status.Failed != nil {
					allHandledByLiveUpdate = false
				}
			}

			if !allHandledByLiveUpdate {
				break
			}
		}

		if allHandledByLiveUpdate {
			holds.AddHold(mt, store.Hold{Reason: store.HoldReasonReconciling})
		}
	}
}

// findLiveUpdateHandlers finds all the live update objects responsible for
// syncing this image.
//
// Base image live updates are modeled with a LiveUpdate object attached to
// each deploy image.
//
// The LiveUpdate watches:
// - The Deploy image's container
// - The Base image's filewatch
//
// The Tiltfile assembler will guarantee that there will be one LiveUpdate
// object for each deployed image, and they will all sync in the same way.
func findLiveUpdateHandlers(changedImage model.ImageTarget, mt *store.ManifestTarget, state *store.EngineState) []*v1alpha1.LiveUpdate {
	result := []*v1alpha1.LiveUpdate{}

	for _, candidate := range mt.Manifest.ImageTargets {
		isHandledByReconciler := !liveupdate.IsEmptySpec(candidate.LiveUpdateSpec) &&
			candidate.LiveUpdateReconciler
		if !isHandledByReconciler {
			continue
		}

		lu := state.LiveUpdates[candidate.LiveUpdateName]
		if lu == nil {
			continue
		}

		isHandled := false
		for _, source := range lu.Spec.Sources {
			// Relies on the assumption that image targets create filewatches
			// with the same name.
			if source.FileWatch == changedImage.ID().String() {
				isHandled = true
				break
			}
		}

		if isHandled {
			result = append(result, lu)
		}
	}

	return result
}
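
// Illustrative sketch (hypothetical names): with a base image target 'base'
// and a deploy image 'app' built from it, the LiveUpdate object attached to
// 'app' lists both targets' filewatches in Spec.Sources. A change under 'base'
// is then matched here via its FileWatch name (the image target's ID string),
// so the 'app' live update is considered the handler for 'base' changes.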

// IsLiveUpdateEligibleTrigger reports whether a triggered build can be
// handled as a live update.
//
// In automatic trigger mode:
// - Clicking the trigger button always triggers a full rebuild.
//
// In manual trigger mode:
// - If there are no pending changes, clicking the trigger button triggers a full rebuild.
// - If there are only pending file changes, clicking the trigger button triggers a live update.
func IsLiveUpdateEligibleTrigger(manifest model.Manifest, reason model.BuildReason) bool {
	return reason.HasTrigger() &&
		reason.WithoutTriggers() == model.BuildReasonFlagChangedFiles &&
		!manifest.TriggerMode.AutoOnChange()
}
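
// A sketch of the flag math above (not an exhaustive spec): a manual-mode
// manifest with only changed files pending produces a reason like
// BuildReasonFlagChangedFiles|BuildReasonFlagTriggerWeb; WithoutTriggers()
// strips the trigger bit, leaving exactly ChangedFiles, so the trigger is
// live-update eligible. With no pending changes, nothing remains after
// stripping, and the click forces a full rebuild.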