github.com/tilt-dev/tilt@v0.36.0/internal/engine/buildcontrol/build_control_test.go (about)

     1  package buildcontrol
     2  
     3  import (
     4  	"fmt"
     5  	"path/filepath"
     6  	"testing"
     7  	"time"
     8  
     9  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    10  
    11  	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
    12  
    13  	"github.com/stretchr/testify/assert"
    14  	"github.com/stretchr/testify/require"
    15  	v1 "k8s.io/api/core/v1"
    16  
    17  	"github.com/tilt-dev/tilt/internal/container"
    18  	"github.com/tilt-dev/tilt/internal/k8s"
    19  	"github.com/tilt-dev/tilt/internal/k8s/testyaml"
    20  	"github.com/tilt-dev/tilt/internal/store"
    21  	"github.com/tilt-dev/tilt/internal/store/k8sconv"
    22  	"github.com/tilt-dev/tilt/internal/testutils/manifestbuilder"
    23  	"github.com/tilt-dev/tilt/internal/testutils/tempdir"
    24  	"github.com/tilt-dev/tilt/pkg/model"
    25  )
    26  
    27  func TestNextTargetToBuildDoesntReturnCurrentlyBuildingTarget(t *testing.T) {
    28  	f := newTestFixture(t)
    29  
    30  	mt := f.upsertK8sManifest("k8s1")
    31  	f.st.UpsertManifestTarget(mt)
    32  
    33  	// Verify this target is normally next-to-build
    34  	f.assertNextTargetToBuild(mt.Manifest.Name)
    35  
    36  	// If target is currently building, should NOT be next-to-build
    37  	mt.State.CurrentBuilds["buildcontrol"] = model.BuildRecord{StartTime: time.Now()}
    38  	f.assertNoTargetNextToBuild()
    39  }
    40  
// TestCurrentlyBuildingK8sResourceDisablesLocalScheduling verifies that while
// a K8s build is in flight, a local resource (not parallelizable by default)
// is held, and that once every resource is building nothing is schedulable.
func TestCurrentlyBuildingK8sResourceDisablesLocalScheduling(t *testing.T) {
	f := newTestFixture(t)

	k8s1 := f.upsertK8sManifest("k8s1")
	k8s2 := f.upsertK8sManifest("k8s2")
	f.upsertLocalManifest("local1")

	// With nothing building, local1 is chosen first.
	f.assertNextTargetToBuild("local1")

	// Start a K8s build: the other K8s resource is still schedulable, but the
	// local resource must wait because it is not parallelizable.
	k8s1.State.CurrentBuilds["buildcontrol"] = model.BuildRecord{StartTime: time.Now()}
	f.assertNextTargetToBuild("k8s2")
	f.assertHold("local1", store.HoldReasonIsUnparallelizableTarget)

	k8s2.State.CurrentBuilds["buildcontrol"] = model.BuildRecord{StartTime: time.Now()}
	f.assertNoTargetNextToBuild()
}
    57  
// TestCurrentlyBuildingK8sResourceDoesNotCreateHoldIfResourceNotPending
// verifies that a manual-trigger local resource with no pending build never
// acquires a hold, even while other resources are building.
func TestCurrentlyBuildingK8sResourceDoesNotCreateHoldIfResourceNotPending(t *testing.T) {
	f := newTestFixture(t)

	k8s1 := f.upsertK8sManifest("k8s1")
	k8s2 := f.upsertK8sManifest("k8s2")
	// local1 is manual-trigger, so it has nothing pending to hold.
	f.upsertLocalManifest("local1", func(m manifestbuilder.ManifestBuilder) manifestbuilder.ManifestBuilder {
		return m.WithTriggerMode(model.TriggerModeManual)
	})

	f.assertHold("local1", store.HoldReasonNone)

	k8s1.State.CurrentBuilds["buildcontrol"] = model.BuildRecord{StartTime: time.Now()}
	f.assertNextTargetToBuild("k8s2")
	f.assertHold("local1", store.HoldReasonNone)

	k8s2.State.CurrentBuilds["buildcontrol"] = model.BuildRecord{StartTime: time.Now()}
	f.assertNoTargetNextToBuild()
	f.assertHold("local1", store.HoldReasonNone)
}
    77  
    78  func TestCurrentlyBuildingUncategorizedDisablesOtherK8sTargets(t *testing.T) {
    79  	f := newTestFixture(t)
    80  
    81  	_ = f.upsertK8sManifest("k8s1")
    82  	k8sUnresourced := f.upsertK8sManifest(model.UnresourcedYAMLManifestName)
    83  	_ = f.upsertK8sManifest("k8s2")
    84  
    85  	f.assertNextTargetToBuild(model.UnresourcedYAMLManifestName)
    86  	k8sUnresourced.State.CurrentBuilds["buildcontrol"] = model.BuildRecord{StartTime: time.Now()}
    87  	f.assertNoTargetNextToBuild()
    88  	for _, mn := range []model.ManifestName{"k8s1", "k8s2"} {
    89  		f.assertHold(mn, store.HoldReasonWaitingForUncategorized, model.ManifestName("uncategorized").TargetID())
    90  	}
    91  }
    92  
    93  func TestK8sDependsOnLocal(t *testing.T) {
    94  	f := newTestFixture(t)
    95  
    96  	k8s1 := f.upsertK8sManifest("k8s1", withResourceDeps("local1"))
    97  	k8s2 := f.upsertK8sManifest("k8s2")
    98  	local1 := f.upsertLocalManifest("local1")
    99  
   100  	f.assertNextTargetToBuild("local1")
   101  
   102  	f.assertHold("k8s1", store.HoldReasonWaitingForDep, model.ManifestName("local1").TargetID())
   103  	f.assertHold("k8s2", store.HoldReasonNone)
   104  
   105  	local1.State.AddCompletedBuild(model.BuildRecord{
   106  		StartTime:  time.Now(),
   107  		FinishTime: time.Now(),
   108  	})
   109  	lrs := local1.State.LocalRuntimeState()
   110  	lrs.LastReadyOrSucceededTime = time.Now()
   111  	local1.State.RuntimeState = lrs
   112  
   113  	f.assertNextTargetToBuild("k8s1")
   114  	k8s1.State.CurrentBuilds["buildcontrol"] = model.BuildRecord{StartTime: time.Now()}
   115  	f.assertNextTargetToBuild("k8s2")
   116  
   117  	_ = k8s2
   118  }
   119  
   120  func TestLocalDependsOnNonWorkloadK8s(t *testing.T) {
   121  	f := newTestFixture(t)
   122  
   123  	local1 := f.upsertLocalManifest("local1", withResourceDeps("k8s1"))
   124  	k8s1 := f.upsertK8sManifest("k8s1", withK8sPodReadiness(model.PodReadinessIgnore))
   125  	k8s2 := f.upsertK8sManifest("k8s2", withK8sPodReadiness(model.PodReadinessIgnore))
   126  
   127  	f.assertNextTargetToBuild("k8s1")
   128  	f.assertHold("local1", store.HoldReasonWaitingForDep, model.ManifestName("k8s1").TargetID())
   129  	f.assertHold("k8s1", store.HoldReasonNone)
   130  	f.assertHold("k8s2", store.HoldReasonNone)
   131  
   132  	k8s1.State.AddCompletedBuild(model.BuildRecord{
   133  		StartTime:  time.Now(),
   134  		FinishTime: time.Now(),
   135  	})
   136  	k8s1.State.RuntimeState = store.K8sRuntimeState{
   137  		PodReadinessMode:            model.PodReadinessIgnore,
   138  		HasEverDeployedSuccessfully: true,
   139  	}
   140  
   141  	f.assertNextTargetToBuild("local1")
   142  	local1.State.AddCompletedBuild(model.BuildRecord{
   143  		StartTime:  time.Now(),
   144  		FinishTime: time.Now(),
   145  	})
   146  	f.assertNextTargetToBuild("k8s2")
   147  
   148  	_ = k8s2
   149  }
   150  
// TestK8sDependsOnCluster verifies that a K8s resource is held, waiting on
// its Cluster object, while the cluster connection is erroring.
func TestK8sDependsOnCluster(t *testing.T) {
	f := newTestFixture(t)

	f.st.Clusters["default"].Status.Error = "connection error"

	_ = f.upsertK8sManifest("k8s1")
	f.assertNoTargetNextToBuild()
	f.assertHoldOnRefs("k8s1", store.HoldReasonCluster, v1alpha1.UIResourceStateWaitingOnRef{
		Group:      "tilt.dev",
		APIVersion: "v1alpha1",
		Kind:       "Cluster",
		Name:       "default",
	})

	// Clearing the cluster error makes the resource schedulable again.
	f.st.Clusters["default"].Status.Error = ""
	f.assertNextTargetToBuild("k8s1")
}
   168  
// TestK8sDependsOnCluster_TwoClusters verifies that an erroring "default"
// cluster only blocks the K8s resource: the Docker Compose resource dc1
// still builds, while k8s1 stays held on the Cluster object until the error
// clears.
func TestK8sDependsOnCluster_TwoClusters(t *testing.T) {
	f := newTestFixture(t)

	f.st.Clusters["default"].Status.Error = "connection error"

	_ = f.upsertK8sManifest("k8s1")
	dc1 := f.upsertDCManifest("dc1")
	f.assertNextTargetToBuild("dc1")
	dc1.State.AddCompletedBuild(model.BuildRecord{
		StartTime:  time.Now(),
		FinishTime: time.Now(),
	})
	// dc1 is done; the only remaining resource is blocked by its cluster.
	f.assertNoTargetNextToBuild()

	f.assertHoldOnRefs("k8s1", store.HoldReasonCluster, v1alpha1.UIResourceStateWaitingOnRef{
		Group:      "tilt.dev",
		APIVersion: "v1alpha1",
		Kind:       "Cluster",
		Name:       "default",
	})

	f.st.Clusters["default"].Status.Error = ""
	f.assertNextTargetToBuild("k8s1")
}
   193  
// TestCurrentlyBuildingLocalResourceDisablesK8sScheduling verifies that while
// a non-parallelizable local resource is building, every other resource is
// held on it.
func TestCurrentlyBuildingLocalResourceDisablesK8sScheduling(t *testing.T) {
	f := newTestFixture(t)

	f.upsertK8sManifest("k8s1")
	f.upsertK8sManifest("k8s2")
	local1 := f.upsertLocalManifest("local1")
	f.upsertLocalManifest("local2")

	f.assertNextTargetToBuild("local1")
	local1.State.CurrentBuilds["buildcontrol"] = model.BuildRecord{StartTime: time.Now()}
	f.assertNoTargetNextToBuild()
	// Every other resource is held on local1's in-flight build.
	for _, mn := range []model.ManifestName{"k8s1", "k8s2", "local2"} {
		f.assertHold(mn, store.HoldReasonWaitingForUnparallelizableTarget, model.ManifestName("local1").TargetID())
	}
}
   209  
// TestCurrentlyBuildingParallelLocalResource verifies that local resources
// marked allow_parallel don't block other resources: builds can proceed while
// they are in flight.
func TestCurrentlyBuildingParallelLocalResource(t *testing.T) {
	f := newTestFixture(t)

	f.upsertK8sManifest("k8s1")
	local1 := f.upsertLocalManifest("local1", func(m manifestbuilder.ManifestBuilder) manifestbuilder.ManifestBuilder {
		return m.WithLocalAllowParallel(true)
	})
	local2 := f.upsertLocalManifest("local2", func(m manifestbuilder.ManifestBuilder) manifestbuilder.ManifestBuilder {
		return m.WithLocalAllowParallel(true)
	})

	f.assertNextTargetToBuild("local1")

	// local1 building doesn't block local2...
	local1.State.CurrentBuilds["buildcontrol"] = model.BuildRecord{StartTime: time.Now()}
	f.assertNextTargetToBuild("local2")

	// ...and both building doesn't block the K8s resource.
	local2.State.CurrentBuilds["buildcontrol"] = model.BuildRecord{StartTime: time.Now()}
	f.assertNextTargetToBuild("k8s1")
}
   229  
// TestTriggerIneligibleResource verifies that queueing a trigger doesn't make
// an ineligible resource schedulable: local1 is already building, and local2
// (not parallelizable) can't start while local1's build is in flight.
func TestTriggerIneligibleResource(t *testing.T) {
	f := newTestFixture(t)

	// local1 has a build in progress
	local1 := f.upsertLocalManifest("local1", func(m manifestbuilder.ManifestBuilder) manifestbuilder.ManifestBuilder {
		return m.WithLocalAllowParallel(true)
	})
	local1.State.CurrentBuilds["buildcontrol"] = model.BuildRecord{StartTime: time.Now()}

	// local2 is not parallelizable
	local2 := f.upsertLocalManifest("local2")

	f.st.AppendToTriggerQueue(local1.Manifest.Name, model.BuildReasonFlagTriggerCLI)
	f.st.AppendToTriggerQueue(local2.Manifest.Name, model.BuildReasonFlagTriggerCLI)
	f.assertNoTargetNextToBuild()
}
   246  
// TestTwoK8sTargetsWithBaseImage verifies that two manifests sharing a base
// image don't build that image concurrently: while sancho-one is building,
// sancho-two is held on the shared base image component.
func TestTwoK8sTargetsWithBaseImage(t *testing.T) {
	f := newTestFixture(t)

	// Both deploy images depend on the same base image target.
	baseImage := newDockerImageTarget("sancho-base")
	sanchoOneImage := newDockerImageTarget("sancho-one").
		WithImageMapDeps([]string{baseImage.ImageMapName()})
	sanchoTwoImage := newDockerImageTarget("sancho-two").
		WithImageMapDeps([]string{baseImage.ImageMapName()})

	sanchoOne := f.upsertManifest(manifestbuilder.New(f, "sancho-one").
		WithImageTargets(baseImage, sanchoOneImage).
		WithK8sYAML(testyaml.SanchoYAML).
		Build())
	f.upsertManifest(manifestbuilder.New(f, "sancho-two").
		WithImageTargets(baseImage, sanchoTwoImage).
		WithK8sYAML(testyaml.SanchoYAML).
		Build())

	f.assertNextTargetToBuild("sancho-one")

	sanchoOne.State.CurrentBuilds["buildcontrol"] = model.BuildRecord{StartTime: time.Now()}

	f.assertNoTargetNextToBuild()
	f.assertHold("sancho-two", store.HoldReasonBuildingComponent, baseImage.ID())

	// Once sancho-one's build completes, sancho-two is schedulable.
	delete(sanchoOne.State.CurrentBuilds, "buildcontrol")
	sanchoOne.State.AddCompletedBuild(model.BuildRecord{
		StartTime:  time.Now(),
		FinishTime: time.Now(),
	})

	f.assertNextTargetToBuild("sancho-two")
}
   280  
// TestLiveUpdateMainImageHold verifies scheduling when the live update
// watches the main (deploy) image: a pending change to a live-updatable file
// results in a "reconciling" hold (the live-updater owns it), while a failed
// live update or a change to the base image forces a full rebuild.
func TestLiveUpdateMainImageHold(t *testing.T) {
	f := newTestFixture(t)

	srcFile := f.JoinPath("src", "a.txt")
	f.WriteFile(srcFile, "hello")
	luSpec := v1alpha1.LiveUpdateSpec{
		BasePath: f.Path(),
		Syncs: []v1alpha1.LiveUpdateSync{
			{LocalPath: "src", ContainerPath: "/src"},
		},
		Sources: []v1alpha1.LiveUpdateSource{
			{FileWatch: "image:sancho"},
		},
		Selector: v1alpha1.LiveUpdateSelector{
			Kubernetes: &v1alpha1.LiveUpdateKubernetesSelector{
				ContainerName: "c",
			},
		},
	}
	f.st.LiveUpdates["sancho"] = &v1alpha1.LiveUpdate{Spec: luSpec}

	// The live-updatable deploy image depends on a base image.
	baseImage := newDockerImageTarget("sancho-base")
	sanchoImage := newDockerImageTarget("sancho").
		WithLiveUpdateSpec("sancho", luSpec).
		WithImageMapDeps([]string{baseImage.ImageMapName()})

	sancho := f.upsertManifest(manifestbuilder.New(f, "sancho").
		WithImageTargets(baseImage, sanchoImage).
		WithK8sYAML(testyaml.SanchoYAML).
		Build())

	f.assertNextTargetToBuild("sancho")
	sancho.State.AddCompletedBuild(model.BuildRecord{
		StartTime:  time.Now(),
		FinishTime: time.Now(),
	})

	// A running pod matching the deploy image is live-update eligible.
	resource := &k8sconv.KubernetesResource{
		FilteredPods: []v1alpha1.Pod{
			*readyPod("pod-1", sanchoImage.ImageMapSpec.Selector),
		},
	}
	f.st.KubernetesResources["sancho"] = resource

	// A pending change on the deploy image is held for the live-updater.
	bs, ok := sancho.State.BuildStatus(sanchoImage.ID())
	require.True(t, ok)
	bs.FileChanges[srcFile] = time.Now()
	f.assertNoTargetNextToBuild()
	f.assertHold("sancho", store.HoldReasonReconciling)

	// If the live update is failing, we have to rebuild.
	f.st.LiveUpdates["sancho"] = &v1alpha1.LiveUpdate{
		Spec:   luSpec,
		Status: v1alpha1.LiveUpdateStatus{Failed: &v1alpha1.LiveUpdateStateFailed{Reason: "fake-reason"}},
	}
	f.assertNextTargetToBuild("sancho")

	// reset to a good state.
	f.st.LiveUpdates["sancho"] = &v1alpha1.LiveUpdate{Spec: luSpec}
	f.assertNoTargetNextToBuild()

	// If the base image has a change, we have to rebuild.
	bs, ok = sancho.State.BuildStatus(baseImage.ID())
	require.True(t, ok)
	bs.FileChanges[srcFile] = time.Now()
	f.assertNextTargetToBuild("sancho")
}
   348  
// TestLiveUpdateBaseImageHold makes sure the buildcontroller does the
// translation correctly between the image target with the file watch (the
// base image) and the image target matching the deployed container: a change
// watched for live update on the base image is held for reconciling, while a
// failed live update or a change on the deploy image forces a full rebuild.
func TestLiveUpdateBaseImageHold(t *testing.T) {
	f := newTestFixture(t)

	srcFile := f.JoinPath("base", "a.txt")
	f.WriteFile(srcFile, "hello")

	// Note: the live update watches the BASE image, not the deploy image.
	luSpec := v1alpha1.LiveUpdateSpec{
		BasePath: f.Path(),
		Syncs: []v1alpha1.LiveUpdateSync{
			{LocalPath: "base", ContainerPath: "/base"},
		},
		Sources: []v1alpha1.LiveUpdateSource{
			{FileWatch: "image:sancho-base"},
		},
	}
	f.st.LiveUpdates["sancho"] = &v1alpha1.LiveUpdate{Spec: luSpec}

	baseImage := newDockerImageTarget("sancho-base")
	sanchoImage := newDockerImageTarget("sancho").
		WithLiveUpdateSpec("sancho", luSpec).
		WithImageMapDeps([]string{baseImage.ImageMapName()})

	sancho := f.upsertManifest(manifestbuilder.New(f, "sancho").
		WithImageTargets(baseImage, sanchoImage).
		WithK8sYAML(testyaml.SanchoYAML).
		Build())

	f.assertNextTargetToBuild("sancho")
	sancho.State.AddCompletedBuild(model.BuildRecord{
		StartTime:  time.Now(),
		FinishTime: time.Now(),
	})

	resource := &k8sconv.KubernetesResource{
		FilteredPods: []v1alpha1.Pod{
			*readyPod("pod-1", sanchoImage.Selector),
		},
	}
	f.st.KubernetesResources["sancho"] = resource

	// A pending change on the watched base image is held for the live-updater.
	bs, ok := sancho.State.BuildStatus(baseImage.ID())
	require.True(t, ok)
	bs.FileChanges[srcFile] = time.Now()
	f.assertNoTargetNextToBuild()
	f.assertHold("sancho", store.HoldReasonReconciling)

	// If the live update is failing, we have to rebuild.
	f.st.LiveUpdates["sancho"] = &v1alpha1.LiveUpdate{
		Spec:   luSpec,
		Status: v1alpha1.LiveUpdateStatus{Failed: &v1alpha1.LiveUpdateStateFailed{Reason: "fake-reason"}},
	}
	f.assertNextTargetToBuild("sancho")

	// reset to a good state.
	f.st.LiveUpdates["sancho"] = &v1alpha1.LiveUpdate{Spec: luSpec}
	f.assertNoTargetNextToBuild()

	// If the deploy image has a change, we have to rebuild.
	bs, ok = sancho.State.BuildStatus(sanchoImage.ID())
	require.True(t, ok)
	bs.FileChanges[srcFile] = time.Now()
	f.assertNextTargetToBuild("sancho")
}
   415  
// TestTwoK8sTargetsWithBaseImagePrebuilt verifies that when the shared base
// image already has a recorded build result for both manifests, the two
// manifests no longer serialize on it and can build concurrently.
func TestTwoK8sTargetsWithBaseImagePrebuilt(t *testing.T) {
	f := newTestFixture(t)

	baseImage := newDockerImageTarget("sancho-base")
	sanchoOneImage := newDockerImageTarget("sancho-one").
		WithImageMapDeps([]string{baseImage.ImageMapName()})
	sanchoTwoImage := newDockerImageTarget("sancho-two").
		WithImageMapDeps([]string{baseImage.ImageMapName()})

	sanchoOne := f.upsertManifest(manifestbuilder.New(f, "sancho-one").
		WithImageTargets(baseImage, sanchoOneImage).
		WithK8sYAML(testyaml.SanchoYAML).
		Build())
	sanchoTwo := f.upsertManifest(manifestbuilder.New(f, "sancho-two").
		WithImageTargets(baseImage, sanchoTwoImage).
		WithK8sYAML(testyaml.SanchoYAML).
		Build())

	// Record an existing build result for the base image on both manifests.
	bs, ok := sanchoOne.State.BuildStatus(baseImage.ID())
	require.True(t, ok)
	bs.LastResult = store.ImageBuildResult{}
	bs, ok = sanchoTwo.State.BuildStatus(baseImage.ID())
	require.True(t, ok)
	bs.LastResult = store.ImageBuildResult{}

	f.assertNextTargetToBuild("sancho-one")

	sanchoOne.State.CurrentBuilds["buildcontrol"] = model.BuildRecord{StartTime: time.Now()}

	// Make sure sancho-two can start while sanchoOne is still pending.
	f.assertNextTargetToBuild("sancho-two")
}
   448  
// TestHoldForDeploy verifies when a live-updatable resource waits for its
// deploy instead of rebuilding: a change outside the sync paths or matching a
// stop path triggers a full rebuild, a synced change waits for a pod, and any
// pod state (ready, crashing, previously crashed, sidecar-crashed, completed)
// then makes the resource schedulable again.
func TestHoldForDeploy(t *testing.T) {
	f := newTestFixture(t)

	srcFile := f.JoinPath("src", "a.txt")
	objFile := f.JoinPath("obj", "a.out")
	fallbackFile := f.JoinPath("src", "package.json")
	f.WriteFile(srcFile, "hello")
	f.WriteFile(objFile, "hello")
	f.WriteFile(fallbackFile, "hello")

	// src/ is synced; src/package.json is a stop path that forces a rebuild.
	luSpec := v1alpha1.LiveUpdateSpec{
		BasePath:  f.Path(),
		StopPaths: []string{filepath.Join("src", "package.json")},
		Syncs:     []v1alpha1.LiveUpdateSync{{LocalPath: "src", ContainerPath: "/src"}},
		Selector: v1alpha1.LiveUpdateSelector{
			Kubernetes: &v1alpha1.LiveUpdateKubernetesSelector{
				ContainerName: "c",
			},
		},
	}
	sanchoImage := newDockerImageTarget("sancho").
		WithLiveUpdateSpec("sancho", luSpec).
		WithDockerImage(v1alpha1.DockerImageSpec{Context: f.Path()})
	sancho := f.upsertManifest(manifestbuilder.New(f, "sancho").
		WithImageTargets(sanchoImage).
		WithK8sYAML(testyaml.SanchoYAML).
		Build())

	f.assertNextTargetToBuild("sancho")

	sancho.State.AddCompletedBuild(model.BuildRecord{
		StartTime:  time.Now(),
		FinishTime: time.Now(),
	})
	f.assertNoTargetNextToBuild()

	// A change outside the sync paths triggers a full rebuild.
	status, ok := sancho.State.BuildStatus(sanchoImage.ID())
	require.True(t, ok)
	status.FileChanges[objFile] = time.Now().Add(-2 * time.Second)
	f.assertNextTargetToBuild("sancho")
	status.ConsumedChanges = time.Now().Add(-2 * time.Second)

	// A change matching a stop path also triggers a full rebuild.
	status.FileChanges[fallbackFile] = time.Now().Add(-time.Second)
	f.assertNextTargetToBuild("sancho")
	status.ConsumedChanges = time.Now().Add(-time.Second)

	// A synced change waits for the deploy: no pods yet, so hold.
	status.FileChanges[srcFile] = time.Now()
	f.assertNoTargetNextToBuild()
	f.assertHold("sancho", store.HoldReasonWaitingForDeploy)

	resource := &k8sconv.KubernetesResource{
		FilteredPods: []v1alpha1.Pod{},
	}
	f.st.KubernetesResources["sancho"] = resource

	// Each of these pod states makes the resource schedulable again.
	resource.FilteredPods = append(resource.FilteredPods, *readyPod("pod-1", sanchoImage.Selector))
	f.assertNextTargetToBuild("sancho")

	resource.FilteredPods[0] = *crashingPod("pod-1", sanchoImage.Selector)
	f.assertNextTargetToBuild("sancho")

	resource.FilteredPods[0] = *crashedInThePastPod("pod-1", sanchoImage.Selector)
	f.assertNextTargetToBuild("sancho")

	resource.FilteredPods[0] = *sidecarCrashedPod("pod-1", sanchoImage.Selector)
	f.assertNextTargetToBuild("sancho")

	resource.FilteredPods[0] = *completedPod("pod-1", sanchoImage.Selector)
	f.assertNextTargetToBuild("sancho")
}
   519  
// TestHoldForManualLiveUpdate verifies trigger handling on a manual resource
// with a healthy live update: a trigger with pending file changes is left to
// the live-updater, while a trigger with no pending changes forces a full
// rebuild.
func TestHoldForManualLiveUpdate(t *testing.T) {
	f := newTestFixture(t)

	srcFile := f.JoinPath("src", "a.txt")
	f.WriteFile(srcFile, "hello")

	luSpec := v1alpha1.LiveUpdateSpec{
		BasePath: f.Path(),
		Syncs:    []v1alpha1.LiveUpdateSync{{LocalPath: "src", ContainerPath: "/src"}},
		Sources: []v1alpha1.LiveUpdateSource{
			{FileWatch: "image:sancho"},
		},
	}
	sanchoImage := newDockerImageTarget("sancho").
		WithLiveUpdateSpec("sancho", luSpec).
		WithDockerImage(v1alpha1.DockerImageSpec{Context: f.Path()})
	sancho := f.upsertManifest(manifestbuilder.New(f, "sancho").
		WithImageTargets(sanchoImage).
		WithK8sYAML(testyaml.SanchoYAML).
		WithTriggerMode(model.TriggerModeManualWithAutoInit).
		Build())

	f.assertNextTargetToBuild("sancho")

	// Set the live-update state to healthy.
	sancho.State.AddCompletedBuild(model.BuildRecord{
		StartTime:  time.Now(),
		FinishTime: time.Now(),
	})
	resource := &k8sconv.KubernetesResource{
		FilteredPods: []v1alpha1.Pod{*completedPod("pod-1", sanchoImage.Selector)},
	}
	f.st.KubernetesResources["sancho"] = resource
	f.st.LiveUpdates["sancho"] = &v1alpha1.LiveUpdate{Spec: luSpec}
	f.assertNoTargetNextToBuild()

	// This shouldn't trigger a full-build, because it will be handled by the live-updater.
	status, ok := sancho.State.BuildStatus(sanchoImage.ID())
	require.True(t, ok)
	f.st.AppendToTriggerQueue(sancho.Manifest.Name, model.BuildReasonFlagTriggerCLI)
	status.FileChanges[srcFile] = time.Now()
	f.assertNoTargetNextToBuild()

	// This should trigger a full-rebuild, because we have a trigger without pending changes.
	status.ConsumedChanges = time.Now()
	f.assertNextTargetToBuild("sancho")
}
   567  
   568  func TestHoldDisabled(t *testing.T) {
   569  	f := newTestFixture(t)
   570  
   571  	f.upsertLocalManifest("local")
   572  	f.st.ManifestTargets["local"].State.DisableState = v1alpha1.DisableStateDisabled
   573  	f.assertNoTargetNextToBuild()
   574  }
   575  
   576  func TestHoldIfAnyDisableStatusPending(t *testing.T) {
   577  	f := newTestFixture(t)
   578  
   579  	f.upsertLocalManifest("local1")
   580  	f.upsertLocalManifest("local2")
   581  	f.upsertLocalManifest("local3")
   582  	f.st.ManifestTargets["local2"].State.DisableState = v1alpha1.DisableStatePending
   583  
   584  	f.assertHold("local1", store.HoldReasonTiltfileReload, model.TargetID{Type: "manifest", Name: "local2"})
   585  	f.assertHold("local2", store.HoldReasonTiltfileReload, model.TargetID{Type: "manifest", Name: "local2"})
   586  	f.assertHold("local3", store.HoldReasonTiltfileReload, model.TargetID{Type: "manifest", Name: "local2"})
   587  	f.assertNoTargetNextToBuild()
   588  }
   589  
   590  func readyPod(podID k8s.PodID, ref string) *v1alpha1.Pod {
   591  	return &v1alpha1.Pod{
   592  		Name:   podID.String(),
   593  		Phase:  string(v1.PodRunning),
   594  		Status: "Running",
   595  		Containers: []v1alpha1.Container{
   596  			{
   597  				ID:    string(podID + "-container"),
   598  				Name:  "c",
   599  				Ready: true,
   600  				Image: ref,
   601  				State: v1alpha1.ContainerState{
   602  					Running: &v1alpha1.ContainerStateRunning{StartedAt: metav1.Now()},
   603  				},
   604  			},
   605  		},
   606  	}
   607  }
   608  
   609  func crashingPod(podID k8s.PodID, ref string) *v1alpha1.Pod {
   610  	return &v1alpha1.Pod{
   611  		Name:   podID.String(),
   612  		Phase:  string(v1.PodRunning),
   613  		Status: "CrashLoopBackOff",
   614  		Containers: []v1alpha1.Container{
   615  			{
   616  				ID:       string(podID + "-container"),
   617  				Name:     "c",
   618  				Ready:    false,
   619  				Image:    ref,
   620  				Restarts: 1,
   621  				State: v1alpha1.ContainerState{
   622  					Terminated: &v1alpha1.ContainerStateTerminated{
   623  						StartedAt:  metav1.Now(),
   624  						FinishedAt: metav1.Now(),
   625  						Reason:     "Error",
   626  						ExitCode:   127,
   627  					}},
   628  			},
   629  		},
   630  	}
   631  }
   632  
   633  func crashedInThePastPod(podID k8s.PodID, ref string) *v1alpha1.Pod {
   634  	return &v1alpha1.Pod{
   635  		Name:   podID.String(),
   636  		Phase:  string(v1.PodRunning),
   637  		Status: "Ready",
   638  		Containers: []v1alpha1.Container{
   639  			{
   640  				ID:       string(podID + "-container"),
   641  				Name:     "c",
   642  				Ready:    true,
   643  				Image:    ref,
   644  				Restarts: 1,
   645  				State: v1alpha1.ContainerState{
   646  					Running: &v1alpha1.ContainerStateRunning{StartedAt: metav1.Now()},
   647  				},
   648  			},
   649  		},
   650  	}
   651  }
   652  
   653  func sidecarCrashedPod(podID k8s.PodID, ref string) *v1alpha1.Pod {
   654  	return &v1alpha1.Pod{
   655  		Name:   podID.String(),
   656  		Phase:  string(v1.PodRunning),
   657  		Status: "Ready",
   658  		Containers: []v1alpha1.Container{
   659  			{
   660  				ID:       string(podID + "-container"),
   661  				Name:     "c",
   662  				Ready:    true,
   663  				Image:    ref,
   664  				Restarts: 0,
   665  				State: v1alpha1.ContainerState{
   666  					Running: &v1alpha1.ContainerStateRunning{StartedAt: metav1.Now()},
   667  				},
   668  			},
   669  			{
   670  				ID:       string(podID + "-sidecar"),
   671  				Name:     "s",
   672  				Ready:    false,
   673  				Image:    container.MustParseNamed("sidecar").String(),
   674  				Restarts: 1,
   675  				State: v1alpha1.ContainerState{
   676  					Terminated: &v1alpha1.ContainerStateTerminated{
   677  						StartedAt:  metav1.Now(),
   678  						FinishedAt: metav1.Now(),
   679  						Reason:     "Error",
   680  						ExitCode:   127,
   681  					}},
   682  			},
   683  		},
   684  	}
   685  }
   686  
   687  func completedPod(podID k8s.PodID, ref string) *v1alpha1.Pod {
   688  	return &v1alpha1.Pod{
   689  		Name:   podID.String(),
   690  		Phase:  string(v1.PodSucceeded),
   691  		Status: "Completed",
   692  		Containers: []v1alpha1.Container{
   693  			{
   694  				ID:       string(podID + "-container"),
   695  				Name:     "c",
   696  				Ready:    false,
   697  				Image:    ref,
   698  				Restarts: 0,
   699  				State: v1alpha1.ContainerState{
   700  					Terminated: &v1alpha1.ContainerStateTerminated{
   701  						StartedAt:  metav1.Now(),
   702  						FinishedAt: metav1.Now(),
   703  						Reason:     "Success!",
   704  						ExitCode:   0,
   705  					}},
   706  			},
   707  		},
   708  	}
   709  }
   710  
// testFixture bundles a temp dir with a hand-built EngineState that tests
// mutate directly before calling NextTargetToBuild.
type testFixture struct {
	*tempdir.TempDirFixture
	t  *testing.T
	st *store.EngineState
}
   716  
   717  func newTestFixture(t *testing.T) testFixture {
   718  	f := tempdir.NewTempDirFixture(t)
   719  	st := store.NewState()
   720  	st.Clusters["docker"] = &v1alpha1.Cluster{
   721  		Status: v1alpha1.ClusterStatus{
   722  			Arch: "amd64",
   723  		},
   724  	}
   725  	st.Clusters["default"] = &v1alpha1.Cluster{
   726  		Status: v1alpha1.ClusterStatus{
   727  			Arch: "amd64",
   728  		},
   729  	}
   730  	return testFixture{
   731  		TempDirFixture: f,
   732  		t:              t,
   733  		st:             st,
   734  	}
   735  }
   736  
   737  func (f *testFixture) assertHold(m model.ManifestName, reason store.HoldReason, holdOn ...model.TargetID) {
   738  	f.T().Helper()
   739  	_, hs := NextTargetToBuild(*f.st)
   740  	hold := store.Hold{
   741  		Reason: reason,
   742  		HoldOn: holdOn,
   743  	}
   744  	assert.Equal(f.t, hold, hs[m])
   745  }
   746  
   747  func (f *testFixture) assertHoldOnRefs(m model.ManifestName, reason store.HoldReason, onRefs ...v1alpha1.UIResourceStateWaitingOnRef) {
   748  	f.T().Helper()
   749  	_, hs := NextTargetToBuild(*f.st)
   750  	hold := store.Hold{
   751  		Reason: reason,
   752  		OnRefs: onRefs,
   753  	}
   754  	assert.Equal(f.t, hold, hs[m])
   755  }
   756  
   757  func (f *testFixture) assertNextTargetToBuild(expected model.ManifestName) {
   758  	f.T().Helper()
   759  	next, holds := NextTargetToBuild(*f.st)
   760  	require.NotNil(f.t, next, "expected next target %s but got: nil. holds: %v", expected, holds)
   761  	actual := next.Manifest.Name
   762  	assert.Equal(f.t, expected, actual, "expected next target to be %s but got %s", expected, actual)
   763  }
   764  
   765  func (f *testFixture) assertNoTargetNextToBuild() {
   766  	f.T().Helper()
   767  	next, _ := NextTargetToBuild(*f.st)
   768  	if next != nil {
   769  		f.t.Fatalf("expected no next target to build, but got %s", next.Manifest.Name)
   770  	}
   771  }
   772  
   773  func (f *testFixture) upsertManifest(m model.Manifest) *store.ManifestTarget {
   774  	mt := store.NewManifestTarget(m)
   775  	mt.State.DisableState = v1alpha1.DisableStateEnabled
   776  	f.st.UpsertManifestTarget(mt)
   777  	return mt
   778  }
   779  
   780  func (f *testFixture) upsertK8sManifest(name model.ManifestName, opts ...manifestOption) *store.ManifestTarget {
   781  	b := manifestbuilder.New(f, name)
   782  	for _, o := range opts {
   783  		b = o(b)
   784  	}
   785  	return f.upsertManifest(b.WithK8sYAML(testyaml.SanchoYAML).Build())
   786  }
   787  
   788  func (f *testFixture) upsertDCManifest(name model.ManifestName, opts ...manifestOption) *store.ManifestTarget {
   789  	b := manifestbuilder.New(f, name)
   790  	for _, o := range opts {
   791  		b = o(b)
   792  	}
   793  	return f.upsertManifest(b.WithDockerCompose().Build())
   794  }
   795  
   796  func (f *testFixture) upsertLocalManifest(name model.ManifestName, opts ...manifestOption) *store.ManifestTarget {
   797  	b := manifestbuilder.New(f, name)
   798  	for _, o := range opts {
   799  		b = o(b)
   800  	}
   801  	return f.upsertManifest(b.WithLocalResource(fmt.Sprintf("exec-%s", name), nil).Build())
   802  }
   803  
   804  type manifestOption func(manifestbuilder.ManifestBuilder) manifestbuilder.ManifestBuilder
   805  
   806  func withResourceDeps(deps ...string) manifestOption {
   807  	return manifestOption(func(m manifestbuilder.ManifestBuilder) manifestbuilder.ManifestBuilder {
   808  		return m.WithResourceDeps(deps...)
   809  	})
   810  }
   811  func withK8sPodReadiness(pr model.PodReadinessMode) manifestOption {
   812  	return manifestOption(func(m manifestbuilder.ManifestBuilder) manifestbuilder.ManifestBuilder {
   813  		return m.WithK8sPodReadiness(pr)
   814  	})
   815  }