github.com/tilt-dev/tilt@v0.33.15-0.20240515162809-0a22ed45d8a0/internal/engine/upper_test.go

     1  package engine
     2  
     3  import (
     4  	"context"
     5  	"errors"
     6  	"fmt"
     7  	"io"
     8  	"log"
     9  	"os"
    10  	"path"
    11  	"path/filepath"
    12  	"regexp"
    13  	"runtime"
    14  	"sort"
    15  	"strings"
    16  	"sync"
    17  	"testing"
    18  	"time"
    19  
    20  	"github.com/davecgh/go-spew/spew"
    21  	"github.com/distribution/reference"
    22  	dockertypes "github.com/docker/docker/api/types"
    23  	"github.com/google/uuid"
    24  	"github.com/jonboulle/clockwork"
    25  	"github.com/stretchr/testify/assert"
    26  	"github.com/stretchr/testify/require"
    27  	v1 "k8s.io/api/core/v1"
    28  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    29  	"k8s.io/apimachinery/pkg/types"
    30  	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
    31  
    32  	"github.com/tilt-dev/clusterid"
    33  	tiltanalytics "github.com/tilt-dev/tilt/internal/analytics"
    34  	"github.com/tilt-dev/tilt/internal/build"
    35  	"github.com/tilt-dev/tilt/internal/cloud"
    36  	"github.com/tilt-dev/tilt/internal/container"
    37  	"github.com/tilt-dev/tilt/internal/containerupdate"
    38  	"github.com/tilt-dev/tilt/internal/controllers"
    39  	apitiltfile "github.com/tilt-dev/tilt/internal/controllers/apis/tiltfile"
    40  	"github.com/tilt-dev/tilt/internal/controllers/core/cluster"
    41  	"github.com/tilt-dev/tilt/internal/controllers/core/cmd"
    42  	"github.com/tilt-dev/tilt/internal/controllers/core/cmdimage"
    43  	"github.com/tilt-dev/tilt/internal/controllers/core/configmap"
    44  	"github.com/tilt-dev/tilt/internal/controllers/core/dockercomposelogstream"
    45  	"github.com/tilt-dev/tilt/internal/controllers/core/dockercomposeservice"
    46  	"github.com/tilt-dev/tilt/internal/controllers/core/dockerimage"
    47  	"github.com/tilt-dev/tilt/internal/controllers/core/extension"
    48  	"github.com/tilt-dev/tilt/internal/controllers/core/extensionrepo"
    49  	"github.com/tilt-dev/tilt/internal/controllers/core/filewatch"
    50  	"github.com/tilt-dev/tilt/internal/controllers/core/filewatch/fsevent"
    51  	"github.com/tilt-dev/tilt/internal/controllers/core/imagemap"
    52  	"github.com/tilt-dev/tilt/internal/controllers/core/kubernetesapply"
    53  	"github.com/tilt-dev/tilt/internal/controllers/core/kubernetesdiscovery"
    54  	"github.com/tilt-dev/tilt/internal/controllers/core/liveupdate"
    55  	"github.com/tilt-dev/tilt/internal/controllers/core/podlogstream"
    56  	apiportforward "github.com/tilt-dev/tilt/internal/controllers/core/portforward"
    57  	ctrlsession "github.com/tilt-dev/tilt/internal/controllers/core/session"
    58  	ctrltiltfile "github.com/tilt-dev/tilt/internal/controllers/core/tiltfile"
    59  	"github.com/tilt-dev/tilt/internal/controllers/core/togglebutton"
    60  	ctrluibutton "github.com/tilt-dev/tilt/internal/controllers/core/uibutton"
    61  	ctrluiresource "github.com/tilt-dev/tilt/internal/controllers/core/uiresource"
    62  	ctrluisession "github.com/tilt-dev/tilt/internal/controllers/core/uisession"
    63  	"github.com/tilt-dev/tilt/internal/docker"
    64  	"github.com/tilt-dev/tilt/internal/dockercompose"
    65  	engineanalytics "github.com/tilt-dev/tilt/internal/engine/analytics"
    66  	"github.com/tilt-dev/tilt/internal/engine/buildcontrol"
    67  	"github.com/tilt-dev/tilt/internal/engine/configs"
    68  	"github.com/tilt-dev/tilt/internal/engine/dockerprune"
    69  	"github.com/tilt-dev/tilt/internal/engine/k8srollout"
    70  	"github.com/tilt-dev/tilt/internal/engine/k8swatch"
    71  	"github.com/tilt-dev/tilt/internal/engine/local"
    72  	"github.com/tilt-dev/tilt/internal/engine/session"
    73  	"github.com/tilt-dev/tilt/internal/engine/telemetry"
    74  	"github.com/tilt-dev/tilt/internal/engine/uiresource"
    75  	"github.com/tilt-dev/tilt/internal/engine/uisession"
    76  	"github.com/tilt-dev/tilt/internal/feature"
    77  	"github.com/tilt-dev/tilt/internal/hud"
    78  	"github.com/tilt-dev/tilt/internal/hud/prompt"
    79  	"github.com/tilt-dev/tilt/internal/hud/server"
    80  	"github.com/tilt-dev/tilt/internal/hud/view"
    81  	"github.com/tilt-dev/tilt/internal/k8s"
    82  	"github.com/tilt-dev/tilt/internal/k8s/testyaml"
    83  	"github.com/tilt-dev/tilt/internal/localexec"
    84  	"github.com/tilt-dev/tilt/internal/openurl"
    85  	"github.com/tilt-dev/tilt/internal/store"
    86  	"github.com/tilt-dev/tilt/internal/store/buildcontrols"
    87  	"github.com/tilt-dev/tilt/internal/store/k8sconv"
    88  	"github.com/tilt-dev/tilt/internal/store/tiltfiles"
    89  	"github.com/tilt-dev/tilt/internal/testutils"
    90  	"github.com/tilt-dev/tilt/internal/testutils/bufsync"
    91  	tiltconfigmap "github.com/tilt-dev/tilt/internal/testutils/configmap"
    92  	"github.com/tilt-dev/tilt/internal/testutils/httptest"
    93  	"github.com/tilt-dev/tilt/internal/testutils/manifestbuilder"
    94  	"github.com/tilt-dev/tilt/internal/testutils/podbuilder"
    95  	"github.com/tilt-dev/tilt/internal/testutils/servicebuilder"
    96  	"github.com/tilt-dev/tilt/internal/testutils/tempdir"
    97  	"github.com/tilt-dev/tilt/internal/tiltfile"
    98  	"github.com/tilt-dev/tilt/internal/tiltfile/cisettings"
    99  	"github.com/tilt-dev/tilt/internal/tiltfile/config"
   100  	"github.com/tilt-dev/tilt/internal/tiltfile/k8scontext"
   101  	"github.com/tilt-dev/tilt/internal/tiltfile/tiltextension"
   102  	"github.com/tilt-dev/tilt/internal/tiltfile/version"
   103  	"github.com/tilt-dev/tilt/internal/token"
   104  	"github.com/tilt-dev/tilt/internal/tracer"
   105  	"github.com/tilt-dev/tilt/internal/watch"
   106  	"github.com/tilt-dev/tilt/internal/xdg"
   107  	"github.com/tilt-dev/tilt/pkg/apis"
   108  	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
   109  	"github.com/tilt-dev/tilt/pkg/assets"
   110  	"github.com/tilt-dev/tilt/pkg/logger"
   111  	"github.com/tilt-dev/tilt/pkg/model"
   112  	"github.com/tilt-dev/wmclient/pkg/analytics"
   113  )
   114  
   115  var originalWD string
   116  
   117  const stdTimeout = 2 * time.Second
   118  
   119  type buildCompletionChannel chan bool
   120  
   121  func init() {
   122  	wd, err := os.Getwd()
   123  	if err != nil {
   124  		panic(err)
   125  	}
   126  	originalWD = wd
   127  }
   128  
   129  const (
   130  	simpleTiltfile = `
   131  docker_build('gcr.io/windmill-public-containers/servantes/snack', '.')
   132  k8s_yaml('snack.yaml')
   133  `
   134  	simpleYAML = testyaml.SnackYaml
   135  )
   136  
   137  // buildAndDeployCall represents a single call to `BuildAndDeploy`.
   138  type buildAndDeployCall struct {
   139  	count int
   140  	specs []model.TargetSpec
   141  	state store.BuildStateSet
   142  }
   143  
   144  func (c buildAndDeployCall) firstImgTarg() model.ImageTarget {
   145  	iTargs := c.imageTargets()
   146  	if len(iTargs) > 0 {
   147  		return iTargs[0]
   148  	}
   149  	return model.ImageTarget{}
   150  }
   151  
   152  func (c buildAndDeployCall) imageTargets() []model.ImageTarget {
   153  	targs := make([]model.ImageTarget, 0, len(c.specs))
   154  	for _, spec := range c.specs {
   155  		t, ok := spec.(model.ImageTarget)
   156  		if ok {
   157  			targs = append(targs, t)
   158  		}
   159  	}
   160  	return targs
   161  }
   162  
   163  func (c buildAndDeployCall) k8s() model.K8sTarget {
   164  	for _, spec := range c.specs {
   165  		t, ok := spec.(model.K8sTarget)
   166  		if ok {
   167  			return t
   168  		}
   169  	}
   170  	return model.K8sTarget{}
   171  }
   172  
   173  func (c buildAndDeployCall) dc() model.DockerComposeTarget {
   174  	for _, spec := range c.specs {
   175  		t, ok := spec.(model.DockerComposeTarget)
   176  		if ok {
   177  			return t
   178  		}
   179  	}
   180  	return model.DockerComposeTarget{}
   181  }
   182  
   183  func (c buildAndDeployCall) local() model.LocalTarget {
   184  	for _, spec := range c.specs {
   185  		t, ok := spec.(model.LocalTarget)
   186  		if ok {
   187  			return t
   188  		}
   189  	}
   190  	return model.LocalTarget{}
   191  }
   192  
   193  func (c buildAndDeployCall) dcState() store.BuildState {
   194  	return c.state[c.dc().ID()]
   195  }
   196  
   197  func (c buildAndDeployCall) k8sState() store.BuildState {
   198  	return c.state[c.k8s().ID()]
   199  }
   200  
   201  func (c buildAndDeployCall) oneImageState() store.BuildState {
   202  	imageStates := make([]store.BuildState, 0)
   203  	for k, v := range c.state {
   204  		if k.Type == model.TargetTypeImage {
   205  			imageStates = append(imageStates, v)
   206  		}
   207  	}
   208  
   209  	if len(imageStates) != 1 {
   210  		panic(fmt.Sprintf("expected exactly one image state, got %d: %v", len(imageStates), c.state))
   211  	}
   212  	return imageStates[0]
   213  }
   214  
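// A minimal sketch (hypothetical, not part of the original suite) of how a
// test consumes a recorded call from the fake builder's calls channel and
// inspects its targets; the accessors above return zero values when a target
// kind is absent from the call.
func exampleInspectBuildCall(t *testing.T, calls chan buildAndDeployCall) {
	call := <-calls
	if call.k8s().Empty() && call.dc().Empty() && call.local().Empty() {
		t.Fatalf("call %d had no deploy target: %+v", call.count, call.specs)
	}
	// firstImgTarg returns an empty ImageTarget when the call built no images.
	_ = call.firstImgTarg()
}
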
   215  type fakeBuildAndDeployer struct {
   216  	t     *testing.T
   217  	mu    sync.Mutex
   218  	calls chan buildAndDeployCall
   219  
   220  	completeBuildsManually bool
   221  	buildCompletionChans   sync.Map // map[string]buildCompletionChannel; close the channel at buildCompletionChans[key] to
   222  	// complete the build started for that set of targets (the key is the string BuildAndDeploy derives via stringifyTargetIDs)
   223  
   224  	buildCount int
   225  
   226  	// Inject the container ID of the container started by Docker Compose.
   227  	// If not set, we will auto-generate an ID.
   228  	nextDockerComposeContainerID    container.ID
   229  	nextDockerComposeContainerState *dockertypes.ContainerState
   230  
   231  	targetObjectTree        map[model.TargetID]podbuilder.PodObjectTree
   232  	nextDeployedUID         types.UID
   233  	nextPodTemplateSpecHash k8s.PodTemplateSpecHash
   234  
   235  	// Simulates a build that produces no results and returns this error.
   236  	// Do not set directly; use fixture.SetNextBuildError instead.
   237  	nextBuildError error
   238  
   239  	buildLogOutput map[model.TargetID]string
   240  
   241  	resultsByID store.BuildResultSet
   242  
   243  	// kClient registers deployed entities for subsequent retrieval.
   244  	kClient  *k8s.FakeK8sClient
   245  	dcClient *dockercompose.FakeDCClient
   246  
   247  	ctrlClient ctrlclient.Client
   248  
   249  	kaReconciler *kubernetesapply.Reconciler
   250  	dcReconciler *dockercomposeservice.Reconciler
   251  }
   252  
   253  var _ buildcontrol.BuildAndDeployer = &fakeBuildAndDeployer{}
   254  
   255  func (b *fakeBuildAndDeployer) nextImageBuildResult(ctx context.Context, iTarget model.ImageTarget) (store.ImageBuildResult, error) {
   256  	var clusterNN types.NamespacedName
   257  	if iTarget.IsDockerBuild() {
   258  		clusterNN = types.NamespacedName{Name: iTarget.DockerBuildInfo().Cluster}
   259  	} else if iTarget.IsCustomBuild() {
   260  		clusterNN = types.NamespacedName{Name: iTarget.CustomBuildInfo().Cluster}
   261  	} else if iTarget.IsDockerComposeBuild() {
   262  		clusterNN = types.NamespacedName{Name: v1alpha1.ClusterNameDocker}
   263  	} else {
   264  		return store.ImageBuildResult{}, fmt.Errorf("Unknown build type. ImageTarget: %s", iTarget.ID().String())
   265  	}
   266  
   267  	if clusterNN.Name == "" {
   268  		clusterNN.Name = v1alpha1.ClusterNameDefault
   269  	}
   270  
   271  	var cluster v1alpha1.Cluster
   272  	err := b.ctrlClient.Get(ctx, clusterNN, &cluster)
   273  	if err != nil {
   274  		return store.ImageBuildResult{}, err
   275  	}
   276  	refs, err := iTarget.Refs(&cluster)
   277  	if err != nil {
   278  		return store.ImageBuildResult{}, fmt.Errorf("determining refs: %v", err)
   279  	}
   280  
   281  	tag := fmt.Sprintf("tilt-%d", b.buildCount)
   282  	localRefTagged := container.MustWithTag(refs.LocalRef(), tag)
   283  	clusterRefTagged := container.MustWithTag(refs.ClusterRef(), tag)
   284  	return store.NewImageBuildResult(iTarget.ID(), localRefTagged, clusterRefTagged), nil
   285  }
   286  
   287  func (b *fakeBuildAndDeployer) BuildAndDeploy(ctx context.Context, st store.RStore, specs []model.TargetSpec, state store.BuildStateSet) (brs store.BuildResultSet, err error) {
   288  	b.t.Helper()
   289  
   290  	b.mu.Lock()
   291  	b.buildCount++
   292  	buildKey := stringifyTargetIDs(specs)
   293  	b.registerBuild(buildKey)
   294  
   295  	if !b.completeBuildsManually {
   296  		// i.e. we should complete builds automatically: mark the build for completion now,
   297  		// so we return immediately at the end of BuildAndDeploy.
   298  		b.completeBuild(buildKey)
   299  	}
   300  
   301  	call := buildAndDeployCall{count: b.buildCount, specs: specs, state: state}
   302  	if call.dc().Empty() && call.k8s().Empty() && call.local().Empty() {
   303  		b.t.Fatalf("Invalid call: %+v", call)
   304  	}
   305  
   306  	ids := []model.TargetID{}
   307  	for _, spec := range specs {
   308  		id := spec.ID()
   309  		ids = append(ids, id)
   310  		output, ok := b.buildLogOutput[id]
   311  		if ok {
   312  			logger.Get(ctx).Infof("%s", output)
   313  		}
   314  	}
   315  
   316  	defer func() {
   317  		b.mu.Unlock()
   318  
   319  		// block until we know we're supposed to resolve this build
   320  		err2 := b.waitUntilBuildCompleted(ctx, buildKey)
   321  		if err == nil {
   322  			err = err2
   323  		}
   324  
   325  		// don't update b.calls until the end, to ensure appropriate actions have been dispatched first
   326  		select {
   327  		case b.calls <- call:
   328  		default:
   329  			b.t.Error("writing to fakeBuildAndDeployer would block. either there's a bug or the buffer size needs to be increased")
   330  		}
   331  
   332  		logger.Get(ctx).Infof("fake built %s. error: %v", ids, err)
   333  	}()
   334  
   335  	err = b.nextBuildError
   336  	b.nextBuildError = nil
   337  	if err != nil {
   338  		return nil, err
   339  	}
   340  
   341  	iTargets := model.ExtractImageTargets(specs)
   342  	fakeImageExistsCheck := func(ctx context.Context, iTarget model.ImageTarget, namedTagged reference.NamedTagged) (bool, error) {
   343  		return true, nil
   344  	}
   345  	queue, err := buildcontrol.NewImageTargetQueue(ctx, iTargets, state, fakeImageExistsCheck)
   346  	if err != nil {
   347  		return nil, err
   348  	}
   349  
   350  	err = queue.RunBuilds(func(target model.TargetSpec, depResults []store.ImageBuildResult) (store.ImageBuildResult, error) {
   351  		b.t.Helper()
   352  		iTarget := target.(model.ImageTarget)
   353  		ibr, err := b.nextImageBuildResult(ctx, iTarget)
   354  		if err != nil {
   355  			return store.ImageBuildResult{}, err
   356  		}
   357  
   358  		var im v1alpha1.ImageMap
   359  		if err := b.ctrlClient.Get(ctx, types.NamespacedName{Name: iTarget.ImageMapName()}, &im); err != nil {
   360  			return store.ImageBuildResult{}, err
   361  		}
   362  
   363  		im.Status = *ibr.ImageMapStatus.DeepCopy()
   364  		buildStartTime := apis.NowMicro()
   365  		im.Status.BuildStartTime = &buildStartTime
   366  
   367  		if err := b.ctrlClient.Status().Update(ctx, &im); err != nil {
   368  			return store.ImageBuildResult{}, err
   369  		}
   370  
   371  		return ibr, nil
   372  	})
   373  	result := queue.NewResults().ToBuildResultSet()
   374  	if err != nil {
   375  		return result, err
   376  	}
   377  
   378  	if !call.dc().Empty() {
   379  		dcContainerID := container.ID(fmt.Sprintf("dc-%s", path.Base(call.dc().ID().Name.String())))
   380  		if b.nextDockerComposeContainerID != "" {
   381  			dcContainerID = b.nextDockerComposeContainerID
   382  		}
   383  		b.dcClient.ContainerIDDefault = dcContainerID
   384  
   385  		err = b.updateDockerComposeServiceStatus(ctx, call.dc(), iTargets)
   386  		if err != nil {
   387  			return result, err
   388  		}
   389  
   390  		dcContainerState := b.nextDockerComposeContainerState
   391  		result[call.dc().ID()] = store.NewDockerComposeDeployResult(
   392  			call.dc().ID(), dockercompose.ToServiceStatus(dcContainerID, string(dcContainerID), dcContainerState, nil))
   393  	}
   394  
   395  	if kTarg := call.k8s(); !kTarg.Empty() {
   396  		nextK8sResult := b.nextK8sDeployResult(kTarg)
   397  		err = b.updateKubernetesApplyStatus(ctx, kTarg, iTargets)
   398  		if err != nil {
   399  			return result, err
   400  		}
   401  		result[call.k8s().ID()] = nextK8sResult
   402  	}
   403  
   404  	b.nextDockerComposeContainerID = ""
   405  
   406  	for key, val := range result {
   407  		b.resultsByID[key] = val
   408  	}
   409  
   410  	return result, nil
   411  }
   412  
   413  func (b *fakeBuildAndDeployer) updateKubernetesApplyStatus(ctx context.Context, kTarg model.K8sTarget, iTargets []model.ImageTarget) error {
   414  	imageMapSet := make(map[types.NamespacedName]*v1alpha1.ImageMap, len(kTarg.ImageMaps))
   415  	for _, iTarget := range iTargets {
   416  		if iTarget.IsLiveUpdateOnly {
   417  			continue
   418  		}
   419  
   420  		var im v1alpha1.ImageMap
   421  		nn := types.NamespacedName{Name: iTarget.ImageMapName()}
   422  		err := b.ctrlClient.Get(ctx, nn, &im)
   423  		if err != nil {
   424  			return err
   425  		}
   426  		imageMapSet[nn] = &im
   427  	}
   428  
   429  	clusterName := kTarg.KubernetesApplySpec.Cluster
   430  	if clusterName == "" {
   431  		clusterName = v1alpha1.ClusterNameDefault
   432  	}
   433  
   434  	var cluster v1alpha1.Cluster
   435  	err := b.ctrlClient.Get(ctx, types.NamespacedName{Name: clusterName}, &cluster)
   436  	if err != nil {
   437  		return err
   438  	}
   439  
   440  	nn := types.NamespacedName{Name: kTarg.ID().Name.String()}
   441  	status := b.kaReconciler.ForceApply(ctx, nn, kTarg.KubernetesApplySpec, &cluster, imageMapSet)
   442  
   443  	// The fake stub only propagates apiserver problems; the apply status itself is intentionally ignored.
   444  	_ = status
   445  
   446  	return nil
   447  }
   448  
   449  func (b *fakeBuildAndDeployer) updateDockerComposeServiceStatus(ctx context.Context, dcTarg model.DockerComposeTarget, iTargets []model.ImageTarget) error {
   450  	imageMapSet := make(map[types.NamespacedName]*v1alpha1.ImageMap, len(dcTarg.Spec.ImageMaps))
   451  	for _, iTarget := range iTargets {
   452  		if iTarget.IsLiveUpdateOnly {
   453  			continue
   454  		}
   455  
   456  		var im v1alpha1.ImageMap
   457  		nn := types.NamespacedName{Name: iTarget.ImageMapName()}
   458  		err := b.ctrlClient.Get(ctx, nn, &im)
   459  		if err != nil {
   460  			return err
   461  		}
   462  		imageMapSet[nn] = &im
   463  	}
   464  
   465  	nn := types.NamespacedName{Name: dcTarg.ID().Name.String()}
   466  	status := b.dcReconciler.ForceApply(ctx, nn, dcTarg.Spec, imageMapSet, false)
   467  
   468  	// The fake stub only propagates apiserver problems; the apply status itself is intentionally ignored.
   469  	_ = status
   470  
   471  	return nil
   472  }
   473  
   474  func (b *fakeBuildAndDeployer) nextK8sDeployResult(kTarg model.K8sTarget) store.K8sBuildResult {
   475  	var err error
   476  	var deployed []k8s.K8sEntity
   477  
   478  	explicitDeploymentEntities := b.targetObjectTree[kTarg.ID()]
   479  	if len(explicitDeploymentEntities) != 0 {
   480  		if b.nextDeployedUID != "" {
   481  			b.t.Fatalf("Cannot set both explicit deployed entities + next deployed UID")
   482  		}
   483  		if b.nextPodTemplateSpecHash != "" {
   484  			b.t.Fatalf("Cannot set both explicit deployed entities + next pod template spec hashes")
   485  		}
   486  
   487  		// register Deployment + ReplicaSet so that other parts of the system can properly retrieve them
   488  		b.kClient.Inject(
   489  			explicitDeploymentEntities.Deployment(),
   490  			explicitDeploymentEntities.ReplicaSet())
   491  
   492  		// only return the Deployment entity as deployed since the ReplicaSet + Pod are created implicitly,
   493  		// i.e. they are not returned in a normal apply call for a Deployment
   494  		deployed = []k8s.K8sEntity{explicitDeploymentEntities.Deployment()}
   495  	} else {
   496  		deployed, err = k8s.ParseYAMLFromString(kTarg.YAML)
   497  		require.NoError(b.t, err)
   498  
   499  		for i := 0; i < len(deployed); i++ {
   500  			uid := types.UID(uuid.New().String())
   501  			if b.nextDeployedUID != "" {
   502  				uid = b.nextDeployedUID
   503  				b.nextDeployedUID = ""
   504  			}
   505  			deployed[i].SetUID(string(uid))
   506  		}
   507  
   508  		for i, e := range deployed {
   509  			if b.nextPodTemplateSpecHash != "" {
   510  				e = e.DeepCopy()
   511  				templateSpecs, err := k8s.ExtractPodTemplateSpec(&e)
   512  				require.NoError(b.t, err)
   513  				for _, ts := range templateSpecs {
   514  					ts.Labels = map[string]string{k8s.TiltPodTemplateHashLabel: string(b.nextPodTemplateSpecHash)}
   515  				}
   516  				deployed[i] = e
   517  			} else {
   518  				deployed[i], err = k8s.InjectPodTemplateSpecHashes(e)
   519  				require.NoError(b.t, err)
   520  			}
   521  		}
   522  	}
   523  
   524  	resultYAML, err := k8s.SerializeSpecYAML(deployed)
   525  	require.NoError(b.t, err)
   526  
   527  	b.kClient.UpsertResult = deployed
   528  
   529  	filter, err := k8sconv.NewKubernetesApplyFilter(resultYAML)
   530  	require.NoError(b.t, err)
   531  	return store.NewK8sDeployResult(kTarg.ID(), filter)
   532  }
   533  
   534  func (b *fakeBuildAndDeployer) getOrCreateBuildCompletionChannel(key string) buildCompletionChannel {
   535  	ch := make(buildCompletionChannel)
   536  	val, _ := b.buildCompletionChans.LoadOrStore(key, ch)
   537  
   538  	var ok bool
   539  	ch, ok = val.(buildCompletionChannel)
   540  	if !ok {
   541  		panic(fmt.Sprintf("expected map value of type: buildCompletionChannel, got %T", val))
   542  	}
   543  
   544  	return ch
   545  }
   546  
   547  func (b *fakeBuildAndDeployer) registerBuild(key string) {
   548  	b.getOrCreateBuildCompletionChannel(key)
   549  }
   550  
   551  func (b *fakeBuildAndDeployer) waitUntilBuildCompleted(ctx context.Context, key string) error {
   552  	ch := b.getOrCreateBuildCompletionChannel(key)
   553  
   554  	defer b.buildCompletionChans.Delete(key)
   555  
   556  	// wait until channel for this build is closed, or context is canceled/finished.
   557  	select {
   558  	case <-ch:
   559  		return nil
   560  	case <-ctx.Done():
   561  		return ctx.Err()
   562  	}
   563  }
   564  
   565  func newFakeBuildAndDeployer(t *testing.T, kClient *k8s.FakeK8sClient, dcClient *dockercompose.FakeDCClient, ctrlClient ctrlclient.Client, kaReconciler *kubernetesapply.Reconciler, dcReconciler *dockercomposeservice.Reconciler) *fakeBuildAndDeployer {
   566  	return &fakeBuildAndDeployer{
   567  		t:                t,
   568  		calls:            make(chan buildAndDeployCall, 20),
   569  		buildLogOutput:   make(map[model.TargetID]string),
   570  		resultsByID:      store.BuildResultSet{},
   571  		kClient:          kClient,
   572  		dcClient:         dcClient,
   573  		ctrlClient:       ctrlClient,
   574  		kaReconciler:     kaReconciler,
   575  		dcReconciler:     dcReconciler,
   576  		targetObjectTree: make(map[model.TargetID]podbuilder.PodObjectTree),
   577  	}
   578  }
   579  
   580  func (b *fakeBuildAndDeployer) completeBuild(key string) {
   581  	ch := b.getOrCreateBuildCompletionChannel(key)
   582  	close(ch)
   583  }
   584  
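// A minimal sketch (hypothetical, not an original test) of the manual build
// completion flow wired through buildCompletionChans: with
// completeBuildsManually set, BuildAndDeploy blocks until completeBuild is
// called with the same key that BuildAndDeploy derives via stringifyTargetIDs.
// Assumes the caller supplies valid specs and build state.
func exampleManualBuildCompletion(ctx context.Context, b *fakeBuildAndDeployer, st store.RStore, specs []model.TargetSpec, state store.BuildStateSet) {
	b.completeBuildsManually = true

	done := make(chan struct{})
	go func() {
		// Blocks in the deferred waitUntilBuildCompleted until the build's channel is closed.
		_, _ = b.BuildAndDeploy(ctx, st, specs, state)
		close(done)
	}()

	// Release the in-flight build whenever the test is ready to let it finish.
	b.completeBuild(stringifyTargetIDs(specs))
	<-done
}
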
   585  func TestUpper_Up(t *testing.T) {
   586  	f := newTestFixture(t)
   587  	manifest := f.newManifest("foobar")
   588  
   589  	f.setManifests([]model.Manifest{manifest})
   590  
   591  	storeErr := make(chan error, 1)
   592  	go func() {
   593  		storeErr <- f.upper.Init(f.ctx, InitAction{
   594  			TiltfilePath: f.JoinPath("Tiltfile"),
   595  			StartTime:    f.Now(),
   596  		})
   597  	}()
   598  
   599  	call := f.nextCallComplete()
   600  	assert.Equal(t, manifest.K8sTarget().ID(), call.k8s().ID())
   601  	close(f.b.calls)
   602  
   603  	// cancel the context to simulate a Ctrl-C
   604  	f.cancel()
   605  	err := <-storeErr
   606  	if assert.NotNil(t, err, "Store returned nil error (expected context canceled)") {
   607  		assert.Contains(t, err.Error(), context.Canceled.Error(), "Store error was not as expected")
   608  	}
   609  
   610  	state := f.upper.store.RLockState()
   611  	defer f.upper.store.RUnlockState()
   612  
   613  	buildRecord := state.ManifestTargets[manifest.Name].Status().LastBuild()
   614  	lines := strings.Split(state.LogStore.SpanLog(buildRecord.SpanID), "\n")
   615  	assertLineMatches(t, lines, regexp.MustCompile("fake built .*foobar"))
   616  }
   617  
   618  func TestUpper_UpK8sEntityOrdering(t *testing.T) {
   619  	f := newTestFixture(t, fixtureOptions{engineMode: &store.EngineModeCI})
   620  	f.useRealTiltfileLoader()
   621  
   622  	postgresEntities, err := k8s.ParseYAMLFromString(testyaml.PostgresYAML)
   623  	require.NoError(t, err)
   624  	yaml, err := k8s.SerializeSpecYAML(postgresEntities[:3]) // only take entities that don't belong to a workload
   625  	require.NoError(t, err)
   626  	f.WriteFile("Tiltfile", `k8s_yaml('postgres.yaml')`)
   627  	f.WriteFile("postgres.yaml", yaml)
   628  
   629  	storeErr := make(chan error, 1)
   630  	go func() {
   631  		storeErr <- f.upper.Init(f.ctx, InitAction{
   632  			TiltfilePath: f.JoinPath("Tiltfile"),
   633  			StartTime:    f.Now(),
   634  		})
   635  	}()
   636  
   637  	call := f.nextCallComplete()
   638  	entities, err := k8s.ParseYAMLFromString(call.k8s().YAML)
   639  	require.NoError(t, err)
   640  	expectedKindOrder := []string{"PersistentVolume", "PersistentVolumeClaim", "ConfigMap"}
   641  	actualKindOrder := make([]string, len(entities))
   642  	for i, e := range entities {
   643  		actualKindOrder[i] = e.GVK().Kind
   644  	}
   645  	assert.Equal(t, expectedKindOrder, actualKindOrder,
   646  		"YAML on the manifest should be in sorted order")
   647  
   648  	f.assertAllBuildsConsumed()
   649  	require.NoError(t, <-storeErr)
   650  }
   651  
   652  func TestUpper_CI(t *testing.T) {
   653  	f := newTestFixture(t, fixtureOptions{engineMode: &store.EngineModeCI})
   654  
   655  	manifest := f.newManifest("foobar")
   656  	pb := f.registerForDeployer(manifest)
   657  	f.setManifests([]model.Manifest{manifest})
   658  
   659  	storeErr := make(chan error, 1)
   660  	go func() {
   661  		storeErr <- f.upper.Init(f.ctx, InitAction{
   662  			TiltfilePath: f.JoinPath("Tiltfile"),
   663  			UserArgs:     nil, // equivalent to `tilt up --watch=false` (i.e. not specifying any manifest names)
   664  			StartTime:    f.Now(),
   665  		})
   666  	}()
   667  
   668  	call := f.nextCallComplete()
   669  	close(f.b.calls)
   670  	assert.Equal(t, "foobar", call.k8s().ID().Name.String())
   671  
   672  	f.startPod(pb.WithPhase(string(v1.PodRunning)).Build(), manifest.Name)
   673  	require.NoError(t, <-storeErr)
   674  }
   675  
   676  func TestFirstBuildFails_Up(t *testing.T) {
   677  	f := newTestFixture(t)
   678  	manifest := f.newManifest("foobar")
   679  	f.SetNextBuildError(errors.New("Build failed"))
   680  
   681  	f.Start([]model.Manifest{manifest})
   682  
   683  	call := f.nextCall()
   684  	assert.True(t, call.oneImageState().IsEmpty())
   685  
   686  	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("a.go"))
   687  
   688  	call = f.nextCall()
   689  	assert.True(t, call.oneImageState().IsEmpty())
   690  	assert.Equal(t, []string{f.JoinPath("a.go")}, call.oneImageState().FilesChanged())
   691  
   692  	err := f.Stop()
   693  	assert.NoError(t, err)
   694  	f.assertAllBuildsConsumed()
   695  }
   696  
   697  func TestFirstBuildCancels_Up(t *testing.T) {
   698  	f := newTestFixture(t)
   699  	manifest := f.newManifest("foobar")
   700  	f.SetNextBuildError(context.Canceled)
   701  
   702  	f.Start([]model.Manifest{manifest})
   703  
   704  	call := f.nextCall()
   705  	assert.True(t, call.oneImageState().IsEmpty())
   706  
   707  	err := f.Stop()
   708  	assert.NoError(t, err)
   709  	f.assertAllBuildsConsumed()
   710  }
   711  
   712  func TestFirstBuildFails_CI(t *testing.T) {
   713  	f := newTestFixture(t, fixtureOptions{engineMode: &store.EngineModeCI})
   714  	manifest := f.newManifest("foobar")
   715  	buildFailedToken := errors.New("doesn't compile")
   716  	f.SetNextBuildError(buildFailedToken)
   717  
   718  	f.setManifests([]model.Manifest{manifest})
   719  	f.Init(InitAction{
   720  		TiltfilePath: f.JoinPath("Tiltfile"),
   721  		TerminalMode: store.TerminalModeHUD,
   722  		StartTime:    f.Now(),
   723  	})
   724  
   725  	f.WaitUntilManifestState("build has failed", manifest.ManifestName(), func(st store.ManifestState) bool {
   726  		return st.LastBuild().Error != nil
   727  	})
   728  
   729  	select {
   730  	case err := <-f.upperInitResult:
   731  		require.NotNil(t, err)
   732  		assert.Contains(t, err.Error(), "doesn't compile")
   733  	case <-time.After(stdTimeout):
   734  		t.Fatal("Timed out waiting for exit action")
   735  	}
   736  
   737  	f.withState(func(es store.EngineState) {
   738  		assert.True(t, es.ExitSignal)
   739  	})
   740  }
   741  
   742  func TestCIIgnoresDisabledResources(t *testing.T) {
   743  	f := newTestFixture(t, fixtureOptions{engineMode: &store.EngineModeCI})
   744  
   745  	m1 := f.newManifest("m1")
   746  	pb := f.registerForDeployer(m1)
   747  	m2 := f.newManifest("m2")
   748  	f.setManifests([]model.Manifest{m1, m2})
   749  	f.tfl.Result.EnabledManifests = []model.ManifestName{m1.Name}
   750  
   751  	storeErr := make(chan error, 1)
   752  	go func() {
   753  		storeErr <- f.upper.Init(f.ctx, InitAction{
   754  			TiltfilePath: f.JoinPath("Tiltfile"),
   755  			StartTime:    f.Now(),
   756  		})
   757  	}()
   758  
   759  	call := f.nextCallComplete()
   760  	close(f.b.calls)
   761  	assert.Equal(t, "m1", call.k8s().ID().Name.String())
   762  
   763  	f.startPod(pb.WithPhase(string(v1.PodRunning)).Build(), m1.Name)
   764  	require.NoError(t, <-storeErr)
   765  }
   766  
   767  func TestConfigFileChangeClearsBuildStateToForceImageBuild(t *testing.T) {
   768  	f := newTestFixture(t)
   769  	f.useRealTiltfileLoader()
   770  
   771  	f.WriteFile("Tiltfile", `
   772  docker_build('gcr.io/windmill-public-containers/servantes/snack', '.', live_update=[sync('.', '/app')])
   773  k8s_yaml('snack.yaml')
   774  	`)
   775  	f.WriteFile("Dockerfile", `FROM iron/go:prod`)
   776  	f.WriteFile("snack.yaml", simpleYAML)
   777  
   778  	f.loadAndStart()
   779  
   780  	// First call: with the old manifest
   781  	call := f.nextCall("old manifest")
   782  	assert.Equal(t, `FROM iron/go:prod`, call.firstImgTarg().DockerBuildInfo().DockerfileContents)
   783  
   784  	f.WriteConfigFiles("Dockerfile", `FROM iron/go:dev`)
   785  
   786  	// Second call: new manifest!
   787  	call = f.nextCall("new manifest")
   788  	assert.Equal(t, "FROM iron/go:dev", call.firstImgTarg().DockerBuildInfo().DockerfileContents)
   789  	assert.Equal(t, testyaml.SnackYAMLPostConfig, call.k8s().YAML)
   790  
   791  	// Since the manifest changed, we cleared the previous build state to force an image build
   792  	// (i.e. check that we called BuildAndDeploy with no pre-existing state)
   793  	assert.False(t, call.oneImageState().HasLastResult())
   794  
   795  	err := f.Stop()
   796  	assert.NoError(t, err)
   797  	f.assertAllBuildsConsumed()
   798  }
   799  
   800  func TestMultipleChangesOnlyDeployOneManifest(t *testing.T) {
   801  	f := newTestFixture(t)
   802  	f.useRealTiltfileLoader()
   803  
   804  	f.WriteFile("Tiltfile", `
   805  # ensure builds happen in deterministic order
   806  update_settings(max_parallel_updates=1)
   807  
   808  docker_build("gcr.io/windmill-public-containers/servantes/snack", "./snack", dockerfile="Dockerfile1")
   809  docker_build("gcr.io/windmill-public-containers/servantes/doggos", "./doggos", dockerfile="Dockerfile2")
   810  
   811  k8s_yaml(['snack.yaml', 'doggos.yaml'])
   812  k8s_resource('snack', new_name='baz')
   813  k8s_resource('doggos', new_name='quux')
   814  `)
   815  	f.WriteFile("snack.yaml", simpleYAML)
   816  	f.WriteFile("Dockerfile1", `FROM iron/go:prod`)
   817  	f.WriteFile("Dockerfile2", `FROM iron/go:prod`)
   818  	f.WriteFile("doggos.yaml", testyaml.DoggosDeploymentYaml)
   819  
   820  	f.loadAndStart()
   821  
   822  	// First call: with the old manifests
   823  	call := f.nextCall("old manifest (baz)")
   824  	assert.Equal(t, `FROM iron/go:prod`, call.firstImgTarg().DockerBuildInfo().DockerfileContents)
   825  	assert.Equal(t, "baz", string(call.k8s().Name))
   826  
   827  	call = f.nextCall("old manifest (quux)")
   828  	assert.Equal(t, `FROM iron/go:prod`, call.firstImgTarg().DockerBuildInfo().DockerfileContents)
   829  	assert.Equal(t, "quux", string(call.k8s().Name))
   830  
   831  	// rewrite the dockerfiles
   832  	f.WriteConfigFiles(
   833  		"Dockerfile1", `FROM iron/go:dev1`,
   834  		"Dockerfile2", "FROM iron/go:dev2")
   835  
   836  	// Builds triggered by config file changes
   837  	call = f.nextCall("manifest from config files (baz)")
   838  	assert.Equal(t, `FROM iron/go:dev1`, call.firstImgTarg().DockerBuildInfo().DockerfileContents)
   839  	assert.Equal(t, "baz", string(call.k8s().Name))
   840  
   841  	call = f.nextCall("manifest from config files (quux)")
   842  	assert.Equal(t, `FROM iron/go:dev2`, call.firstImgTarg().DockerBuildInfo().DockerfileContents)
   843  	assert.Equal(t, "quux", string(call.k8s().Name))
   844  
   845  	// Now change (only one) dockerfile
   846  	f.WriteConfigFiles("Dockerfile1", `FROM node:10`)
   847  
   848  	// Second call: one new manifest!
   849  	call = f.nextCall("changed config file --> new manifest")
   850  
   851  	assert.Equal(t, "baz", string(call.k8s().Name))
   852  	assert.ElementsMatch(t, []string{}, call.oneImageState().FilesChanged())
   853  
   854  	// Since the manifest changed, we cleared the previous build state to force an image build
   855  	assert.False(t, call.oneImageState().HasLastResult())
   856  
   857  	// Importantly, the other manifest (quux) is _not_ rebuilt -- the Dockerfile change didn't affect it
   858  	err := f.Stop()
   859  	assert.Nil(t, err)
   860  	f.assertAllBuildsConsumed()
   861  }
   862  
   863  func TestSecondResourceIsBuilt(t *testing.T) {
   864  	f := newTestFixture(t)
   865  	f.useRealTiltfileLoader()
   866  
   867  	f.WriteFile("Tiltfile", `
   868  docker_build("gcr.io/windmill-public-containers/servantes/snack", "./snack", dockerfile="Dockerfile1")
   869  
   870  k8s_yaml('snack.yaml')
   871  k8s_resource('snack', new_name='baz')  # rename "snack" --> "baz"
   872  `)
   873  	f.WriteFile("snack.yaml", simpleYAML)
   874  	f.WriteFile("Dockerfile1", `FROM iron/go:dev1`)
   875  	f.WriteFile("Dockerfile2", `FROM iron/go:dev2`)
   876  	f.WriteFile("doggos.yaml", testyaml.DoggosDeploymentYaml)
   877  
   878  	f.loadAndStart()
   879  
   880  	// First call: with one resource
   881  	call := f.nextCall("old manifest (baz)")
   882  	assert.Equal(t, "FROM iron/go:dev1", call.firstImgTarg().DockerBuildInfo().DockerfileContents)
   883  	assert.Equal(t, "baz", string(call.k8s().Name))
   884  
   885  	f.assertNoCall()
   886  
   887  	// Now add a second resource
   888  	f.WriteConfigFiles("Tiltfile", `
   889  docker_build("gcr.io/windmill-public-containers/servantes/snack", "./snack", dockerfile="Dockerfile1")
   890  docker_build("gcr.io/windmill-public-containers/servantes/doggos", "./doggos", dockerfile="Dockerfile2")
   891  
   892  k8s_yaml(['snack.yaml', 'doggos.yaml'])
   893  k8s_resource('snack', new_name='baz')  # rename "snack" --> "baz"
   894  k8s_resource('doggos', new_name='quux')  # rename "doggos" --> "quux"
   895  `)
   896  
   897  	// Expect a build of quux, the new resource
   898  	call = f.nextCall("changed config file --> new manifest")
   899  	assert.Equal(t, "quux", string(call.k8s().Name))
   900  	assert.ElementsMatch(t, []string{}, call.oneImageState().FilesChanged())
   901  
   902  	err := f.Stop()
   903  	assert.Nil(t, err)
   904  	f.assertAllBuildsConsumed()
   905  }
   906  
   907  func TestConfigChange_NoOpChange(t *testing.T) {
   908  	f := newTestFixture(t)
   909  	f.useRealTiltfileLoader()
   910  
   911  	f.WriteFile("Tiltfile", `
   912  docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile')
   913  k8s_yaml('snack.yaml')`)
   914  	f.WriteFile("Dockerfile", `FROM iron/go:dev1`)
   915  	f.WriteFile("snack.yaml", simpleYAML)
   916  	f.WriteFile("src/main.go", "hello")
   917  
   918  	f.loadAndStart()
   919  
   920  	// First call: with the old manifests
   921  	call := f.nextCall("initial call")
   922  	assert.Equal(t, "FROM iron/go:dev1", call.firstImgTarg().DockerBuildInfo().DockerfileContents)
   923  	assert.Equal(t, "snack", string(call.k8s().Name))
   924  
   925  	// Write same contents to Dockerfile -- an "edit" event for a config file,
   926  	// but it doesn't change the manifest at all.
   927  	f.WriteConfigFiles("Dockerfile", `FROM iron/go:dev1`)
   928  	f.assertNoCall("Dockerfile hasn't changed, so there shouldn't be any builds")
   929  
   930  	// Second call: Editing the Dockerfile means we have to reevaluate the Tiltfile.
   931  	// Editing the source file (src/main.go) means we have to do a rebuild. BUT! The Dockerfile
   932  	// hasn't changed, so the manifest hasn't changed, so we can do an incremental build.
   933  	changed := f.WriteFile("src/main.go", "goodbye")
   934  	f.fsWatcher.Events <- watch.NewFileEvent(changed)
   935  
   936  	call = f.nextCall("build from file change")
   937  	assert.Equal(t, "snack", string(call.k8s().Name))
   938  	assert.ElementsMatch(t, []string{
   939  		f.JoinPath("src/main.go"),
   940  	}, call.oneImageState().FilesChanged())
   941  	assert.True(t, call.oneImageState().HasLastResult(), "Unchanged manifest --> we do NOT clear the build state")
   942  
   943  	err := f.Stop()
   944  	assert.Nil(t, err)
   945  	f.assertAllBuildsConsumed()
   946  }
   947  
   948  func TestConfigChange_TiltfileErrorAndFixWithNoChanges(t *testing.T) {
   949  	f := newTestFixture(t)
   950  	f.useRealTiltfileLoader()
   951  
   952  	origTiltfile := `
   953  docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile')
   954  k8s_yaml('snack.yaml')`
   955  	f.WriteFile("Tiltfile", origTiltfile)
   956  	f.WriteFile("Dockerfile", `FROM iron/go:dev`)
   957  	f.WriteFile("snack.yaml", simpleYAML)
   958  
   959  	f.loadAndStart()
   960  
   961  	// First call: all is well
   962  	_ = f.nextCall("first call")
   963  
   964  	// Second call: change Tiltfile, break manifest
   965  	f.WriteConfigFiles("Tiltfile", "broken")
   966  	f.WaitUntil("tiltfile error set", func(st store.EngineState) bool {
   967  		return st.LastMainTiltfileError() != nil
   968  	})
   969  	f.assertNoCall("Tiltfile error should prevent BuildAndDeploy from being called")
   970  
   971  	// Third call: put Tiltfile back. No change to manifest or to synced files, so expect no build.
   972  	f.WriteConfigFiles("Tiltfile", origTiltfile)
   973  	f.WaitUntil("tiltfile error cleared", func(st store.EngineState) bool {
   974  		return st.LastMainTiltfileError() == nil
   975  	})
   976  
   977  	f.withState(func(state store.EngineState) {
   978  		assert.Equal(t, "", buildcontrol.NextManifestNameToBuild(state).String())
   979  	})
   980  }
   981  
   982  func TestConfigChange_TiltfileErrorAndFixWithFileChange(t *testing.T) {
   983  	f := newTestFixture(t)
   984  	f.useRealTiltfileLoader()
   985  
   986  	tiltfileWithCmd := func(cmd string) string {
   987  		return fmt.Sprintf(`
   988  docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile',
   989      live_update=[
   990          sync('./src', '/src'),
   991          run('%s')
   992      ]
   993  )
   994  k8s_yaml('snack.yaml')
   995  `, cmd)
   996  	}
   997  
   998  	f.WriteFile("Tiltfile", tiltfileWithCmd("original"))
   999  	f.WriteFile("Dockerfile", `FROM iron/go:dev`)
  1000  	f.WriteFile("snack.yaml", simpleYAML)
  1001  
  1002  	f.loadAndStart()
  1003  
  1004  	// First call: all is well
  1005  	_ = f.nextCall("first call")
  1006  
  1007  	// Second call: change Tiltfile, break manifest
  1008  	f.WriteConfigFiles("Tiltfile", "broken")
  1009  	f.WaitUntil("tiltfile error set", func(st store.EngineState) bool {
  1010  		return st.LastMainTiltfileError() != nil
  1011  	})
  1012  
  1013  	f.assertNoCall("Tiltfile error should prevent BuildAndDeploy from being called")
  1014  
  1015  	// Third call: put Tiltfile back. Manifest changed, so expect a build.
  1016  	f.WriteConfigFiles("Tiltfile", tiltfileWithCmd("changed"))
  1017  
  1018  	call := f.nextCall("fixed broken config and rebuilt manifest")
  1019  	assert.False(t, call.oneImageState().HasLastResult(),
  1020  		"expected this call to have NO image (since we should have cleared it to force an image build)")
  1021  
  1022  	f.WaitUntil("tiltfile error cleared", func(state store.EngineState) bool {
  1023  		return state.LastMainTiltfileError() == nil
  1024  	})
  1025  
  1026  	f.withManifestTarget("snack", func(mt store.ManifestTarget) {
  1027  		assert.Equal(t,
  1028  			model.ToUnixCmd("changed").Argv,
  1029  			mt.Manifest.ImageTargetAt(0).LiveUpdateSpec.Execs[0].Args,
  1030  			"Tiltfile change should have propagated to manifest")
  1031  	})
  1032  
  1033  	err := f.Stop()
  1034  	assert.Nil(t, err)
  1035  	f.assertAllBuildsConsumed()
  1036  }
  1037  
  1038  func TestConfigChange_TriggerModeChangePropagatesButDoesntInvalidateBuild(t *testing.T) {
  1039  	f := newTestFixture(t)
  1040  	f.useRealTiltfileLoader()
  1041  
  1042  	origTiltfile := `
  1043  docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile')
  1044  k8s_yaml('snack.yaml')`
  1045  	f.WriteFile("Tiltfile", origTiltfile)
  1046  	f.WriteFile("Dockerfile", `FROM iron/go:dev1`)
  1047  	f.WriteFile("snack.yaml", simpleYAML)
  1048  
  1049  	f.loadAndStart()
  1050  
  1051  	_ = f.nextCall("initial build")
  1052  	f.WaitUntilManifest("manifest has triggerMode = auto (default)", "snack", func(mt store.ManifestTarget) bool {
  1053  		return mt.Manifest.TriggerMode == model.TriggerModeAuto
  1054  	})
  1055  
  1056  	// Update Tiltfile to change the trigger mode of the manifest
  1057  	tiltfileWithTriggerMode := fmt.Sprintf(`%s
  1058  
  1059  trigger_mode(TRIGGER_MODE_MANUAL)`, origTiltfile)
  1060  	f.WriteConfigFiles("Tiltfile", tiltfileWithTriggerMode)
  1061  
  1062  	f.assertNoCall("A change to TriggerMode shouldn't trigger an update (doesn't invalidate current build)")
  1063  	f.WaitUntilManifest("triggerMode has changed on manifest", "snack", func(mt store.ManifestTarget) bool {
  1064  		return mt.Manifest.TriggerMode == model.TriggerModeManualWithAutoInit
  1065  	})
  1066  
  1067  	err := f.Stop()
  1068  	assert.Nil(t, err)
  1069  	f.assertAllBuildsConsumed()
  1070  }
  1071  
  1072  func TestConfigChange_ManifestWithPendingChangesBuildsIfTriggerModeChangedToAuto(t *testing.T) {
  1073  	f := newTestFixture(t)
  1074  	f.useRealTiltfileLoader()
  1075  
  1076  	baseTiltfile := `trigger_mode(%s)
  1077  docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile')
  1078  k8s_yaml('snack.yaml')`
  1079  	triggerManualTiltfile := fmt.Sprintf(baseTiltfile, "TRIGGER_MODE_MANUAL")
  1080  	f.WriteFile("Tiltfile", triggerManualTiltfile)
  1081  	f.WriteFile("Dockerfile", `FROM iron/go:dev1`)
  1082  	f.WriteFile("snack.yaml", simpleYAML)
  1083  
  1084  	f.loadAndStart()
  1085  
  1086  	// First call: with the old manifests
  1087  	_ = f.nextCall("initial build")
  1088  	var imageTargetID model.TargetID
  1089  	f.WaitUntilManifest("manifest has triggerMode = manual_after_initial", "snack", func(mt store.ManifestTarget) bool {
  1090  		imageTargetID = mt.Manifest.ImageTargetAt(0).ID() // grab for later
  1091  		return mt.Manifest.TriggerMode == model.TriggerModeManualWithAutoInit
  1092  	})
  1093  
  1094  	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("src/main.go"))
  1095  	f.WaitUntil("pending change appears", func(st store.EngineState) bool {
  1096  		return len(st.BuildStatus(imageTargetID).PendingFileChanges) > 0
  1097  	})
  1098  	f.assertNoCall("even though there are pending changes, a manual manifest shouldn't build without an explicit trigger")
  1099  
  1100  	// Update Tiltfile to change the trigger mode of the manifest
  1101  	triggerAutoTiltfile := fmt.Sprintf(baseTiltfile, "TRIGGER_MODE_AUTO")
  1102  	f.WriteConfigFiles("Tiltfile", triggerAutoTiltfile)
  1103  
  1104  	call := f.nextCall("manifest updated b/c it's now TriggerModeAuto")
  1105  	assert.True(t, call.oneImageState().HasLastResult(),
  1106  		"we did NOT clear the build state (b/c a change to Manifest.TriggerMode does NOT invalidate the build)")
  1107  	f.WaitUntilManifest("triggerMode has changed on manifest", "snack", func(mt store.ManifestTarget) bool {
  1108  		return mt.Manifest.TriggerMode == model.TriggerModeAuto
  1109  	})
  1110  	f.WaitUntil("manifest is no longer in trigger queue", func(st store.EngineState) bool {
  1111  		return len(st.TriggerQueue) == 0
  1112  	})
  1113  
  1114  	err := f.Stop()
  1115  	assert.Nil(t, err)
  1116  	f.assertAllBuildsConsumed()
  1117  }
  1118  
  1119  func TestConfigChange_ManifestIncludingInitialBuildsIfTriggerModeChangedToManualAfterInitial(t *testing.T) {
  1120  	f := newTestFixture(t)
  1121  
  1122  	foo := f.newManifest("foo").WithTriggerMode(model.TriggerModeManual)
  1123  	bar := f.newManifest("bar")
  1124  
  1125  	f.Start([]model.Manifest{foo, bar})
  1126  
  1127  	// foo should be skipped, and just bar built
  1128  	call := f.nextCallComplete("initial build")
  1129  	require.Equal(t, bar.ImageTargetAt(0), call.firstImgTarg())
  1130  
  1131  	// since foo is "Manual", it should not be built on startup
  1132  	// make sure there's nothing waiting to build
  1133  	f.withState(func(state store.EngineState) {
  1134  		n := buildcontrol.NextManifestNameToBuild(state)
  1135  		require.Equal(t, model.ManifestName(""), n)
  1136  	})
  1137  
  1138  	// change the trigger mode
  1139  	foo = foo.WithTriggerMode(model.TriggerModeManualWithAutoInit)
  1140  	f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{
  1141  		Name:       model.MainTiltfileManifestName,
  1142  		FinishTime: f.Now(),
  1143  		Manifests:  []model.Manifest{foo, bar},
  1144  	})
  1145  
  1146  	// now that it is a trigger mode that should build on startup, a build should kick off
  1147  	// even though we didn't trigger anything
  1148  	call = f.nextCallComplete("second build")
  1149  	require.Equal(t, foo.ImageTargetAt(0), call.firstImgTarg())
  1150  
  1151  	err := f.Stop()
  1152  	assert.Nil(t, err)
  1153  	f.assertAllBuildsConsumed()
  1154  }
  1155  
  1156  func TestConfigChange_FilenamesLoggedInManifestBuild(t *testing.T) {
  1157  	f := newTestFixture(t)
  1158  	f.useRealTiltfileLoader()
  1159  
  1160  	f.WriteFile("Tiltfile", `
  1161  k8s_yaml('snack.yaml')
  1162  docker_build('gcr.io/windmill-public-containers/servantes/snack', './src')`)
  1163  	f.WriteFile("src/Dockerfile", `FROM iron/go:dev`)
  1164  	f.WriteFile("snack.yaml", simpleYAML)
  1165  
  1166  	f.loadAndStart()
  1167  
  1168  	f.WaitUntilManifestState("snack loaded", "snack", func(ms store.ManifestState) bool {
  1169  		return len(ms.BuildHistory) == 1
  1170  	})
  1171  
  1172  	// make a config file change to kick off a new build
  1173  	f.WriteFile("Tiltfile", `
  1174  k8s_yaml('snack.yaml')
  1175  docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', ignore='Dockerfile')`)
  1176  	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("Tiltfile"))
  1177  
  1178  	f.WaitUntilManifestState("snack reloaded", "snack", func(ms store.ManifestState) bool {
  1179  		return len(ms.BuildHistory) == 2
  1180  	})
  1181  
  1182  	f.withState(func(es store.EngineState) {
  1183  		expected := fmt.Sprintf("1 File Changed: [%s]", f.JoinPath("Tiltfile"))
  1184  		require.Contains(t, es.LogStore.ManifestLog("snack"), expected)
  1185  	})
  1186  
  1187  	err := f.Stop()
  1188  	assert.Nil(t, err)
  1189  }
  1190  
  1191  func TestConfigChange_LocalResourceChange(t *testing.T) {
  1192  	f := newTestFixture(t)
  1193  	f.useRealTiltfileLoader()
  1194  
  1195  	f.WriteFile("Tiltfile", `print('tiltfile 1')
  1196  local_resource('local', 'echo one fish two fish', deps='foo.bar')`)
  1197  
  1198  	f.loadAndStart()
  1199  
  1200  	// First call: with the old manifests
  1201  	call := f.nextCall("initial call")
  1202  	assert.Equal(t, "local", string(call.local().Name))
  1203  	assert.Equal(t, "echo one fish two fish", model.ArgListToString(call.local().UpdateCmdSpec.Args))
  1204  
  1205  	// Change the definition of the resource -- this changes the manifest, which should trigger an update
  1206  	f.WriteConfigFiles("Tiltfile", `print('tiltfile 2')
  1207  local_resource('local', 'echo red fish blue fish', deps='foo.bar')`)
  1208  	call = f.nextCall("rebuild from config change")
  1209  	assert.Equal(t, "echo red fish blue fish", model.ArgListToString(call.local().UpdateCmdSpec.Args))
  1210  
  1211  	err := f.Stop()
  1212  	assert.Nil(t, err)
  1213  	f.assertAllBuildsConsumed()
  1214  }
  1215  
  1216  func TestDockerRebuildWithChangedFiles(t *testing.T) {
  1217  	f := newTestFixture(t)
  1218  	df := `FROM golang
  1219  ADD ./ ./
  1220  go build ./...
  1221  `
  1222  	manifest := f.newManifest("foobar")
  1223  	iTarget := manifest.ImageTargetAt(0).
  1224  		WithLiveUpdateSpec("foobar", v1alpha1.LiveUpdateSpec{}).
  1225  		WithDockerImage(v1alpha1.DockerImageSpec{
  1226  			DockerfileContents: df,
  1227  			Context:            f.Path(),
  1228  		})
  1229  	manifest = manifest.WithImageTarget(iTarget)
  1230  
  1231  	f.Start([]model.Manifest{manifest})
  1232  
  1233  	call := f.nextCallComplete("first build")
  1234  	assert.True(t, call.oneImageState().IsEmpty())
  1235  
  1236  	// Simulate a change to main.go
  1237  	mainPath := filepath.Join(f.Path(), "main.go")
  1238  	f.fsWatcher.Events <- watch.NewFileEvent(mainPath)
  1239  
  1240  	// Check that this triggered a rebuild.
  1241  	call = f.nextCallComplete("rebuild triggered")
  1242  	assert.Equal(t, []string{mainPath}, call.oneImageState().FilesChanged())
  1243  
  1244  	err := f.Stop()
  1245  	assert.NoError(t, err)
  1246  	f.assertAllBuildsConsumed()
  1247  }
  1248  
  1249  func TestHudUpdated(t *testing.T) {
  1250  	f := newTestFixture(t)
  1251  
  1252  	manifest := f.newManifest("foobar")
  1253  
  1254  	f.Start([]model.Manifest{manifest})
  1255  	call := f.nextCall()
  1256  	assert.True(t, call.oneImageState().IsEmpty())
  1257  
  1258  	f.WaitUntilHUD("hud update", func(v view.View) bool {
  1259  		return len(v.Resources) == 2
  1260  	})
  1261  
  1262  	err := f.Stop()
  1263  	assert.Equal(t, nil, err)
  1264  
  1265  	assert.Equal(t, 2, len(f.fakeHud().LastView.Resources))
  1266  	assert.Equal(t, store.MainTiltfileManifestName, f.fakeHud().LastView.Resources[0].Name)
  1267  	rv := f.fakeHud().LastView.Resources[1]
  1268  	assert.Equal(t, manifest.Name, rv.Name)
  1269  	f.assertAllBuildsConsumed()
  1270  }
  1271  
  1272  func TestDisabledHudUpdated(t *testing.T) {
  1273  	if runtime.GOOS == "windows" {
  1274  		t.Skip("TODO(nick): Investigate")
  1275  	}
  1276  	f := newTestFixture(t)
  1277  
  1278  	manifest := f.newManifest("foobar")
  1279  	opt := func(ia InitAction) InitAction {
  1280  		ia.TerminalMode = store.TerminalModeStream
  1281  		return ia
  1282  	}
  1283  
  1284  	f.Start([]model.Manifest{manifest}, opt)
  1285  	call := f.nextCall()
  1286  	assert.True(t, call.oneImageState().IsEmpty())
  1287  
  1288  	// Make sure we're done logging stuff, then grab the processed-logs checkpoint
  1289  	f.WaitUntil("foobar logs appear", func(es store.EngineState) bool {
  1290  		return strings.Contains(f.log.String(), "Initial Build")
  1291  	})
  1292  
  1293  	assert.True(t, f.ts.ProcessedLogs > 0)
  1294  	oldCheckpoint := f.ts.ProcessedLogs
  1295  
  1296  	// Log something new, make sure it's reflected
  1297  	msg := []byte("hello world!\n")
  1298  	f.store.Dispatch(store.NewGlobalLogAction(logger.InfoLvl, msg))
  1299  
  1300  	f.WaitUntil("hello world logs appear", func(es store.EngineState) bool {
  1301  		return strings.Contains(f.log.String(), "hello world!")
  1302  	})
  1303  
  1304  	assert.True(t, f.ts.ProcessedLogs > oldCheckpoint)
  1305  
  1306  	err := f.Stop()
  1307  	assert.Equal(t, nil, err)
  1308  
  1309  	f.assertAllBuildsConsumed()
  1310  
  1311  }
  1312  
  1313  func TestPodEvent(t *testing.T) {
  1314  	f := newTestFixture(t)
  1315  	manifest := f.newManifest("foobar")
  1316  	pb := f.registerForDeployer(manifest)
  1317  	f.Start([]model.Manifest{manifest})
  1318  
  1319  	call := f.nextCall()
  1320  	assert.True(t, call.oneImageState().IsEmpty())
  1321  
  1322  	pod := pb.WithPhase("CrashLoopBackOff").Build()
  1323  	f.podEvent(pod)
  1324  
  1325  	f.WaitUntilHUDResource("hud update", "foobar", func(res view.Resource) bool {
  1326  		return res.K8sInfo().PodName == pod.Name
  1327  	})
  1328  
  1329  	rv := f.hudResource("foobar")
  1330  	assert.Equal(t, pod.Name, rv.K8sInfo().PodName)
  1331  	assert.Equal(t, "CrashLoopBackOff", rv.K8sInfo().PodStatus)
  1332  
  1333  	assert.NoError(t, f.Stop())
  1334  	f.assertAllBuildsConsumed()
  1335  }
  1336  
  1337  func TestPodEventContainerStatus(t *testing.T) {
  1338  	f := newTestFixture(t)
  1339  	manifest := f.newManifest("foobar")
  1340  	pb := f.registerForDeployer(manifest)
  1341  	f.Start([]model.Manifest{manifest})
  1342  
  1343  	var ref reference.NamedTagged
  1344  	f.WaitUntilManifestState("image appears", "foobar", func(ms store.ManifestState) bool {
  1345  		result := ms.BuildStatus(manifest.ImageTargetAt(0).ID()).LastResult
  1346  		ref, _ = container.ParseNamedTagged(store.ClusterImageRefFromBuildResult(result))
  1347  		return ref != nil
  1348  	})
  1349  
  1350  	pod := pb.Build()
  1351  	pod.Status = k8s.FakePodStatus(ref, "Running")
  1352  	pod.Status.ContainerStatuses[0].ContainerID = ""
  1353  	pod.Spec = k8s.FakePodSpec(ref)
  1354  	f.podEvent(pod)
  1355  
  1356  	podState := v1alpha1.Pod{}
  1357  	f.WaitUntilManifestState("container status", "foobar", func(ms store.ManifestState) bool {
  1358  		podState = ms.MostRecentPod()
  1359  		return podState.Name == pod.Name && len(podState.Containers) > 0
  1360  	})
  1361  
  1362  	container := podState.Containers[0]
  1363  	assert.Equal(t, "", container.ID)
  1364  	assert.Equal(t, "main", container.Name)
  1365  	assert.Equal(t, []int32{8080}, container.Ports)
  1366  
  1367  	err := f.Stop()
  1368  	assert.Nil(t, err)
  1369  }
  1370  
  1371  func TestPodEventContainerStatusWithoutImage(t *testing.T) {
  1372  	f := newTestFixture(t)
  1373  	manifest := model.Manifest{
  1374  		Name: model.ManifestName("foobar"),
  1375  	}.WithDeployTarget(k8s.MustTarget("foobar", SanchoYAML))
  1376  	pb := f.registerForDeployer(manifest)
  1377  	ref := container.MustParseNamedTagged("dockerhub/we-didnt-build-this:foo")
  1378  	f.Start([]model.Manifest{manifest})
  1379  
  1380  	f.WaitUntilManifestState("first build complete", "foobar", func(ms store.ManifestState) bool {
  1381  		return len(ms.BuildHistory) > 0
  1382  	})
  1383  
  1384  	pod := pb.Build()
  1385  	pod.Status = k8s.FakePodStatus(ref, "Running")
  1386  
  1387  	// If we have no image target to match container status by image ref,
  1388  	// we should just take the first one, i.e. this one
  1389  	pod.Status.ContainerStatuses[0].Name = "first-container"
  1390  	pod.Status.ContainerStatuses[0].ContainerID = "docker://great-container-id"
  1391  
  1392  	pod.Spec = v1.PodSpec{
  1393  		Containers: []v1.Container{
  1394  			{
  1395  				Name:  "second-container",
  1396  				Image: "gcr.io/windmill-public-containers/tilt-synclet:latest",
  1397  				Ports: []v1.ContainerPort{{ContainerPort: 9999}},
  1398  			},
  1399  			// we match container spec by NAME, so we'll get this one even though it comes second.
  1400  			{
  1401  				Name:  "first-container",
  1402  				Image: ref.Name(),
  1403  				Ports: []v1.ContainerPort{{ContainerPort: 8080}},
  1404  			},
  1405  		},
  1406  	}
  1407  
  1408  	f.podEvent(pod)
  1409  
  1410  	podState := v1alpha1.Pod{}
  1411  	f.WaitUntilManifestState("container status", "foobar", func(ms store.ManifestState) bool {
  1412  		podState = ms.MostRecentPod()
  1413  		return podState.Name == pod.Name && len(podState.Containers) > 0
  1414  	})
  1415  
  1416  	// If we have no image target to match container by image ref, we just take the first one
  1417  	container := podState.Containers[0]
  1418  	assert.Equal(t, "great-container-id", container.ID)
  1419  	assert.Equal(t, "first-container", container.Name)
  1420  	assert.Equal(t, []int32{8080}, store.AllPodContainerPorts(podState))
  1421  
  1422  	err := f.Stop()
  1423  	assert.Nil(t, err)
  1424  }
  1425  
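        // TestPodEventUpdateByTimestamp verifies that a pod with a newer creation timestamp
        // replaces the older, crashing pod as the most recent pod.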
  1426  func TestPodEventUpdateByTimestamp(t *testing.T) {
  1427  	f := newTestFixture(t)
  1428  	manifest := f.newManifest("foobar")
  1429  	pb := f.registerForDeployer(manifest)
  1430  	f.Start([]model.Manifest{manifest})
  1431  
  1432  	call := f.nextCall()
  1433  	assert.True(t, call.oneImageState().IsEmpty())
  1434  
  1435  	firstCreationTime := f.Now()
  1436  	pod := pb.
  1437  		WithCreationTime(firstCreationTime).
  1438  		WithPhase("CrashLoopBackOff").
  1439  		Build()
  1440  	f.podEvent(pod)
  1441  	f.WaitUntilHUDResource("hud update crash", "foobar", func(res view.Resource) bool {
  1442  		return res.K8sInfo().PodStatus == "CrashLoopBackOff"
  1443  	})
  1444  
  1445  	pb = podbuilder.New(t, manifest).
  1446  		WithPodName("my-new-pod").
  1447  		WithCreationTime(firstCreationTime.Add(time.Minute * 2))
  1448  	newPod := pb.Build()
  1449  	f.podEvent(newPod)
  1450  	f.WaitUntilHUDResource("hud update running", "foobar", func(res view.Resource) bool {
  1451  		return res.K8sInfo().PodStatus == "Running"
  1452  	})
  1453  
  1454  	rv := f.hudResource("foobar")
  1455  	assert.Equal(t, newPod.Name, rv.K8sInfo().PodName)
  1456  	assert.Equal(t, "Running", rv.K8sInfo().PodStatus)
  1457  
  1458  	assert.NoError(t, f.Stop())
  1459  	f.assertAllBuildsConsumed()
  1460  }
  1461  
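        // TestPodForgottenOnDisable verifies that disabling a resource clears its tracked pods.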
  1462  func TestPodForgottenOnDisable(t *testing.T) {
  1463  	f := newTestFixture(t)
  1464  	manifest := f.newManifest("foobar")
  1465  	pb := f.registerForDeployer(manifest)
  1466  	f.Start([]model.Manifest{manifest})
  1467  
  1468  	call := f.nextCall()
  1469  	assert.True(t, call.oneImageState().IsEmpty())
  1470  
  1471  	pod := pb.WithPhase("CrashLoopBackOff").Build()
  1472  	f.podEvent(pod)
  1473  
  1474  	f.WaitUntilManifestState("pod seen", "foobar", func(ms store.ManifestState) bool {
  1475  		return ms.K8sRuntimeState().MostRecentPod().Status == "CrashLoopBackOff"
  1476  	})
  1477  
  1478  	f.setDisableState("foobar", true)
  1479  
  1480  	f.WaitUntilManifestState("pod unseen", "foobar", func(ms store.ManifestState) bool {
  1481  		return ms.K8sRuntimeState().PodLen() == 0
  1482  	})
  1483  
  1484  	assert.NoError(t, f.Stop())
  1485  	f.assertAllBuildsConsumed()
  1486  }
  1487  
  1488  func TestPodEventUpdateByPodName(t *testing.T) {
  1489  	f := newTestFixture(t)
  1490  	manifest := f.newManifest("foobar")
  1491  	pb := f.registerForDeployer(manifest)
  1492  	f.Start([]model.Manifest{manifest})
  1493  
  1494  	call := f.nextCallComplete()
  1495  	assert.True(t, call.oneImageState().IsEmpty())
  1496  
  1497  	creationTime := f.Now()
  1498  	pb = pb.
  1499  		WithCreationTime(creationTime).
  1500  		WithPhase("CrashLoopBackOff")
  1501  	f.podEvent(pb.Build())
  1502  
  1503  	f.WaitUntilHUDResource("pod crashes", "foobar", func(res view.Resource) bool {
  1504  		return res.K8sInfo().PodStatus == "CrashLoopBackOff"
  1505  	})
  1506  
  1507  	f.podEvent(pb.WithPhase("Running").Build())
  1508  
  1509  	f.WaitUntilHUDResource("pod comes back", "foobar", func(res view.Resource) bool {
  1510  		return res.K8sInfo().PodStatus == "Running"
  1511  	})
  1512  
  1513  	rv := f.hudResource("foobar")
  1514  	assert.Equal(t, pb.Build().Name, rv.K8sInfo().PodName)
  1515  	assert.Equal(t, "Running", rv.K8sInfo().PodStatus)
  1516  
  1517  	err := f.Stop()
  1518  	if err != nil {
  1519  		t.Fatal(err)
  1520  	}
  1521  
  1522  	f.assertAllBuildsConsumed()
  1523  }
  1524  
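        // TestPodEventIgnoreOlderPod verifies that an event for a pod with an older creation
        // timestamp than the one already tracked is ignored.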
  1525  func TestPodEventIgnoreOlderPod(t *testing.T) {
  1526  	f := newTestFixture(t)
  1527  	manifest := f.newManifest("foobar")
  1528  	pb := f.registerForDeployer(manifest)
  1529  	f.Start([]model.Manifest{manifest})
  1530  
  1531  	call := f.nextCall()
  1532  	assert.True(t, call.oneImageState().IsEmpty())
  1533  
  1534  	creationTime := f.Now()
  1535  	pb = pb.
  1536  		WithPodName("my-new-pod").
  1537  		WithPhase("CrashLoopBackOff").
  1538  		WithCreationTime(creationTime)
  1539  	pod := pb.Build()
  1540  	f.podEvent(pod)
  1541  	f.WaitUntilHUDResource("hud update", "foobar", func(res view.Resource) bool {
  1542  		return res.K8sInfo().PodStatus == "CrashLoopBackOff"
  1543  	})
  1544  
  1545  	pb = pb.WithCreationTime(creationTime.Add(time.Minute * -1))
  1546  	oldPod := pb.Build()
  1547  	f.podEvent(oldPod)
  1548  	time.Sleep(10 * time.Millisecond)
  1549  
  1550  	assert.NoError(t, f.Stop())
  1551  	f.assertAllBuildsConsumed()
  1552  
  1553  	rv := f.hudResource("foobar")
  1554  	assert.Equal(t, pod.Name, rv.K8sInfo().PodName)
  1555  	assert.Equal(t, "CrashLoopBackOff", rv.K8sInfo().PodStatus)
  1556  }
  1557  
  1558  func TestPodContainerStatus(t *testing.T) {
  1559  	f := newTestFixture(t)
  1560  	manifest := f.newManifest("fe")
  1561  	pb := f.registerForDeployer(manifest)
  1562  	f.Start([]model.Manifest{manifest})
  1563  
  1564  	_ = f.nextCall()
  1565  
  1566  	var ref reference.NamedTagged
  1567  	f.WaitUntilManifestState("image appears", "fe", func(ms store.ManifestState) bool {
  1568  		result := ms.BuildStatus(manifest.ImageTargetAt(0).ID()).LastResult
  1569  		ref, _ = container.ParseNamedTagged(store.ClusterImageRefFromBuildResult(result))
  1570  		return ref != nil
  1571  	})
  1572  
  1573  	startedAt := f.Now()
  1574  	pb = pb.WithCreationTime(startedAt)
  1575  	pod := pb.Build()
  1576  	f.podEvent(pod)
  1577  	f.WaitUntilManifestState("pod appears", "fe", func(ms store.ManifestState) bool {
  1578  		return ms.MostRecentPod().Name == pod.Name
  1579  	})
  1580  
  1581  	pod = pb.Build()
  1582  	pod.Spec = k8s.FakePodSpec(ref)
  1583  	pod.Status = k8s.FakePodStatus(ref, "Running")
  1584  	f.podEvent(pod)
  1585  
  1586  	f.WaitUntilManifestState("container is ready", "fe", func(ms store.ManifestState) bool {
  1587  		ports := store.AllPodContainerPorts(ms.MostRecentPod())
  1588  		return len(ports) == 1 && ports[0] == 8080
  1589  	})
  1590  
  1591  	err := f.Stop()
  1592  	assert.NoError(t, err)
  1593  
  1594  	f.assertAllBuildsConsumed()
  1595  }
  1596  
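        // TestUpper_WatchDockerIgnoredFiles verifies that a file event matching an image
        // target's ignore patterns does not trigger a build.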
  1597  func TestUpper_WatchDockerIgnoredFiles(t *testing.T) {
  1598  	f := newTestFixture(t)
  1599  	manifest := f.newManifest("foobar")
  1600  	manifest = manifest.WithImageTarget(manifest.ImageTargetAt(0).
  1601  		WithIgnores([]v1alpha1.IgnoreDef{
  1602  			{
  1603  				BasePath: f.Path(),
  1604  				Patterns: []string{"dignore.txt"},
  1605  			},
  1606  		}))
  1607  
  1608  	f.Start([]model.Manifest{manifest})
  1609  
  1610  	call := f.nextCall()
  1611  	assert.Equal(t, manifest.ImageTargetAt(0), call.firstImgTarg())
  1612  
  1613  	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("dignore.txt"))
  1614  	f.assertNoCall("event for ignored file should not trigger build")
  1615  
  1616  	err := f.Stop()
  1617  	assert.NoError(t, err)
  1618  	f.assertAllBuildsConsumed()
  1619  }
  1620  
  1621  func TestUpper_ShowErrorPodLog(t *testing.T) {
  1622  	f := newTestFixture(t)
  1623  
  1624  	name := model.ManifestName("foobar")
  1625  	manifest := f.newManifest(name.String())
  1626  	pb := f.registerForDeployer(manifest)
  1627  
  1628  	f.Start([]model.Manifest{manifest})
  1629  	f.waitForCompletedBuildCount(1)
  1630  
  1631  	pod := pb.Build()
  1632  	f.startPod(pod, name)
  1633  	f.podLog(pod, name, "first string")
  1634  
  1635  	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("go/a"))
  1636  
  1637  	f.waitForCompletedBuildCount(2)
  1638  	f.podLog(pod, name, "second string")
  1639  
  1640  	f.withState(func(state store.EngineState) {
  1641  		ms, _ := state.ManifestState(name)
  1642  		spanID := k8sconv.SpanIDForPod(name, k8s.PodID(ms.MostRecentPod().Name))
  1643  		assert.Equal(t, "first string\nsecond string\n", state.LogStore.SpanLog(spanID))
  1644  	})
  1645  
  1646  	err := f.Stop()
  1647  	assert.NoError(t, err)
  1648  }
  1649  
  1650  func TestUpperPodLogInCrashLoopThirdInstanceStillUp(t *testing.T) {
  1651  	f := newTestFixture(t)
  1652  
  1653  	name := model.ManifestName("foobar")
  1654  	manifest := f.newManifest(name.String())
  1655  	pb := f.registerForDeployer(manifest)
  1656  
  1657  	f.Start([]model.Manifest{manifest})
  1658  	f.waitForCompletedBuildCount(1)
  1659  
  1660  	f.startPod(pb.Build(), name)
  1661  	f.podLog(pb.Build(), name, "first string")
  1662  	pb = f.restartPod(pb)
  1663  	f.podLog(pb.Build(), name, "second string")
  1664  	pb = f.restartPod(pb)
  1665  	f.podLog(pb.Build(), name, "third string")
  1666  
  1667  	// The third instance is still up, so we want to show the log from the last crashed pod plus the log from the current pod.
  1668  	f.withState(func(es store.EngineState) {
  1669  		ms, _ := es.ManifestState(name)
  1670  		spanID := k8sconv.SpanIDForPod(name, k8s.PodID(ms.MostRecentPod().Name))
  1671  		assert.Contains(t, es.LogStore.SpanLog(spanID), "third string\n")
  1672  		assert.Contains(t, es.LogStore.ManifestLog(name), "second string\n")
  1673  		assert.Contains(t, es.LogStore.ManifestLog(name), "third string\n")
  1674  		assert.Contains(t, es.LogStore.ManifestLog(name),
  1675  			"WARNING: Detected container restart. Pod: foobar-fakePodID. Container: sancho.\n")
  1676  		assert.Contains(t, es.LogStore.SpanLog(spanID), "third string\n")
  1677  	})
  1678  
  1679  	err := f.Stop()
  1680  	assert.NoError(t, err)
  1681  }
  1682  
  1683  func TestUpperPodLogInCrashLoopPodCurrentlyDown(t *testing.T) {
  1684  	f := newTestFixture(t)
  1685  
  1686  	name := model.ManifestName("foobar")
  1687  	manifest := f.newManifest(name.String())
  1688  	pb := f.registerForDeployer(manifest)
  1689  
  1690  	f.Start([]model.Manifest{manifest})
  1691  	f.waitForCompletedBuildCount(1)
  1692  
  1693  	f.startPod(pb.Build(), name)
  1694  	f.podLog(pb.Build(), name, "first string")
  1695  	pb = f.restartPod(pb)
  1696  	f.podLog(pb.Build(), name, "second string")
  1697  
  1698  	pod := pb.Build()
  1699  	pod.Status.ContainerStatuses[0].Ready = false
  1700  	f.notifyAndWaitForPodStatus(pod, name, func(pod v1alpha1.Pod) bool {
  1701  		return !store.AllPodContainersReady(pod)
  1702  	})
  1703  
  1704  	f.withState(func(state store.EngineState) {
  1705  		ms, _ := state.ManifestState(name)
  1706  		spanID := k8sconv.SpanIDForPod(name, k8s.PodID(ms.MostRecentPod().Name))
  1707  		assert.Equal(t, "first string\nWARNING: Detected container restart. Pod: foobar-fakePodID. Container: sancho.\nsecond string\n",
  1708  			state.LogStore.SpanLog(spanID))
  1709  	})
  1710  
  1711  	err := f.Stop()
  1712  	assert.NoError(t, err)
  1713  }
  1714  
  1715  func TestUpperRecordPodWithMultipleContainers(t *testing.T) {
  1716  	f := newTestFixture(t)
  1717  
  1718  	name := model.ManifestName("foobar")
  1719  	manifest := f.newManifest(name.String())
  1720  	pb := f.registerForDeployer(manifest)
  1721  
  1722  	f.Start([]model.Manifest{manifest})
  1723  	f.waitForCompletedBuildCount(1)
  1724  
  1725  	pod := pb.Build()
  1726  	pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, v1.ContainerStatus{
  1727  		Name:        "sidecar",
  1728  		Image:       "sidecar-image",
  1729  		Ready:       false,
  1730  		ContainerID: "docker://sidecar",
  1731  	})
  1732  
  1733  	f.startPod(pod, manifest.Name)
  1734  	f.notifyAndWaitForPodStatus(pod, manifest.Name, func(pod v1alpha1.Pod) bool {
  1735  		if len(pod.Containers) != 2 {
  1736  			return false
  1737  		}
  1738  
  1739  		c1 := pod.Containers[0]
  1740  		require.Equal(t, container.Name("sancho").String(), c1.Name)
  1741  		require.Equal(t, podbuilder.FakeContainerID().String(), c1.ID)
  1742  		require.True(t, c1.Ready)
  1743  
  1744  		c2 := pod.Containers[1]
  1745  		require.Equal(t, container.Name("sidecar").String(), c2.Name)
  1746  		require.Equal(t, container.ID("sidecar").String(), c2.ID)
  1747  		require.False(t, c2.Ready)
  1748  
  1749  		return true
  1750  	})
  1751  
  1752  	err := f.Stop()
  1753  	assert.NoError(t, err)
  1754  }
  1755  
  1756  func TestUpperProcessOtherContainersIfOneErrors(t *testing.T) {
  1757  	f := newTestFixture(t)
  1758  
  1759  	name := model.ManifestName("foobar")
  1760  	manifest := f.newManifest(name.String())
  1761  	pb := f.registerForDeployer(manifest)
  1762  
  1763  	f.Start([]model.Manifest{manifest})
  1764  	f.waitForCompletedBuildCount(1)
  1765  
  1766  	pod := pb.Build()
  1767  	pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, v1.ContainerStatus{
  1768  		Name:  "extra1",
  1769  		Image: "extra1-image",
  1770  		Ready: false,
  1771  		// when populating container info for this pod, we'll error when we try to parse
  1772  		// this cID -- we should still populate info for the other containers, though.
  1773  		ContainerID: "malformed",
  1774  	}, v1.ContainerStatus{
  1775  		Name:        "extra2",
  1776  		Image:       "extra2-image",
  1777  		Ready:       false,
  1778  		ContainerID: "docker://extra2",
  1779  	})
  1780  
  1781  	f.startPod(pod, manifest.Name)
  1782  	f.notifyAndWaitForPodStatus(pod, manifest.Name, func(pod v1alpha1.Pod) bool {
  1783  		if len(pod.Containers) != 2 {
  1784  			return false
  1785  		}
  1786  
  1787  		require.Equal(t, container.Name("sancho").String(), pod.Containers[0].Name)
  1788  		require.Equal(t, container.Name("extra2").String(), pod.Containers[1].Name)
  1789  
  1790  		return true
  1791  	})
  1792  
  1793  	err := f.Stop()
  1794  	assert.NoError(t, err)
  1795  }
  1796  
  1797  func TestUpper_ServiceEvent(t *testing.T) {
  1798  	f := newTestFixture(t)
  1799  
  1800  	manifest := f.newManifest("foobar")
  1801  
  1802  	f.Start([]model.Manifest{manifest})
  1803  	f.waitForCompletedBuildCount(1)
  1804  
  1805  	result := f.b.resultsByID[manifest.K8sTarget().ID()]
  1806  	uid := result.(store.K8sBuildResult).DeployedRefs[0].UID
  1807  	svc := servicebuilder.New(t, manifest).WithUID(uid).WithPort(8080).WithIP("1.2.3.4").Build()
  1808  	err := k8swatch.DispatchServiceChange(f.store, svc, manifest.Name, "")
  1809  	require.NoError(t, err)
  1810  
  1811  	f.WaitUntilManifestState("lb updated", "foobar", func(ms store.ManifestState) bool {
  1812  		return len(ms.K8sRuntimeState().LBs) > 0
  1813  	})
  1814  
  1815  	err = f.Stop()
  1816  	assert.NoError(t, err)
  1817  
  1818  	ms, _ := f.upper.store.RLockState().ManifestState(manifest.Name)
  1819  	defer f.upper.store.RUnlockState()
  1820  	lbs := ms.K8sRuntimeState().LBs
  1821  	assert.Equal(t, 1, len(lbs))
  1822  	url, ok := lbs[k8s.ServiceName(svc.Name)]
  1823  	if !ok {
  1824  		t.Fatalf("%v did not contain key 'myservice'", lbs)
  1825  	}
  1826  	assert.Equal(t, "http://1.2.3.4:8080/", url.String())
  1827  }
  1828  
  1829  func TestUpper_ServiceEventRemovesURL(t *testing.T) {
  1830  	f := newTestFixture(t)
  1831  
  1832  	manifest := f.newManifest("foobar")
  1833  
  1834  	f.Start([]model.Manifest{manifest})
  1835  	f.waitForCompletedBuildCount(1)
  1836  
  1837  	result := f.b.resultsByID[manifest.K8sTarget().ID()]
  1838  	uid := result.(store.K8sBuildResult).DeployedRefs[0].UID
  1839  	sb := servicebuilder.New(t, manifest).WithUID(uid).WithPort(8080).WithIP("1.2.3.4")
  1840  	svc := sb.Build()
  1841  	err := k8swatch.DispatchServiceChange(f.store, svc, manifest.Name, "")
  1842  	require.NoError(t, err)
  1843  
  1844  	f.WaitUntilManifestState("lb url added", "foobar", func(ms store.ManifestState) bool {
  1845  		url := ms.K8sRuntimeState().LBs[k8s.ServiceName(svc.Name)]
  1846  		if url == nil {
  1847  			return false
  1848  		}
  1849  		return "http://1.2.3.4:8080/" == url.String()
  1850  	})
  1851  
  1852  	svc = sb.WithIP("").Build()
  1853  	err = k8swatch.DispatchServiceChange(f.store, svc, manifest.Name, "")
  1854  	require.NoError(t, err)
  1855  
  1856  	f.WaitUntilManifestState("lb url removed", "foobar", func(ms store.ManifestState) bool {
  1857  		url := ms.K8sRuntimeState().LBs[k8s.ServiceName(svc.Name)]
  1858  		return url == nil
  1859  	})
  1860  
  1861  	err = f.Stop()
  1862  	assert.NoError(t, err)
  1863  }
  1864  
  1865  func TestUpper_PodLogs(t *testing.T) {
  1866  	f := newTestFixture(t)
  1867  
  1868  	name := model.ManifestName("fe")
  1869  	manifest := f.newManifest(string(name))
  1870  	pb := f.registerForDeployer(manifest)
  1871  
  1872  	f.Start([]model.Manifest{manifest})
  1873  	f.waitForCompletedBuildCount(1)
  1874  
  1875  	pod := pb.Build()
  1876  	f.startPod(pod, name)
  1877  	f.podLog(pod, name, "Hello world!\n")
  1878  
  1879  	err := f.Stop()
  1880  	assert.NoError(t, err)
  1881  }
  1882  
  1883  func TestK8sEventGlobalLogAndManifestLog(t *testing.T) {
  1884  	f := newTestFixture(t)
  1885  
  1886  	name := model.ManifestName("fe")
  1887  	manifest := f.newManifest(string(name))
  1888  
  1889  	f.Start([]model.Manifest{manifest})
  1890  	f.waitForCompletedBuildCount(1)
  1891  
  1892  	objRef := v1.ObjectReference{UID: f.lastDeployedUID(name)}
  1893  	warnEvt := &v1.Event{
  1894  		InvolvedObject: objRef,
  1895  		Message:        "something has happened zomg",
  1896  		Type:           v1.EventTypeWarning,
  1897  		ObjectMeta: metav1.ObjectMeta{
  1898  			CreationTimestamp: apis.NewTime(f.Now()),
  1899  			Namespace:         k8s.DefaultNamespace.String(),
  1900  		},
  1901  	}
  1902  	f.kClient.UpsertEvent(warnEvt)
  1903  
  1904  	f.WaitUntil("event message appears in manifest log", func(st store.EngineState) bool {
  1905  		return strings.Contains(st.LogStore.ManifestLog(name), "something has happened zomg")
  1906  	})
  1907  
  1908  	f.withState(func(st store.EngineState) {
  1909  		assert.Contains(t, st.LogStore.String(), "something has happened zomg", "event message not in global log")
  1910  	})
  1911  
  1912  	err := f.Stop()
  1913  	assert.NoError(t, err)
  1914  }
  1915  
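        // TestK8sEventNotLoggedIfNoManifestForUID verifies that a K8s event whose UID does not
        // map to any manifest is not written to the log.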
  1916  func TestK8sEventNotLoggedIfNoManifestForUID(t *testing.T) {
  1917  	f := newTestFixture(t)
  1918  
  1919  	name := model.ManifestName("fe")
  1920  	manifest := f.newManifest(string(name))
  1921  
  1922  	f.Start([]model.Manifest{manifest})
  1923  	f.waitForCompletedBuildCount(1)
  1924  
  1925  	warnEvt := &v1.Event{
  1926  		InvolvedObject: v1.ObjectReference{UID: types.UID("someRandomUID")},
  1927  		Message:        "something has happened zomg",
  1928  		Type:           v1.EventTypeWarning,
  1929  		ObjectMeta: metav1.ObjectMeta{
  1930  			CreationTimestamp: apis.NewTime(f.Now()),
  1931  			Namespace:         k8s.DefaultNamespace.String(),
  1932  		},
  1933  	}
  1934  	f.kClient.UpsertEvent(warnEvt)
  1935  
  1936  	time.Sleep(10 * time.Millisecond)
  1937  
  1938  	assert.NotContains(t, f.log.String(), "something has happened zomg",
  1939  		"should not log event message b/c it doesn't have a UID -> Manifest mapping")
  1940  }
  1941  
  1942  func TestHudExitNoError(t *testing.T) {
  1943  	f := newTestFixture(t)
  1944  	f.Start([]model.Manifest{})
  1945  	f.store.Dispatch(hud.NewExitAction(nil))
  1946  	err := f.WaitForExit()
  1947  	assert.NoError(t, err)
  1948  }
  1949  
  1950  func TestHudExitWithError(t *testing.T) {
  1951  	f := newTestFixture(t)
  1952  	f.Start([]model.Manifest{})
  1953  	e := errors.New("helllllo")
  1954  	f.store.Dispatch(hud.NewExitAction(e))
  1955  	_ = f.WaitForNoExit()
  1956  }
  1957  
  1958  func TestNewConfigsAreWatchedAfterFailure(t *testing.T) {
  1959  	f := newTestFixture(t)
  1960  	f.useRealTiltfileLoader()
  1961  	f.loadAndStart()
  1962  
  1963  	f.WriteConfigFiles("Tiltfile", "read_file('foo.txt')")
  1964  	f.WaitUntil("foo.txt is a config file", func(state store.EngineState) bool {
  1965  		for _, s := range state.MainConfigPaths() {
  1966  			if s == f.JoinPath("foo.txt") {
  1967  				return true
  1968  			}
  1969  		}
  1970  		return false
  1971  	})
  1972  }
  1973  
  1974  func TestDockerComposeUp(t *testing.T) {
  1975  	f := newTestFixture(t)
  1976  	redis, server := f.setupDCFixture()
  1977  
  1978  	f.Start([]model.Manifest{redis, server})
  1979  	call := f.nextCall()
  1980  	assert.True(t, call.dcState().IsEmpty())
  1981  	assert.False(t, call.dc().ID().Empty())
  1982  	assert.Equal(t, redis.DockerComposeTarget().ID(), call.dc().ID())
  1983  	call = f.nextCall()
  1984  	assert.True(t, call.dcState().IsEmpty())
  1985  	assert.False(t, call.dc().ID().Empty())
  1986  	assert.Equal(t, server.DockerComposeTarget().ID(), call.dc().ID())
  1987  }
  1988  
  1989  func TestDockerComposeRedeployFromFileChange(t *testing.T) {
  1990  	f := newTestFixture(t)
  1991  	r, m := f.setupDCFixture()
  1992  
  1993  	f.Start([]model.Manifest{r, m})
  1994  	_ = f.nextCall()
  1995  	_ = f.nextCall()
  1996  
  1997  	// Change a file -- should trigger build
  1998  	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("package.json"))
  1999  	call := f.nextCall()
  2000  	assert.Equal(t, []string{f.JoinPath("package.json")}, call.oneImageState().FilesChanged())
  2001  }
  2002  
  2003  func TestDockerComposeRecordsBuildLogs(t *testing.T) {
  2004  	f := newTestFixture(t)
  2005  	f.useRealTiltfileLoader()
  2006  
  2007  	m, _ := f.setupDCFixture()
  2008  	expected := "yarn install"
  2009  	f.setBuildLogOutput(m.DockerComposeTarget().ID(), expected)
  2010  
  2011  	f.loadAndStart()
  2012  	f.waitForCompletedBuildCount(2)
  2013  
  2014  	// The build output should appear in both the global log and the build's span log.
  2015  	f.withState(func(st store.EngineState) {
  2016  		assert.Contains(t, st.LogStore.String(), expected)
  2017  
  2018  		ms, _ := st.ManifestState(m.ManifestName())
  2019  		spanID := ms.LastBuild().SpanID
  2020  		assert.Contains(t, st.LogStore.SpanLog(spanID), expected)
  2021  	})
  2022  }
  2023  
  2024  func TestDockerComposeBuildCompletedSetsStatusToUpIfSuccessful(t *testing.T) {
  2025  	f := newTestFixture(t)
  2026  	f.useRealTiltfileLoader()
  2027  
  2028  	m1, _ := f.setupDCFixture()
  2029  
  2030  	expected := container.ID("aaaaaa")
  2031  	f.b.nextDockerComposeContainerID = expected
  2032  
  2033  	containerState := docker.NewRunningContainerState()
  2034  	f.b.nextDockerComposeContainerState = &containerState
  2035  
  2036  	f.loadAndStart()
  2037  
  2038  	f.waitForCompletedBuildCount(2)
  2039  
  2040  	f.withManifestState(m1.ManifestName(), func(st store.ManifestState) {
  2041  		state, ok := st.RuntimeState.(dockercompose.State)
  2042  		if !ok {
  2043  			t.Fatal("expected RuntimeState to be docker compose, but it wasn't")
  2044  		}
  2045  		assert.Equal(t, expected, state.ContainerID)
  2046  		assert.Equal(t, v1alpha1.RuntimeStatusOK, state.RuntimeStatus())
  2047  	})
  2048  }
  2049  
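        // TestDockerComposeStopOnDisable verifies that disabling a docker-compose resource
        // issues an rm call for that service.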
  2050  func TestDockerComposeStopOnDisable(t *testing.T) {
  2051  	f := newTestFixture(t)
  2052  	f.useRealTiltfileLoader()
  2053  
  2054  	m, _ := f.setupDCFixture()
  2055  
  2056  	expected := container.ID("aaaaaa")
  2057  	f.b.nextDockerComposeContainerID = expected
  2058  
  2059  	containerState := docker.NewRunningContainerState()
  2060  	f.b.nextDockerComposeContainerState = &containerState
  2061  
  2062  	f.loadAndStart()
  2063  
  2064  	f.waitForCompletedBuildCount(2)
  2065  
  2066  	f.setDisableState(m.Name, true)
  2067  
  2068  	require.Eventually(t, func() bool {
  2069  		return len(f.dcc.RmCalls()) > 0
  2070  	}, stdTimeout, time.Millisecond)
  2071  
  2072  	require.Len(t, f.dcc.RmCalls(), 1)
  2073  	require.Len(t, f.dcc.RmCalls()[0].Specs, 1)
  2074  	require.Equal(t, m.Name.String(), f.dcc.RmCalls()[0].Specs[0].Service)
  2075  }
  2076  
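        // TestDockerComposeStartOnReenable verifies that re-enabling a disabled docker-compose
        // resource triggers another build.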
  2077  func TestDockerComposeStartOnReenable(t *testing.T) {
  2078  	f := newTestFixture(t)
  2079  	f.useRealTiltfileLoader()
  2080  
  2081  	m, _ := f.setupDCFixture()
  2082  
  2083  	expected := container.ID("aaaaaa")
  2084  	f.b.nextDockerComposeContainerID = expected
  2085  
  2086  	containerState := docker.NewRunningContainerState()
  2087  	f.b.nextDockerComposeContainerState = &containerState
  2088  
  2089  	f.loadAndStart()
  2090  
  2091  	f.waitForCompletedBuildCount(2)
  2092  
  2093  	f.setDisableState(m.Name, true)
  2094  
  2095  	require.Eventually(t, func() bool {
  2096  		return len(f.dcc.RmCalls()) > 0
  2097  	}, stdTimeout, time.Millisecond, "DC rm")
  2098  
  2099  	f.setDisableState(m.Name, false)
  2100  
  2101  	f.waitForCompletedBuildCount(3)
  2102  }
  2103  
  2104  func TestEmptyTiltfile(t *testing.T) {
  2105  	f := newTestFixture(t)
  2106  	f.useRealTiltfileLoader()
  2107  	f.WriteFile("Tiltfile", "")
  2108  
  2109  	closeCh := make(chan error)
  2110  	go func() {
  2111  		err := f.upper.Start(f.ctx, []string{}, model.TiltBuild{},
  2112  			f.JoinPath("Tiltfile"), store.TerminalModeHUD,
  2113  			analytics.OptIn, token.Token("unit test token"),
  2114  			"nonexistent.example.com")
  2115  		closeCh <- err
  2116  	}()
  2117  	f.WaitUntil("build is set", func(st store.EngineState) bool {
  2118  		return !st.TiltfileStates[model.MainTiltfileManifestName].LastBuild().Empty()
  2119  	})
  2120  	f.withState(func(st store.EngineState) {
  2121  		assert.Contains(t, st.TiltfileStates[model.MainTiltfileManifestName].LastBuild().Error.Error(), "No resources found. Check out ")
  2122  		assertContainsOnce(t, st.LogStore.String(), "No resources found. Check out ")
  2123  		assertContainsOnce(t, st.LogStore.ManifestLog(store.MainTiltfileManifestName), "No resources found. Check out ")
  2124  
  2125  		buildRecord := st.TiltfileStates[model.MainTiltfileManifestName].LastBuild()
  2126  		assertContainsOnce(t, st.LogStore.SpanLog(buildRecord.SpanID), "No resources found. Check out ")
  2127  	})
  2128  
  2129  	f.cancel()
  2130  
  2131  	err := <-closeCh
  2132  	testutils.FailOnNonCanceledErr(t, err, "upper.Start failed")
  2133  }
  2134  
  2135  func TestUpperStart(t *testing.T) {
  2136  	f := newTestFixture(t)
  2137  	f.useRealTiltfileLoader()
  2138  
  2139  	tok := token.Token("unit test token")
  2140  	cloudAddress := "nonexistent.example.com"
  2141  
  2142  	closeCh := make(chan error)
  2143  
  2144  	f.WriteFile("Tiltfile", "")
  2145  	go func() {
  2146  		err := f.upper.Start(f.ctx, []string{"foo", "bar"}, model.TiltBuild{},
  2147  			f.JoinPath("Tiltfile"), store.TerminalModeHUD,
  2148  			analytics.OptIn, tok, cloudAddress)
  2149  		closeCh <- err
  2150  	}()
  2151  	f.WaitUntil("init action processed", func(state store.EngineState) bool {
  2152  		return !state.TiltStartTime.IsZero()
  2153  	})
  2154  
  2155  	f.withState(func(state store.EngineState) {
  2156  		require.Equal(t, []string{"foo", "bar"}, state.UserConfigState.Args)
  2157  		require.Equal(t, f.JoinPath("Tiltfile"), state.DesiredTiltfilePath)
  2158  		require.Equal(t, tok, state.Token)
  2159  		require.Equal(t, analytics.OptIn, state.AnalyticsEffectiveOpt())
  2160  		require.Equal(t, cloudAddress, state.CloudAddress)
  2161  	})
  2162  
  2163  	f.cancel()
  2164  
  2165  	err := <-closeCh
  2166  	testutils.FailOnNonCanceledErr(t, err, "upper.Start failed")
  2167  }
  2168  
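        // TestWatchManifestsWithCommonAncestor verifies that a change to a file shared by two
        // manifests triggers both builds, in order, and reuses the shared image result for the
        // second manifest.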
  2169  func TestWatchManifestsWithCommonAncestor(t *testing.T) {
  2170  	f := newTestFixture(t)
  2171  	m1, m2 := NewManifestsWithCommonAncestor(f)
  2172  	f.Start([]model.Manifest{m1, m2})
  2173  
  2174  	f.waitForCompletedBuildCount(2)
  2175  
  2176  	call := f.nextCall("m1 build1")
  2177  	assert.Equal(t, m1.K8sTarget(), call.k8s())
  2178  
  2179  	call = f.nextCall("m2 build1")
  2180  	assert.Equal(t, m2.K8sTarget(), call.k8s())
  2181  
  2182  	f.WriteFile(filepath.Join("common", "a.txt"), "hello world")
  2183  
  2184  	aPath := f.JoinPath("common", "a.txt")
  2185  	f.fsWatcher.Events <- watch.NewFileEvent(aPath)
  2186  
  2187  	f.waitForCompletedBuildCount(4)
  2188  
  2189  	// Make sure that both builds are triggered, and that they
  2190  	// are triggered in a particular order.
  2191  	call = f.nextCall("m1 build2")
  2192  	assert.Equal(t, m1.K8sTarget(), call.k8s())
  2193  
  2194  	state := call.state[m1.ImageTargets[0].ID()]
  2195  	assert.Equal(t, map[string]bool{aPath: true}, state.FilesChangedSet)
  2196  
  2197  	// Make sure that when the second build is triggered, we did the bookkeeping
  2198  	// correctly around reusing the image and propagating DepsChanged when
  2199  	// we deploy the second k8s target.
  2200  	call = f.nextCall("m2 build2")
  2201  	assert.Equal(t, m2.K8sTarget(), call.k8s())
  2202  
  2203  	id := m2.ImageTargets[0].ID()
  2204  	result := f.b.resultsByID[id]
  2205  	assert.Equal(t, result, call.state[id].LastResult)
  2206  	assert.Equal(t, 0, len(call.state[id].FilesChangedSet))
  2207  
  2208  	id = m2.ImageTargets[1].ID()
  2209  	result = f.b.resultsByID[id]
  2210  
  2211  	// Assert the 2nd image was not re-used from the previous result.
  2212  	assert.NotEqual(t, result, call.state[id].LastResult)
  2213  	assert.Equal(t, map[model.TargetID]bool{m2.ImageTargets[0].ID(): true},
  2214  		call.state[id].DepsChangedSet)
  2215  
  2216  	err := f.Stop()
  2217  	assert.NoError(t, err)
  2218  	f.assertAllBuildsConsumed()
  2219  }
  2220  
  2221  func TestConfigChangeThatChangesManifestIsIncludedInManifestsChangedFile(t *testing.T) {
  2222  	// https://app.clubhouse.io/windmill/story/5701/test-testconfigchangethatchangesmanifestisincludedinmanifestschangedfile-is-flaky
  2223  	t.Skip("TODO(nick): fix this")
  2224  
  2225  	f := newTestFixture(t)
  2226  	f.useRealTiltfileLoader()
  2227  
  2228  	tiltfile := `
  2229  docker_build('gcr.io/windmill-public-containers/servantes/snack', '.')
  2230  k8s_yaml('snack.yaml')`
  2231  	f.WriteFile("Tiltfile", tiltfile)
  2232  	f.WriteFile("Dockerfile", `FROM iron/go:dev`)
  2233  	f.WriteFile("snack.yaml", testyaml.Deployment("snack", "gcr.io/windmill-public-containers/servantes/snack:old"))
  2234  
  2235  	f.loadAndStart()
  2236  
  2237  	f.waitForCompletedBuildCount(1)
  2238  
  2239  	f.WriteFile("snack.yaml", testyaml.Deployment("snack", "gcr.io/windmill-public-containers/servantes/snack:new"))
  2240  	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("snack.yaml"))
  2241  
  2242  	f.waitForCompletedBuildCount(2)
  2243  
  2244  	f.withManifestState("snack", func(ms store.ManifestState) {
  2245  		require.Equal(t, []string{f.JoinPath("snack.yaml")}, ms.LastBuild().Edits)
  2246  	})
  2247  
  2248  	f.WriteFile("Dockerfile", `FROM iron/go:foobar`)
  2249  	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("Dockerfile"))
  2250  
  2251  	f.waitForCompletedBuildCount(3)
  2252  
  2253  	f.withManifestState("snack", func(ms store.ManifestState) {
  2254  		require.Equal(t, []string{f.JoinPath("Dockerfile")}, ms.LastBuild().Edits)
  2255  	})
  2256  }
  2257  
  2258  func TestSetAnalyticsOpt(t *testing.T) {
  2259  	f := newTestFixture(t)
  2260  
  2261  	opt := func(ia InitAction) InitAction {
  2262  		ia.AnalyticsUserOpt = analytics.OptIn
  2263  		return ia
  2264  	}
  2265  
  2266  	f.Start([]model.Manifest{}, opt)
  2267  	f.store.Dispatch(store.AnalyticsUserOptAction{Opt: analytics.OptOut})
  2268  	f.WaitUntil("opted out", func(state store.EngineState) bool {
  2269  		return state.AnalyticsEffectiveOpt() == analytics.OptOut
  2270  	})
  2271  
  2272  	// If we don't wait for a count of 1 here, the state could flip to out and back to in before the subscriber sees it,
  2273  	// and we'd end up with no recorded events.
  2274  	f.opter.WaitUntilCount(t, 1)
  2275  
  2276  	f.store.Dispatch(store.AnalyticsUserOptAction{Opt: analytics.OptIn})
  2277  	f.WaitUntil("opted in", func(state store.EngineState) bool {
  2278  		return state.AnalyticsEffectiveOpt() == analytics.OptIn
  2279  	})
  2280  
  2281  	f.opter.WaitUntilCount(t, 2)
  2282  
  2283  	err := f.Stop()
  2284  	if !assert.NoError(t, err) {
  2285  		return
  2286  	}
  2287  	assert.Equal(t, []analytics.Opt{analytics.OptOut, analytics.OptIn}, f.opter.Calls())
  2288  }
  2289  
  2290  func TestFeatureFlagsStoredOnState(t *testing.T) {
  2291  	f := newTestFixture(t)
  2292  
  2293  	f.Start([]model.Manifest{})
  2294  	f.ensureCluster()
  2295  
  2296  	f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{
  2297  		Name:       model.MainTiltfileManifestName,
  2298  		FinishTime: f.Now(),
  2299  		Features:   map[string]bool{"foo": true},
  2300  	})
  2301  
  2302  	f.WaitUntil("feature is enabled", func(state store.EngineState) bool {
  2303  		return state.Features["foo"] == true
  2304  	})
  2305  
  2306  	f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{
  2307  		Name:       model.MainTiltfileManifestName,
  2308  		FinishTime: f.Now(),
  2309  		Features:   map[string]bool{"foo": false},
  2310  	})
  2311  
  2312  	f.WaitUntil("feature is disabled", func(state store.EngineState) bool {
  2313  		return state.Features["foo"] == false
  2314  	})
  2315  }
  2316  
  2317  func TestTeamIDStoredOnState(t *testing.T) {
  2318  	f := newTestFixture(t)
  2319  
  2320  	f.Start([]model.Manifest{})
  2321  	f.ensureCluster()
  2322  
  2323  	f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{
  2324  		Name:       model.MainTiltfileManifestName,
  2325  		FinishTime: f.Now(),
  2326  		TeamID:     "sharks",
  2327  	})
  2328  
  2329  	f.WaitUntil("teamID is set to sharks", func(state store.EngineState) bool {
  2330  		return state.TeamID == "sharks"
  2331  	})
  2332  
  2333  	f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{
  2334  		Name:       model.MainTiltfileManifestName,
  2335  		FinishTime: f.Now(),
  2336  		TeamID:     "jets",
  2337  	})
  2338  
  2339  	f.WaitUntil("teamID is set to jets", func(state store.EngineState) bool {
  2340  		return state.TeamID == "jets"
  2341  	})
  2342  }
  2343  
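        // TestBuildLogAction verifies that build log output is recorded in the build's span log
        // and prefixed with the manifest name in the global log.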
  2344  func TestBuildLogAction(t *testing.T) {
  2345  	f := newTestFixture(t)
  2346  	f.bc.DisableForTesting()
  2347  
  2348  	manifest := f.newManifest("alert-injester")
  2349  	f.Start([]model.Manifest{manifest})
  2350  
  2351  	f.store.Dispatch(buildcontrols.BuildStartedAction{
  2352  		ManifestName: manifest.Name,
  2353  		StartTime:    f.Now(),
  2354  		SpanID:       SpanIDForBuildLog(1),
  2355  		Source:       "buildcontrol",
  2356  	})
  2357  
  2358  	f.store.Dispatch(store.NewLogAction(manifest.Name, SpanIDForBuildLog(1), logger.InfoLvl, nil, []byte(`a
  2359  bc
  2360  def
  2361  ghij`)))
  2362  
  2363  	f.WaitUntil("log appears", func(es store.EngineState) bool {
  2364  		ms, _ := es.ManifestState("alert-injester")
  2365  		spanID := ms.EarliestCurrentBuild().SpanID
  2366  		return spanID != "" && len(es.LogStore.SpanLog(spanID)) > 0
  2367  	})
  2368  
  2369  	f.withState(func(s store.EngineState) {
  2370  		assert.Contains(t, s.LogStore.String(), `alert-injest… │ a
  2371  alert-injest… │ bc
  2372  alert-injest… │ def
  2373  alert-injest… │ ghij`)
  2374  	})
  2375  
  2376  	err := f.Stop()
  2377  	assert.Nil(t, err)
  2378  }
  2379  
  2380  func TestBuildErrorLoggedOnceByUpper(t *testing.T) {
  2381  	f := newTestFixture(t)
  2382  
  2383  	manifest := f.newManifest("alert-injester")
  2384  	err := errors.New("cats and dogs, living together")
  2385  	f.SetNextBuildError(err)
  2386  
  2387  	f.Start([]model.Manifest{manifest})
  2388  
  2389  	f.waitForCompletedBuildCount(1)
  2390  
  2391  	// The test name says "once", but the fake builder also logs the error once, so we expect two occurrences.
  2392  	f.withState(func(state store.EngineState) {
  2393  		require.Equal(t, 2, strings.Count(state.LogStore.String(), err.Error()))
  2394  	})
  2395  }
  2396  
  2397  func TestTiltfileChangedFilesOnlyLoggedAfterFirstBuild(t *testing.T) {
  2398  	f := newTestFixture(t)
  2399  	f.useRealTiltfileLoader()
  2400  
  2401  	f.WriteFile("Tiltfile", `
  2402  docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile')
  2403  k8s_yaml('snack.yaml')`)
  2404  	f.WriteFile("Dockerfile", `FROM iron/go:dev1`)
  2405  	f.WriteFile("snack.yaml", simpleYAML)
  2406  	f.WriteFile("src/main.go", "hello")
  2407  
  2408  	f.loadAndStart()
  2409  
  2410  	f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool {
  2411  		return len(state.MainTiltfileState().BuildHistory) == 1
  2412  	})
  2413  	f.waitForCompletedBuildCount(1)
  2414  
  2415  	// We shouldn't log changed files for the first build.
  2416  	f.withState(func(state store.EngineState) {
  2417  		require.NotContains(t, state.LogStore.String(), "changed: [")
  2418  	})
  2419  
  2420  	f.WriteFile("Tiltfile", `
  2421  docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile', ignore='foo')
  2422  k8s_yaml('snack.yaml')`)
  2423  	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("Tiltfile"))
  2424  
  2425  	f.WaitUntil("Tiltfile reloaded", func(state store.EngineState) bool {
  2426  		return len(state.MainTiltfileState().BuildHistory) == 2
  2427  	})
  2428  	f.waitForCompletedBuildCount(2)
  2429  
  2430  	f.withState(func(state store.EngineState) {
  2431  		expectedMessage := fmt.Sprintf("1 File Changed: [%s]", f.JoinPath("Tiltfile"))
  2432  		require.Contains(t, state.LogStore.String(), expectedMessage)
  2433  	})
  2434  }
  2435  
  2436  func TestDeployUIDsInEngineState(t *testing.T) {
  2437  	f := newTestFixture(t)
  2438  
  2439  	uid := types.UID("fake-uid")
  2440  	f.b.nextDeployedUID = uid
  2441  
  2442  	manifest := f.newManifest("fe")
  2443  	f.Start([]model.Manifest{manifest})
  2444  
  2445  	_ = f.nextCall()
  2446  	f.WaitUntilManifestState("UID in ManifestState", "fe", func(state store.ManifestState) bool {
  2447  		return k8sconv.ContainsUID(state.K8sRuntimeState().ApplyFilter, uid)
  2448  	})
  2449  
  2450  	err := f.Stop()
  2451  	assert.NoError(t, err)
  2452  	f.assertAllBuildsConsumed()
  2453  }
  2454  
  2455  func TestEnableFeatureOnFail(t *testing.T) {
  2456  	f := newTestFixture(t)
  2457  	f.useRealTiltfileLoader()
  2458  
  2459  	f.WriteFile("Tiltfile", `
  2460  enable_feature('snapshots')
  2461  fail('goodnight moon')
  2462  `)
  2463  
  2464  	f.loadAndStart()
  2465  
  2466  	f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool {
  2467  		return len(state.MainTiltfileState().BuildHistory) == 1
  2468  	})
  2469  	f.withState(func(state store.EngineState) {
  2470  		assert.True(t, state.Features["snapshots"])
  2471  	})
  2472  }
  2473  
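        // TestSecretScrubbed verifies that a secret value printed from the Tiltfile is redacted
        // in the logs.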
  2474  func TestSecretScrubbed(t *testing.T) {
  2475  	f := newTestFixture(t)
  2476  	f.useRealTiltfileLoader()
  2477  
  2478  	tiltfile := `
  2479  print('about to print secret')
  2480  print('aGVsbG8=')
  2481  k8s_yaml('secret.yaml')`
  2482  	f.WriteFile("Tiltfile", tiltfile)
  2483  	f.WriteFile("secret.yaml", `
  2484  apiVersion: v1
  2485  kind: Secret
  2486  metadata:
  2487    name: my-secret
  2488  data:
  2489    client-secret: aGVsbG8=
  2490  `)
  2491  
  2492  	f.loadAndStart()
  2493  
  2494  	f.waitForCompletedBuildCount(1)
  2495  
  2496  	f.withState(func(state store.EngineState) {
  2497  		log := state.LogStore.String()
  2498  		assert.Contains(t, log, "about to print secret")
  2499  		assert.NotContains(t, log, "aGVsbG8=")
  2500  		assert.Contains(t, log, "[redacted secret my-secret:client-secret]")
  2501  	})
  2502  }
  2503  
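        // TestShortSecretNotScrubbed verifies that a one-character secret value is left unredacted.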
  2504  func TestShortSecretNotScrubbed(t *testing.T) {
  2505  	f := newTestFixture(t)
  2506  	f.useRealTiltfileLoader()
  2507  
  2508  	tiltfile := `
  2509  print('about to print secret: s')
  2510  k8s_yaml('secret.yaml')`
  2511  	f.WriteFile("Tiltfile", tiltfile)
  2512  	f.WriteFile("secret.yaml", `
  2513  apiVersion: v1
  2514  kind: Secret
  2515  metadata:
  2516    name: my-secret
  2517  stringData:
  2518    client-secret: s
  2519  `)
  2520  
  2521  	f.loadAndStart()
  2522  
  2523  	f.waitForCompletedBuildCount(1)
  2524  
  2525  	f.withState(func(state store.EngineState) {
  2526  		log := state.LogStore.String()
  2527  		assert.Contains(t, log, "about to print secret: s")
  2528  		assert.NotContains(t, log, "redacted")
  2529  	})
  2530  }
  2531  
  2532  func TestDisableDockerPrune(t *testing.T) {
  2533  	f := newTestFixture(t)
  2534  	f.useRealTiltfileLoader()
  2535  
  2536  	f.WriteFile("Dockerfile", `FROM iron/go:prod`)
  2537  	f.WriteFile("snack.yaml", simpleYAML)
  2538  
  2539  	f.WriteFile("Tiltfile", `
  2540  docker_prune_settings(disable=True)
  2541  `+simpleTiltfile)
  2542  
  2543  	f.loadAndStart()
  2544  
  2545  	f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool {
  2546  		return len(state.MainTiltfileState().BuildHistory) == 1
  2547  	})
  2548  	f.withState(func(state store.EngineState) {
  2549  		assert.False(t, state.DockerPruneSettings.Enabled)
  2550  	})
  2551  }
  2552  
  2553  func TestDockerPruneEnabledByDefault(t *testing.T) {
  2554  	f := newTestFixture(t)
  2555  	f.useRealTiltfileLoader()
  2556  
  2557  	f.WriteFile("Tiltfile", simpleTiltfile)
  2558  	f.WriteFile("Dockerfile", `FROM iron/go:prod`)
  2559  	f.WriteFile("snack.yaml", simpleYAML)
  2560  
  2561  	f.loadAndStart()
  2562  
  2563  	f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool {
  2564  		return len(state.MainTiltfileState().BuildHistory) == 1
  2565  	})
  2566  	f.withState(func(state store.EngineState) {
  2567  		assert.True(t, state.DockerPruneSettings.Enabled)
  2568  		assert.Equal(t, model.DockerPruneDefaultMaxAge, state.DockerPruneSettings.MaxAge)
  2569  		assert.Equal(t, model.DockerPruneDefaultInterval, state.DockerPruneSettings.Interval)
  2570  	})
  2571  }
  2572  
  2573  func TestHasEverBeenReadyK8s(t *testing.T) {
  2574  	f := newTestFixture(t)
  2575  
  2576  	m := f.newManifest("foobar")
  2577  	pb := f.registerForDeployer(m)
  2578  	f.Start([]model.Manifest{m})
  2579  
  2580  	f.waitForCompletedBuildCount(1)
  2581  	f.withManifestState(m.Name, func(ms store.ManifestState) {
  2582  		require.False(t, ms.RuntimeState.HasEverBeenReadyOrSucceeded())
  2583  	})
  2584  
  2585  	f.podEvent(pb.WithContainerReady(true).Build())
  2586  	f.WaitUntilManifestState("flagged ready", m.Name, func(state store.ManifestState) bool {
  2587  		return state.RuntimeState.HasEverBeenReadyOrSucceeded()
  2588  	})
  2589  }
  2590  
  2591  func TestHasEverBeenCompleteK8s(t *testing.T) {
  2592  	f := newTestFixture(t)
  2593  
  2594  	m := f.newManifest("foobar")
  2595  	pb := f.registerForDeployer(m)
  2596  	f.Start([]model.Manifest{m})
  2597  
  2598  	f.waitForCompletedBuildCount(1)
  2599  	f.withManifestState(m.Name, func(ms store.ManifestState) {
  2600  		require.False(t, ms.RuntimeState.HasEverBeenReadyOrSucceeded())
  2601  	})
  2602  
  2603  	f.podEvent(pb.WithPhase(string(v1.PodSucceeded)).Build())
  2604  	f.WaitUntilManifestState("flagged ready", m.Name, func(state store.ManifestState) bool {
  2605  		return state.RuntimeState.HasEverBeenReadyOrSucceeded()
  2606  	})
  2607  }
  2608  
  2609  func TestHasEverBeenReadyLocal(t *testing.T) {
  2610  	f := newTestFixture(t)
  2611  
  2612  	m := manifestbuilder.New(f, "foobar").WithLocalResource("foo", []string{f.Path()}).Build()
  2613  	f.SetNextBuildError(errors.New("failure!"))
  2614  	f.Start([]model.Manifest{m})
  2615  
  2616  	// The first build will fail, so HasEverBeenReadyOrSucceeded should be false.
  2617  	f.waitForCompletedBuildCount(1)
  2618  	f.withManifestState(m.Name, func(ms store.ManifestState) {
  2619  		require.False(t, ms.RuntimeState.HasEverBeenReadyOrSucceeded())
  2620  	})
  2621  
  2622  	// The second build will succeed, so HasEverBeenReadyOrSucceeded should be true.
  2623  	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("bar", "main.go"))
  2624  	f.WaitUntilManifestState("flagged ready", m.Name, func(state store.ManifestState) bool {
  2625  		return state.RuntimeState.HasEverBeenReadyOrSucceeded()
  2626  	})
  2627  }
  2628  
  2629  func TestVersionSettingsStoredOnState(t *testing.T) {
  2630  	f := newTestFixture(t)
  2631  
  2632  	f.Start([]model.Manifest{})
  2633  	f.ensureCluster()
  2634  
  2635  	vs := model.VersionSettings{
  2636  		CheckUpdates: false,
  2637  	}
  2638  	f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{
  2639  		Name:            model.MainTiltfileManifestName,
  2640  		FinishTime:      f.Now(),
  2641  		VersionSettings: vs,
  2642  	})
  2643  
  2644  	f.WaitUntil("CheckVersionUpdates is set to false", func(state store.EngineState) bool {
  2645  		return state.VersionSettings.CheckUpdates == false
  2646  	})
  2647  }
  2648  
  2649  func TestAnalyticsTiltfileOpt(t *testing.T) {
  2650  	f := newTestFixture(t)
  2651  
  2652  	f.Start([]model.Manifest{})
  2653  	f.ensureCluster()
  2654  
  2655  	f.withState(func(state store.EngineState) {
  2656  		assert.Equal(t, analytics.OptDefault, state.AnalyticsEffectiveOpt())
  2657  	})
  2658  
  2659  	f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{
  2660  		Name:                 model.MainTiltfileManifestName,
  2661  		FinishTime:           f.Now(),
  2662  		AnalyticsTiltfileOpt: analytics.OptIn,
  2663  	})
  2664  
  2665  	f.WaitUntil("analytics tiltfile opt-in", func(state store.EngineState) bool {
  2666  		return state.AnalyticsTiltfileOpt == analytics.OptIn
  2667  	})
  2668  
  2669  	f.withState(func(state store.EngineState) {
  2670  		assert.Equal(t, analytics.OptIn, state.AnalyticsEffectiveOpt())
  2671  	})
  2672  }
  2673  
  2674  func TestConfigArgsChangeCausesTiltfileRerun(t *testing.T) {
  2675  	f := newTestFixture(t)
  2676  	f.useRealTiltfileLoader()
  2677  
  2678  	f.WriteFile("Tiltfile", `
  2679  print('hello')
  2680  config.define_string_list('foo')
  2681  cfg = config.parse()
  2682  print('foo=', cfg['foo'])`)
  2683  
  2684  	opt := func(ia InitAction) InitAction {
  2685  		ia.UserArgs = []string{"--foo", "bar"}
  2686  		return ia
  2687  	}
  2688  
  2689  	f.loadAndStart(opt)
  2690  
  2691  	// Wait for both EngineState and apiserver state updates,
  2692  	// so we can write back to the apiserver.
  2693  	f.WaitUntil("first tiltfile build finishes", func(state store.EngineState) bool {
  2694  		var tf v1alpha1.Tiltfile
  2695  		_ = f.ctrlClient.Get(f.ctx,
  2696  			types.NamespacedName{Name: model.MainTiltfileManifestName.String()}, &tf)
  2697  		return len(state.MainTiltfileState().BuildHistory) == 1 &&
  2698  			tf.Status.Terminated != nil
  2699  	})
  2700  
  2701  	f.withState(func(state store.EngineState) {
  2702  		spanID := state.MainTiltfileState().LastBuild().SpanID
  2703  		require.Contains(t, state.LogStore.SpanLog(spanID), `foo= ["bar"]`)
  2704  	})
  2705  	err := tiltfiles.SetTiltfileArgs(f.ctx, f.ctrlClient, []string{"--foo", "baz", "--foo", "quu"})
  2706  	require.NoError(t, err)
  2707  
  2708  	f.WaitUntil("second tiltfile build finishes", func(state store.EngineState) bool {
  2709  		return len(state.MainTiltfileState().BuildHistory) == 2
  2710  	})
  2711  
  2712  	f.withState(func(state store.EngineState) {
  2713  		spanID := state.MainTiltfileState().LastBuild().SpanID
  2714  		require.Contains(t, state.LogStore.SpanLog(spanID), `foo= ["baz", "quu"]`)
  2715  	})
  2716  }
  2717  
  2718  func TestTelemetryLogAction(t *testing.T) {
  2719  	f := newTestFixture(t)
  2720  
  2721  	f.Start([]model.Manifest{})
  2722  
  2723  	f.store.Dispatch(store.NewLogAction(model.MainTiltfileManifestName, "0", logger.InfoLvl, nil, []byte("testing")))
  2724  
  2725  	f.WaitUntil("log is stored", func(state store.EngineState) bool {
  2726  		l := state.LogStore.ManifestLog(store.MainTiltfileManifestName)
  2727  		return strings.Contains(l, "testing")
  2728  	})
  2729  }
  2730  
  2731  func TestLocalResourceServeChangeCmd(t *testing.T) {
  2732  	f := newTestFixture(t)
  2733  	f.useRealTiltfileLoader()
  2734  
  2735  	f.WriteFile("Tiltfile", "local_resource('foo', serve_cmd='true')")
  2736  
  2737  	f.loadAndStart()
  2738  
  2739  	f.WaitUntil("true is served", func(state store.EngineState) bool {
  2740  		return strings.Contains(state.LogStore.ManifestLog("foo"), "Starting cmd true")
  2741  	})
  2742  
  2743  	f.WriteFile("Tiltfile", "local_resource('foo', serve_cmd='false')")
  2744  	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("Tiltfile"))
  2745  
  2746  	f.WaitUntil("false is served", func(state store.EngineState) bool {
  2747  		return strings.Contains(state.LogStore.ManifestLog("foo"), "Starting cmd false")
  2748  	})
  2749  
  2750  	f.fe.RequireNoKnownProcess(t, "true")
  2751  
  2752  	err := f.Stop()
  2753  	require.NoError(t, err)
  2754  }
  2755  
  2756  func TestDefaultUpdateSettings(t *testing.T) {
  2757  	f := newTestFixture(t)
  2758  	f.useRealTiltfileLoader()
  2759  
  2760  	f.WriteFile("Dockerfile", `FROM iron/go:prod`)
  2761  	f.WriteFile("snack.yaml", simpleYAML)
  2762  
  2763  	f.WriteFile("Tiltfile", simpleTiltfile)
  2764  
  2765  	f.loadAndStart()
  2766  
  2767  	f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool {
  2768  		return len(state.MainTiltfileState().BuildHistory) == 1
  2769  	})
  2770  	f.withState(func(state store.EngineState) {
  2771  		assert.Equal(t, model.DefaultUpdateSettings(), state.UpdateSettings)
  2772  	})
  2773  }
  2774  
  2775  func TestSetK8sUpsertTimeout(t *testing.T) {
  2776  	f := newTestFixture(t)
  2777  	f.useRealTiltfileLoader()
  2778  
  2779  	f.WriteFile("Dockerfile", `FROM iron/go:prod`)
  2780  	f.WriteFile("snack.yaml", simpleYAML)
  2781  
  2782  	f.WriteFile("Tiltfile", `
  2783  update_settings(k8s_upsert_timeout_secs=123)
  2784  `+simpleTiltfile)
  2785  	f.loadAndStart()
  2786  
  2787  	f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool {
  2788  		return len(state.MainTiltfileState().BuildHistory) == 1
  2789  	})
  2790  	f.withState(func(state store.EngineState) {
  2791  		assert.Equal(t, 123*time.Second, state.UpdateSettings.K8sUpsertTimeout())
  2792  	})
  2793  }
  2794  
  2795  func TestSetMaxBuildSlots(t *testing.T) {
  2796  	f := newTestFixture(t)
  2797  	f.useRealTiltfileLoader()
  2798  
  2799  	f.WriteFile("Dockerfile", `FROM iron/go:prod`)
  2800  	f.WriteFile("snack.yaml", simpleYAML)
  2801  
  2802  	f.WriteFile("Tiltfile", `
  2803  update_settings(max_parallel_updates=123)
  2804  `+simpleTiltfile)
  2805  	f.loadAndStart()
  2806  
  2807  	f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool {
  2808  		return len(state.MainTiltfileState().BuildHistory) == 1
  2809  	})
  2810  	f.withState(func(state store.EngineState) {
  2811  		assert.Equal(t, 123, state.UpdateSettings.MaxParallelUpdates())
  2812  	})
  2813  }
  2814  
  2815  // https://github.com/tilt-dev/tilt/issues/3514
  2816  func TestTiltignoreRespectedOnError(t *testing.T) {
  2817  	f := newTestFixture(t)
  2818  	f.useRealTiltfileLoader()
  2819  
  2820  	f.WriteFile("a.txt", "hello")
  2821  	f.WriteFile("Tiltfile", `read_file('a.txt')
  2822  fail('x')`)
  2823  	f.WriteFile(".tiltignore", "a.txt")
  2824  
  2825  	f.Init(InitAction{
  2826  		TiltfilePath: f.JoinPath("Tiltfile"),
  2827  		TerminalMode: store.TerminalModeHUD,
  2828  		StartTime:    f.Now(),
  2829  	})
  2830  
  2831  	f.WaitUntil(".tiltignore processed", func(es store.EngineState) bool {
  2832  		var fw v1alpha1.FileWatch
  2833  		err := f.ctrlClient.Get(f.ctx, types.NamespacedName{Name: "configs:(Tiltfile)"}, &fw)
  2834  		if err != nil {
  2835  			return false
  2836  		}
  2837  		return strings.Contains(strings.Join(fw.Spec.Ignores[0].Patterns, "\n"), "a.txt")
  2838  	})
  2839  
  2840  	f.WriteFile(".tiltignore", "a.txt\nb.txt\n")
  2841  	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("Tiltfile"))
  2842  
  2843  	f.WaitUntil(".tiltignore processed", func(es store.EngineState) bool {
  2844  		var fw v1alpha1.FileWatch
  2845  		err := f.ctrlClient.Get(f.ctx, types.NamespacedName{Name: "configs:(Tiltfile)"}, &fw)
  2846  		if err != nil {
  2847  			return false
  2848  		}
  2849  		return strings.Contains(strings.Join(fw.Spec.Ignores[0].Patterns, "\n"), "b.txt")
  2850  	})
  2851  
  2852  	err := f.Stop()
  2853  	assert.NoError(t, err)
  2854  }
  2855  
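        // TestHandleTiltfileTriggerQueue verifies that queueing the Tiltfile for a manual trigger
        // causes a rebuild and then clears the trigger.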
  2856  func TestHandleTiltfileTriggerQueue(t *testing.T) {
  2857  	f := newTestFixture(t)
  2858  	f.useRealTiltfileLoader()
  2859  
  2860  	f.WriteFile("Tiltfile", `print("hello world")`)
  2861  
  2862  	f.Init(InitAction{
  2863  		TiltfilePath: f.JoinPath("Tiltfile"),
  2864  		TerminalMode: store.TerminalModeHUD,
  2865  		StartTime:    f.Now(),
  2866  	})
  2867  
  2868  	f.WaitUntil("init action processed", func(state store.EngineState) bool {
  2869  		return !state.TiltStartTime.IsZero()
  2870  	})
  2871  
  2872  	f.withState(func(st store.EngineState) {
  2873  		assert.False(t, st.ManifestInTriggerQueue(model.MainTiltfileManifestName),
  2874  			"initial state should NOT have Tiltfile in trigger queue")
  2875  		assert.Equal(t, model.BuildReasonNone, st.MainTiltfileState().TriggerReason,
  2876  			"initial state should not have Tiltfile trigger reason")
  2877  	})
  2878  	action := store.AppendToTriggerQueueAction{Name: model.MainTiltfileManifestName, Reason: 123}
  2879  	f.store.Dispatch(action)
  2880  
  2881  	f.WaitUntil("Tiltfile trigger processed", func(st store.EngineState) bool {
  2882  		return st.ManifestInTriggerQueue(model.MainTiltfileManifestName) &&
  2883  			st.MainTiltfileState().TriggerReason == 123
  2884  	})
  2885  
  2886  	f.WaitUntil("Tiltfile built and trigger cleared", func(st store.EngineState) bool {
  2887  		return len(st.MainTiltfileState().BuildHistory) == 2 && // Tiltfile built because it was triggered...
  2888  
  2889  			// and the trigger was cleared
  2890  			!st.ManifestInTriggerQueue(model.MainTiltfileManifestName) &&
  2891  			st.MainTiltfileState().TriggerReason == model.BuildReasonNone
  2892  	})
  2893  
  2894  	err := f.Stop()
  2895  	assert.NoError(t, err)
  2896  }
  2897  
  2898  func TestOverrideTriggerModeEvent(t *testing.T) {
  2899  	f := newTestFixture(t)
  2900  
  2901  	manifest := f.newManifest("foo")
  2902  	f.Start([]model.Manifest{manifest})
  2903  	_ = f.nextCall()
  2904  
  2905  	f.WaitUntilManifest("manifest has triggerMode = auto (default)", "foo", func(mt store.ManifestTarget) bool {
  2906  		return mt.Manifest.TriggerMode == model.TriggerModeAuto
  2907  	})
  2908  
  2909  	f.upper.store.Dispatch(server.OverrideTriggerModeAction{
  2910  		ManifestNames: []model.ManifestName{"foo"},
  2911  		TriggerMode:   model.TriggerModeManualWithAutoInit,
  2912  	})
  2913  
  2914  	f.WaitUntilManifest("triggerMode updated", "foo", func(mt store.ManifestTarget) bool {
  2915  		return mt.Manifest.TriggerMode == model.TriggerModeManualWithAutoInit
  2916  	})
  2917  
  2918  	err := f.Stop()
  2919  	require.NoError(t, err)
  2920  	f.assertAllBuildsConsumed()
  2921  }
  2922  
  2923  func TestOverrideTriggerModeBadManifestLogsError(t *testing.T) {
  2924  	f := newTestFixture(t)
  2925  
  2926  	manifest := f.newManifest("foo")
  2927  	f.Start([]model.Manifest{manifest})
  2928  	_ = f.nextCall()
  2929  
  2930  	f.WaitUntilManifest("manifest has triggerMode = auto (default)", "foo", func(mt store.ManifestTarget) bool {
  2931  		return mt.Manifest.TriggerMode == model.TriggerModeAuto
  2932  	})
  2933  
  2934  	f.upper.store.Dispatch(server.OverrideTriggerModeAction{
  2935  		ManifestNames: []model.ManifestName{"bar"},
  2936  		TriggerMode:   model.TriggerModeManualWithAutoInit,
  2937  	})
  2938  
  2939  	f.log.AssertEventuallyContains(t, "no such manifest", stdTimeout)
  2940  
  2941  	err := f.Stop()
  2942  	require.NoError(t, err)
  2943  	f.assertAllBuildsConsumed()
  2944  }
  2945  
  2946  func TestOverrideTriggerModeBadTriggerModeLogsError(t *testing.T) {
  2947  	f := newTestFixture(t)
  2948  
  2949  	manifest := f.newManifest("foo")
  2950  	f.Start([]model.Manifest{manifest})
  2951  	_ = f.nextCall()
  2952  
  2953  	f.WaitUntilManifest("manifest has triggerMode = auto (default)", "foo", func(mt store.ManifestTarget) bool {
  2954  		return mt.Manifest.TriggerMode == model.TriggerModeAuto
  2955  	})
  2956  
  2957  	f.upper.store.Dispatch(server.OverrideTriggerModeAction{
  2958  		ManifestNames: []model.ManifestName{"fooo"},
  2959  		TriggerMode:   12345,
  2960  	})
  2961  
  2962  	f.log.AssertEventuallyContains(t, "invalid trigger mode", stdTimeout)
  2963  
  2964  	err := f.Stop()
  2965  	require.NoError(t, err)
  2966  	f.assertAllBuildsConsumed()
  2967  }
  2968  
  2969  func TestDisableButtonIsCreated(t *testing.T) {
  2970  	f := newTestFixture(t)
  2971  	f.useRealTiltfileLoader()
  2972  
  2973  	f.WriteFile("Tiltfile", `
  2974  enable_feature('disable_resources')
  2975  local_resource('foo', 'echo hi')
  2976  `)
  2977  	f.loadAndStart()
  2978  
  2979  	f.waitForCompletedBuildCount(1)
  2980  
  2981  	var b v1alpha1.UIButton
  2982  	require.Eventually(t, func() bool {
  2983  		err := f.ctrlClient.Get(f.ctx, types.NamespacedName{Name: "toggle-foo-disable"}, &b)
  2984  		require.NoError(t, ctrlclient.IgnoreNotFound(err))
  2985  		return err == nil
  2986  	}, time.Second, time.Millisecond)
  2987  
  2988  	require.Equal(t, "DisableToggle", b.Annotations[v1alpha1.AnnotationButtonType])
  2989  	require.Equal(t, []v1alpha1.UIInputSpec{
  2990  		{
  2991  			Name:   "action",
  2992  			Hidden: &v1alpha1.UIHiddenInputSpec{Value: "on"},
  2993  		},
  2994  	}, b.Spec.Inputs)
  2995  }
  2996  
  2997  func TestCmdServerDoesntStartWhenDisabled(t *testing.T) {
  2998  	f := newTestFixture(t)
  2999  	f.useRealTiltfileLoader()
  3000  
  3001  	f.WriteFile("Tiltfile", `print('dummy tiltfile with no resources')`)
  3002  
  3003  	f.loadAndStart()
  3004  
  3005  	f.WriteFile("Tiltfile", `print('tiltfile 1')
  3006  local_resource('foo', serve_cmd='echo hi; sleep 10')
  3007  local_resource('bar', 'true')
  3008  config.set_enabled_resources(['bar'])
  3009  `)
  3010  	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("Tiltfile"))
  3011  
  3012  	// make sure we got to the point where we recognized the server is disabled without actually
  3013  	// running the command
  3014  	f.WaitUntil("disabled", func(state store.EngineState) bool {
  3015  		ds := f.localServerController.Get("foo").Status.DisableStatus
  3016  		return ds != nil && ds.Disabled
  3017  	})
  3018  
  3019  	require.Equal(t, f.log.String(), "")
  3020  }
  3021  
  3022  func TestDisabledResourceRemovedFromTriggerQueue(t *testing.T) {
  3023  	f := newTestFixture(t)
  3024  
  3025  	m := manifestbuilder.New(f, "foo").WithLocalResource("foo", []string{f.Path()}).Build()
  3026  
  3027  	f.Start([]model.Manifest{m})
  3028  
  3029  	f.waitForCompletedBuildCount(1)
  3030  
  3031  	f.bc.DisableForTesting()
  3032  
  3033  	f.store.Dispatch(store.AppendToTriggerQueueAction{Name: m.Name, Reason: model.BuildReasonFlagTriggerCLI})
  3034  
  3035  	f.WaitUntil("in trigger queue", func(state store.EngineState) bool {
  3036  		return state.ManifestInTriggerQueue(m.Name)
  3037  	})
  3038  
  3039  	f.setDisableState(m.Name, true)
  3040  
  3041  	f.WaitUntil("is removed from trigger queue", func(state store.EngineState) bool {
  3042  		return !state.ManifestInTriggerQueue(m.Name)
  3043  	})
  3044  }
  3045  
  3046  func TestLocalResourceNoServeCmdDeps(t *testing.T) {
  3047  	if runtime.GOOS == "windows" {
  3048  		t.Skip("TODO(nick): fix this")
  3049  	}
  3050  	f := newTestFixture(t)
  3051  	f.useRealTiltfileLoader()
  3052  
  3053  	// create a Tiltfile with 2 resources:
  3054  	// 	1. foo - update only, i.e. a job, with a readiness_probe also defined
  3055  	// 		(which should be ignored as there's no server to be ready!)
  3056  	// 	2. bar - local_resource w/ dep on foo
  3057  	f.WriteFile("Tiltfile", `
  3058  local_resource('foo', cmd='true', readiness_probe=probe(http_get=http_get_action(port=12345)))
  3059  local_resource('bar', serve_cmd='while true; do echo hi; sleep 30; done', resource_deps=['foo'])
  3060  `)
  3061  	f.loadAndStart()
  3062  
  3063  	f.waitForCompletedBuildCount(2)
  3064  
  3065  	f.withState(func(es store.EngineState) {
  3066  		require.True(t, strings.Contains(es.LogStore.ManifestLog("(Tiltfile)"),
  3067  			`WARNING: Ignoring readiness probe for local resource "foo" (no serve_cmd was defined)`),
  3068  			"Log did not contain ignored readiness probe warning")
  3069  	})
  3070  
  3071  	// foo should indicate that it has succeeded since there is no serve_cmd and thus no runtime status
  3072  	f.withManifestState("foo", func(ms store.ManifestState) {
  3073  		require.True(t, ms.RuntimeState.HasEverBeenReadyOrSucceeded())
  3074  		require.Equal(t, v1alpha1.RuntimeStatusNotApplicable, ms.RuntimeState.RuntimeStatus())
  3075  	})
  3076  
  3077  	f.WaitUntilManifestState("bar ready", "bar", func(ms store.ManifestState) bool {
  3078  		return ms.RuntimeState.HasEverBeenReadyOrSucceeded() && ms.RuntimeState.RuntimeStatus() == v1alpha1.RuntimeStatusOK
  3079  	})
  3080  }
  3081  
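        // testFixture wires an Upper up to fake dependencies (build-and-deployer, file watcher,
        // Docker/Kubernetes clients, Tiltfile loader, etc.) so tests can drive the engine
        // end to end without touching real infrastructure.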
  3082  type testFixture struct {
  3083  	*tempdir.TempDirFixture
  3084  	t                          *testing.T
  3085  	ctx                        context.Context
  3086  	cancel                     func()
  3087  	clock                      clockwork.Clock
  3088  	upper                      Upper
  3089  	b                          *fakeBuildAndDeployer
  3090  	fsWatcher                  *fsevent.FakeMultiWatcher
  3091  	docker                     *docker.FakeClient
  3092  	kClient                    *k8s.FakeK8sClient
  3093  	hud                        hud.HeadsUpDisplay
  3094  	ts                         *hud.TerminalStream
  3095  	upperInitResult            chan error
  3096  	log                        *bufsync.ThreadSafeBuffer
  3097  	store                      *store.Store
  3098  	bc                         *BuildController
  3099  	cc                         *configs.ConfigsController
  3100  	dcc                        *dockercompose.FakeDCClient
  3101  	tfl                        *tiltfile.FakeTiltfileLoader
  3102  	realTFL                    tiltfile.TiltfileLoader
  3103  	opter                      *tiltanalytics.FakeOpter
  3104  	dp                         *dockerprune.DockerPruner
  3105  	fe                         *cmd.FakeExecer
  3106  	fpm                        *cmd.FakeProberManager
  3107  	overrideMaxParallelUpdates int
  3108  	ctrlClient                 ctrlclient.Client
  3109  	engineMode                 store.EngineMode
  3110  
  3111  	onchangeCh            chan bool
  3112  	sessionController     *session.Controller
  3113  	localServerController *local.ServerController
  3114  	execer                *localexec.FakeExecer
  3115  }
  3116  
  3117  type fixtureOptions struct {
  3118  	engineMode *store.EngineMode
  3119  }
  3120  
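        // newTestFixture assembles the store, the API server controller manager, and the full set of
        // fake clients and reconcilers, then builds an Upper over them for in-memory engine tests.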
  3121  func newTestFixture(t *testing.T, options ...fixtureOptions) *testFixture {
  3122  	controllers.InitKlog(io.Discard)
  3123  	f := tempdir.NewTempDirFixture(t)
  3124  
  3125  	engineMode := store.EngineModeUp
  3126  	for _, o := range options {
  3127  		if o.engineMode != nil {
  3128  			engineMode = *o.engineMode
  3129  		}
  3130  	}
  3131  
  3132  	base := xdg.FakeBase{Dir: f.Path()}
  3133  	log := bufsync.NewThreadSafeBuffer()
  3134  	to := tiltanalytics.NewFakeOpter(analytics.OptIn)
  3135  	ctx, _, ta := testutils.ForkedCtxAndAnalyticsWithOpterForTest(log, to)
  3136  	ctx, cancel := context.WithTimeout(ctx, 15*time.Second)
  3137  
  3138  	cdc := controllers.ProvideDeferredClient()
  3139  	sch := v1alpha1.NewScheme()
  3140  
  3141  	watcher := fsevent.NewFakeMultiWatcher()
  3142  	kClient := k8s.NewFakeK8sClient(t)
  3143  	clusterClients := cluster.NewConnectionManager()
  3144  
  3145  	timerMaker := fsevent.MakeFakeTimerMaker(t)
  3146  
  3147  	dockerClient := docker.NewFakeClient()
  3148  
  3149  	fSub := fixtureSub{ch: make(chan bool, 1000)}
  3150  	st := store.NewStore(UpperReducer, store.LogActionsFlag(false))
  3151  	require.NoError(t, st.AddSubscriber(ctx, fSub))
  3152  
  3153  	err := os.Mkdir(f.JoinPath(".git"), os.FileMode(0777))
  3154  	if err != nil {
  3155  		t.Fatal(err)
  3156  	}
  3157  
  3158  	clock := clockwork.NewRealClock()
  3159  	env := clusterid.ProductDockerDesktop
  3160  	podSource := podlogstream.NewPodSource(ctx, kClient, v1alpha1.NewScheme(), clock)
  3161  	plsc := podlogstream.NewController(ctx, cdc, sch, st, kClient, podSource, clock)
  3162  	au := engineanalytics.NewAnalyticsUpdater(ta, engineanalytics.CmdTags{}, engineMode)
  3163  	ar := engineanalytics.ProvideAnalyticsReporter(ta, st, kClient, env, feature.MainDefaults)
  3164  	fakeDcc := dockercompose.NewFakeDockerComposeClient(t, ctx)
  3165  	k8sContextPlugin := k8scontext.NewPlugin("fake-context", "default", env)
  3166  	versionPlugin := version.NewPlugin(model.TiltBuild{Version: "0.5.0"})
  3167  	configPlugin := config.NewPlugin("up")
  3168  	execer := localexec.NewFakeExecer(t)
  3169  
  3170  	extPlugin := tiltextension.NewFakePlugin(
  3171  		tiltextension.NewFakeExtRepoReconciler(f.Path()),
  3172  		tiltextension.NewFakeExtReconciler(f.Path()))
  3173  	ciSettingsPlugin := cisettings.NewPlugin(0)
  3174  	realTFL := tiltfile.ProvideTiltfileLoader(ta,
  3175  		k8sContextPlugin, versionPlugin, configPlugin, extPlugin, ciSettingsPlugin,
  3176  		fakeDcc, "localhost", execer, feature.MainDefaults, env)
  3177  	tfl := tiltfile.NewFakeTiltfileLoader()
  3178  	cc := configs.NewConfigsController(cdc)
  3179  	tqs := configs.NewTriggerQueueSubscriber(cdc)
  3180  	serverOptions, err := server.ProvideTiltServerOptionsForTesting(ctx)
  3181  	require.NoError(t, err)
  3182  	webListener, err := server.ProvideWebListener("localhost", 0)
  3183  	require.NoError(t, err)
  3184  	hudsc := server.ProvideHeadsUpServerController(
  3185  		nil, "tilt-default", webListener, serverOptions,
  3186  		&server.HeadsUpServer{}, assets.NewFakeServer(), model.WebURL{})
  3187  	ns := k8s.Namespace("default")
  3188  	rd := kubernetesdiscovery.NewContainerRestartDetector()
  3189  	kdc := kubernetesdiscovery.NewReconciler(cdc, sch, clusterClients, rd, st)
  3190  	sw := k8swatch.NewServiceWatcher(clusterClients, ns)
  3191  	ewm := k8swatch.NewEventWatchManager(clusterClients, ns)
  3192  	tcum := cloud.NewStatusManager(httptest.NewFakeClientEmptyJSON(), clock)
  3193  	fe := cmd.NewFakeExecer()
  3194  	fpm := cmd.NewFakeProberManager()
  3195  	fwc := filewatch.NewController(cdc, st, watcher.NewSub, timerMaker.Maker(), v1alpha1.NewScheme(), clock)
  3196  	cmds := cmd.NewController(ctx, fe, fpm, cdc, st, clock, v1alpha1.NewScheme())
  3197  	lsc := local.NewServerController(cdc)
  3198  	sr := ctrlsession.NewReconciler(cdc, st, clock)
  3199  	sessionController := session.NewController(sr)
  3200  	ts := hud.NewTerminalStream(hud.NewIncrementalPrinter(log), st)
  3201  	tp := prompt.NewTerminalPrompt(ta, prompt.TTYOpen, openurl.BrowserOpen,
  3202  		log, "localhost", model.WebURL{})
  3203  	h := hud.NewFakeHud()
  3204  
  3205  	uncached := controllers.UncachedObjects{}
  3206  	for _, obj := range v1alpha1.AllResourceObjects() {
  3207  		uncached = append(uncached, obj.(ctrlclient.Object))
  3208  	}
  3209  
  3210  	tscm, err := controllers.NewTiltServerControllerManager(
  3211  		serverOptions,
  3212  		sch,
  3213  		cdc,
  3214  		uncached)
  3215  	require.NoError(t, err, "Failed to create Tilt API server controller manager")
  3216  	pfr := apiportforward.NewReconciler(cdc, sch, st, clusterClients)
  3217  
  3218  	wsl := server.NewWebsocketList()
  3219  
  3220  	kar := kubernetesapply.NewReconciler(cdc, kClient, sch, st, execer)
  3221  	dcds := dockercomposeservice.NewDisableSubscriber(ctx, fakeDcc, clock)
  3222  	dcr := dockercomposeservice.NewReconciler(cdc, fakeDcc, dockerClient, st, sch, dcds)
  3223  
  3224  	tfr := ctrltiltfile.NewReconciler(st, tfl, dockerClient, cdc, sch, engineMode, "", "", 0)
  3225  	tbr := togglebutton.NewReconciler(cdc, sch)
  3226  	extr := extension.NewReconciler(cdc, sch, ta)
  3227  	extrr, err := extensionrepo.NewReconciler(cdc, st, base)
  3228  	require.NoError(t, err)
  3229  	cmr := configmap.NewReconciler(cdc, st)
  3230  
  3231  	cu := &containerupdate.FakeContainerUpdater{}
  3232  	lur := liveupdate.NewFakeReconciler(st, cu, cdc)
  3233  	dockerBuilder := build.NewDockerBuilder(dockerClient, nil)
  3234  	customBuilder := build.NewCustomBuilder(dockerClient, clock, cmds)
  3235  	kp := build.NewKINDLoader()
  3236  	ib := build.NewImageBuilder(dockerBuilder, customBuilder, kp)
  3237  	dir := dockerimage.NewReconciler(cdc, st, sch, dockerClient, ib)
  3238  	cir := cmdimage.NewReconciler(cdc, st, sch, dockerClient, ib)
  3239  	clr := cluster.NewReconciler(ctx, cdc, st, clock, clusterClients, docker.LocalEnv{},
  3240  		cluster.FakeDockerClientOrError(dockerClient, nil),
  3241  		cluster.FakeKubernetesClientOrError(kClient, nil),
  3242  		wsl, base, "tilt-default")
  3243  	dclsr := dockercomposelogstream.NewReconciler(cdc, st, fakeDcc, dockerClient)
  3244  
  3245  	cb := controllers.NewControllerBuilder(tscm, controllers.ProvideControllers(
  3246  		fwc,
  3247  		cmds,
  3248  		plsc,
  3249  		kdc,
  3250  		kar,
  3251  		ctrluisession.NewReconciler(cdc, wsl),
  3252  		ctrluiresource.NewReconciler(cdc, wsl, st),
  3253  		ctrluibutton.NewReconciler(cdc, wsl, st),
  3254  		pfr,
  3255  		tfr,
  3256  		tbr,
  3257  		extr,
  3258  		extrr,
  3259  		lur,
  3260  		cmr,
  3261  		dir,
  3262  		cir,
  3263  		clr,
  3264  		dcr,
  3265  		imagemap.NewReconciler(cdc, st),
  3266  		dclsr,
  3267  		sr,
  3268  	))
  3269  
  3270  	dp := dockerprune.NewDockerPruner(dockerClient)
  3271  	dp.DisabledForTesting(true)
  3272  
  3273  	b := newFakeBuildAndDeployer(t, kClient, fakeDcc, cdc, kar, dcr)
  3274  	bc := NewBuildController(b)
  3275  
  3276  	ret := &testFixture{
  3277  		TempDirFixture:        f,
  3278  		t:                     t,
  3279  		ctx:                   ctx,
  3280  		cancel:                cancel,
  3281  		clock:                 clock,
  3282  		b:                     b,
  3283  		fsWatcher:             watcher,
  3284  		docker:                dockerClient,
  3285  		kClient:               b.kClient,
  3286  		hud:                   h,
  3287  		ts:                    ts,
  3288  		log:                   log,
  3289  		store:                 st,
  3290  		bc:                    bc,
  3291  		onchangeCh:            fSub.ch,
  3292  		cc:                    cc,
  3293  		dcc:                   fakeDcc,
  3294  		tfl:                   tfl,
  3295  		realTFL:               realTFL,
  3296  		opter:                 to,
  3297  		dp:                    dp,
  3298  		fe:                    fe,
  3299  		fpm:                   fpm,
  3300  		ctrlClient:            cdc,
  3301  		sessionController:     sessionController,
  3302  		localServerController: lsc,
  3303  		engineMode:            engineMode,
  3304  		execer:                execer,
  3305  	}
  3306  
  3307  	ret.disableEnvAnalyticsOpt()
  3308  
  3309  	tc := telemetry.NewController(clock, tracer.NewSpanCollector(ctx))
  3310  	podm := k8srollout.NewPodMonitor(clock)
  3311  
  3312  	uss := uisession.NewSubscriber(cdc)
  3313  	urs := uiresource.NewSubscriber(cdc)
  3314  
  3315  	subs := ProvideSubscribers(hudsc, tscm, cb, h, ts, tp, sw, bc, cc, tqs, ar, au, ewm, tcum, dp, tc, lsc, podm, sessionController, uss, urs)
  3316  	ret.upper, err = NewUpper(ctx, st, subs)
  3317  	require.NoError(t, err)
  3318  
  3319  	go func() {
  3320  		err := h.Run(ctx, ret.upper.Dispatch, hud.DefaultRefreshInterval)
  3321  		testutils.FailOnNonCanceledErr(t, err, "hud.Run failed")
  3322  	}()
  3323  
  3324  	t.Cleanup(ret.TearDown)
  3325  	return ret
  3326  }
  3327  
  3328  func (f *testFixture) Now() time.Time {
  3329  	return f.clock.Now()
  3330  }
  3331  
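        // fakeHud returns f.hud as a *hud.FakeHud, failing the test if the fixture
        // was constructed with a different HUD implementation.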
  3332  func (f *testFixture) fakeHud() *hud.FakeHud {
  3333  	fakeHud, ok := f.hud.(*hud.FakeHud)
  3334  	if !ok {
  3335  		f.t.Fatalf("called f.fakeHud() on a test fixure without a fakeHud (instead f.hud is of type: %T", f.hud)
  3336  	}
  3337  	return fakeHud
  3338  }
  3339  
  3340  // starts the upper with the given manifests, bypassing normal tiltfile loading
  3341  func (f *testFixture) Start(manifests []model.Manifest, initOptions ...initOption) {
  3342  	f.t.Helper()
  3343  	f.setManifests(manifests)
  3344  
  3345  	ia := InitAction{
  3346  		TiltfilePath: f.JoinPath("Tiltfile"),
  3347  		TerminalMode: store.TerminalModeHUD,
  3348  		StartTime:    f.Now(),
  3349  	}
  3350  	for _, o := range initOptions {
  3351  		ia = o(ia)
  3352  	}
  3353  	f.Init(ia)
  3354  }
  3355  
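        // useRealTiltfileLoader makes the fake Tiltfile loader delegate to the real one,
        // so the Tiltfile contents on disk are actually parsed.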
  3356  func (f *testFixture) useRealTiltfileLoader() {
  3357  	f.tfl.Delegate = f.realTFL
  3358  }
  3359  
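        // setManifests stubs the fake Tiltfile loader's result with the given manifests, all enabled.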
  3360  func (f *testFixture) setManifests(manifests []model.Manifest) {
  3361  	f.tfl.Result.Manifests = manifests
  3362  	f.tfl.Result = f.tfl.Result.WithAllManifestsEnabled()
  3363  }
  3364  
  3365  func (f *testFixture) setMaxParallelUpdates(n int) {
  3366  	f.overrideMaxParallelUpdates = n
  3367  
  3368  	state := f.store.LockMutableStateForTesting()
  3369  	state.UpdateSettings = state.UpdateSettings.WithMaxParallelUpdates(n)
  3370  	f.store.UnlockMutableState()
  3371  }
  3372  
  3373  func (f *testFixture) disableEnvAnalyticsOpt() {
  3374  	state := f.store.LockMutableStateForTesting()
  3375  	state.AnalyticsEnvOpt = analytics.OptDefault
  3376  	f.store.UnlockMutableState()
  3377  }
  3378  
  3379  type initOption func(ia InitAction) InitAction
  3380  
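        // Init starts the upper in a background goroutine with the given InitAction, then blocks until
        // the initial Tiltfile build finishes and (if the engine mode watches files) until the expected
        // FileWatch objects report that monitoring has started.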
  3381  func (f *testFixture) Init(action InitAction) {
  3382  	f.t.Helper()
  3383  
  3384  	ctx, cancel := context.WithCancel(f.ctx)
  3385  	defer cancel()
  3386  
  3387  	watchFiles := f.engineMode.WatchesFiles()
  3388  	f.upperInitResult = make(chan error, 10)
  3389  
  3390  	go func() {
  3391  		err := f.upper.Init(f.ctx, action)
  3392  		if err != nil && err != context.Canceled {
  3393  			// Print this out here in case the test never completes
  3394  			log.Printf("upper exited: %v\n", err)
  3395  			f.cancel()
  3396  		}
  3397  		cancel()
  3398  
  3399  		select {
  3400  		case f.upperInitResult <- err:
  3401  		default:
  3402  			fmt.Println("writing to upperInitResult would block!")
  3403  			panic(err)
  3404  		}
  3405  		close(f.upperInitResult)
  3406  	}()
  3407  
  3408  	f.WaitUntil("tiltfile build finishes", func(st store.EngineState) bool {
  3409  		return !st.MainTiltfileState().LastBuild().Empty()
  3410  	})
  3411  
  3412  	state := f.store.LockMutableStateForTesting()
  3413  	expectedFileWatches := ctrltiltfile.ToFileWatchObjects(ctrltiltfile.WatchInputs{
  3414  		TiltfileManifestName: model.MainTiltfileManifestName,
  3415  		Manifests:            state.Manifests(),
  3416  		ConfigFiles:          state.MainConfigPaths(),
  3417  		TiltfilePath:         action.TiltfilePath,
  3418  	}, make(map[model.ManifestName]*v1alpha1.DisableSource))
  3419  	if f.overrideMaxParallelUpdates > 0 {
  3420  		state.UpdateSettings = state.UpdateSettings.WithMaxParallelUpdates(f.overrideMaxParallelUpdates)
  3421  	}
  3422  	f.store.UnlockMutableState()
  3423  
  3424  	f.PollUntil("watches set up", func() bool {
  3425  		if !watchFiles {
  3426  			return true
  3427  		}
  3428  
  3429  		// wait for FileWatch objects to exist AND have a status indicating they're running
  3430  		var fwList v1alpha1.FileWatchList
  3431  		if err := f.ctrlClient.List(ctx, &fwList); err != nil {
  3432  			// If the context was canceled but the file watches haven't been set up,
  3433  			// that's OK. Just continue executing the rest of the test.
  3434  			//
  3435  			// If the error wasn't intended, the error will be properly
  3436  			// handled in TearDown().
  3437  			if ctx.Err() != nil {
  3438  				return true
  3439  			}
  3440  
  3441  			return false
  3442  		}
  3443  
  3444  		remainingWatchNames := make(map[string]bool)
  3445  		for _, fw := range expectedFileWatches {
  3446  			remainingWatchNames[fw.GetName()] = true
  3447  		}
  3448  
  3449  		for _, fw := range fwList.Items {
  3450  			if !fw.Status.MonitorStartTime.IsZero() {
  3451  				delete(remainingWatchNames, fw.GetName())
  3452  			}
  3453  		}
  3454  		return len(remainingWatchNames) == 0
  3455  	})
  3456  }
  3457  
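        // Stop cancels the fixture context and waits for the upper to exit,
        // treating context.Canceled as a clean shutdown.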
  3458  func (f *testFixture) Stop() error {
  3459  	f.cancel()
  3460  	err := <-f.upperInitResult
  3461  	if err == context.Canceled {
  3462  		return nil
  3463  	} else {
  3464  		return err
  3465  	}
  3466  }
  3467  
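        // WaitForExit waits up to stdTimeout for the upper to exit and returns its error.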
  3468  func (f *testFixture) WaitForExit() error {
  3469  	select {
  3470  	case <-time.After(stdTimeout):
  3471  		f.T().Fatalf("Timed out waiting for upper to exit")
  3472  		return nil
  3473  	case err := <-f.upperInitResult:
  3474  		return err
  3475  	}
  3476  }
  3477  
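        // WaitForNoExit fails the test if the upper exits within stdTimeout.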
  3478  func (f *testFixture) WaitForNoExit() error {
  3479  	select {
  3480  	case <-time.After(stdTimeout):
  3481  		return nil
  3482  	case err := <-f.upperInitResult:
  3483  		f.T().Fatalf("upper exited when it shouldn't have")
  3484  		return err
  3485  	}
  3486  }
  3487  
  3488  func (f *testFixture) SetNextBuildError(err error) {
  3489  	// Before setting the nextBuildError, make sure that any in-flight builds (state.BuildControllerStartCount)
  3490  	// have hit the buildAndDeployer (f.b.buildCount); by the time we've incremented buildCount and
  3491  	// the fakeBaD mutex is unlocked, we've already grabbed the nextBuildError for that build,
  3492  	// so we can freely set it here for a future build.
  3493  	f.WaitUntil("any in-flight builds have hit the buildAndDeployer", func(state store.EngineState) bool {
  3494  		f.b.mu.Lock()
  3495  		defer f.b.mu.Unlock()
  3496  		return f.b.buildCount == state.BuildControllerStartCount
  3497  	})
  3498  
  3499  	_ = f.store.RLockState()
  3500  	f.b.mu.Lock()
  3501  	f.b.nextBuildError = err
  3502  	f.b.mu.Unlock()
  3503  	f.store.RUnlockState()
  3504  }
  3505  
  3506  // Wait until the given view test passes.
  3507  func (f *testFixture) WaitUntilHUD(msg string, isDone func(view.View) bool) {
  3508  	f.fakeHud().WaitUntil(f.T(), f.ctx, msg, isDone)
  3509  }
  3510  
  3511  func (f *testFixture) WaitUntilHUDResource(msg string, name model.ManifestName, isDone func(view.Resource) bool) {
  3512  	f.fakeHud().WaitUntilResource(f.T(), f.ctx, msg, name, isDone)
  3513  }
  3514  
  3515  // Wait until the given engine state test passes.
  3516  func (f *testFixture) WaitUntil(msg string, isDone func(store.EngineState) bool) {
  3517  	f.T().Helper()
  3518  
  3519  	ctx, cancel := context.WithTimeout(f.ctx, stdTimeout)
  3520  	defer cancel()
  3521  
  3522  	isCanceled := false
  3523  
  3524  	for {
  3525  		state := f.upper.store.RLockState()
  3526  		done := isDone(state)
  3527  		fatalErr := state.FatalError
  3528  		f.upper.store.RUnlockState()
  3529  		if done {
  3530  			return
  3531  		}
  3532  		if fatalErr != nil {
  3533  			f.T().Fatalf("Store had fatal error: %v", fatalErr)
  3534  		}
  3535  
  3536  		if isCanceled {
  3537  			_, _ = fmt.Fprintf(os.Stderr, "Test canceled. Dumping engine state:\n")
  3538  			encoder := store.CreateEngineStateEncoder(os.Stderr)
  3539  			require.NoError(f.T(), encoder.Encode(state))
  3540  			f.T().Fatalf("Timed out waiting for: %s", msg)
  3541  		}
  3542  
  3543  		select {
  3544  		case <-ctx.Done():
  3545  			// Let the loop run the isDone test one more time
  3546  			isCanceled = true
  3547  		case <-f.onchangeCh:
  3548  		}
  3549  	}
  3550  }
  3551  
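        // withState runs tf against the current engine state while holding the store's read lock.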
  3552  func (f *testFixture) withState(tf func(store.EngineState)) {
  3553  	state := f.upper.store.RLockState()
  3554  	defer f.upper.store.RUnlockState()
  3555  	tf(state)
  3556  }
  3557  
  3558  func (f *testFixture) withManifestTarget(name model.ManifestName, tf func(ms store.ManifestTarget)) {
  3559  	f.withState(func(es store.EngineState) {
  3560  		mt, ok := es.ManifestTargets[name]
  3561  		if !ok {
  3562  			f.T().Fatalf("no manifest state for name %s", name)
  3563  		}
  3564  		tf(*mt)
  3565  	})
  3566  }
  3567  
  3568  func (f *testFixture) withManifestState(name model.ManifestName, tf func(ms store.ManifestState)) {
  3569  	f.withManifestTarget(name, func(mt store.ManifestTarget) {
  3570  		tf(*mt.State)
  3571  	})
  3572  }
  3573  
  3574  // Poll until the given state passes. This should be used for checking things outside
  3575  // the state loop. Don't use this to check state inside the state loop.
  3576  func (f *testFixture) PollUntil(msg string, isDone func() bool) {
  3577  	f.t.Helper()
  3578  	ctx, cancel := context.WithTimeout(f.ctx, stdTimeout)
  3579  	defer cancel()
  3580  
  3581  	ticker := time.NewTicker(10 * time.Millisecond)
        	defer ticker.Stop()
  3582  	for {
  3583  		done := isDone()
  3584  		if done {
  3585  			return
  3586  		}
  3587  
  3588  		select {
  3589  		case <-ctx.Done():
  3590  			f.T().Fatalf("Timed out waiting for: %s", msg)
  3591  		case <-ticker.C:
  3592  		}
  3593  	}
  3594  }
  3595  
  3596  func (f *testFixture) WaitUntilManifest(msg string, name model.ManifestName, isDone func(store.ManifestTarget) bool) {
  3597  	f.t.Helper()
  3598  	f.WaitUntil(msg, func(es store.EngineState) bool {
  3599  		mt, ok := es.ManifestTargets[name]
  3600  		if !ok {
  3601  			return false
  3602  		}
  3603  		return isDone(*mt)
  3604  	})
  3605  }
  3606  
  3607  func (f *testFixture) WaitUntilManifestState(msg string, name model.ManifestName, isDone func(store.ManifestState) bool) {
  3608  	f.t.Helper()
  3609  	f.WaitUntilManifest(msg, name, func(mt store.ManifestTarget) bool {
  3610  		return isDone(*(mt.State))
  3611  	})
  3612  }
  3613  
  3614  // gets the args for the next BaD call and blocks until that build is reflected in EngineState
  3615  func (f *testFixture) nextCallComplete(msgAndArgs ...interface{}) buildAndDeployCall {
  3616  	f.t.Helper()
  3617  	call := f.nextCall(msgAndArgs...)
  3618  	f.waitForCompletedBuildCount(call.count)
  3619  	return call
  3620  }
  3621  
  3622  // gets the args passed to the next call to the BaDer.
  3623  // Note that if you're using this to block until a build happens, it only blocks until the BaDer itself finishes,
  3624  // so it can return before the build has been processed by the upper and before the EngineState reflects
  3625  // the completed build.
  3626  // Using `nextCallComplete` instead ensures you block until the EngineState reflects the completed build.
  3627  func (f *testFixture) nextCall(msgAndArgs ...interface{}) buildAndDeployCall {
  3628  	f.t.Helper()
  3629  	msg := "timed out waiting for BuildAndDeployCall"
  3630  	if len(msgAndArgs) > 0 {
  3631  		format := msgAndArgs[0].(string)
  3632  		args := msgAndArgs[1:]
  3633  		msg = fmt.Sprintf("%s: %s", msg, fmt.Sprintf(format, args...))
  3634  	}
  3635  
  3636  	for {
  3637  		select {
  3638  		case call := <-f.b.calls:
  3639  			return call
  3640  		case <-time.After(stdTimeout):
  3641  			f.T().Fatal(msg)
  3642  		}
  3643  	}
  3644  }
  3645  
  3646  func (f *testFixture) assertNoCall(msgAndArgs ...interface{}) {
  3647  	f.t.Helper()
  3648  	msg := "expected there to be no BuildAndDeployCalls, but found one"
  3649  	if len(msgAndArgs) > 0 {
  3650  		msg = fmt.Sprintf("expected there to be no BuildAndDeployCalls, but found one: %s", msgAndArgs...)
  3651  	}
  3652  	for {
  3653  		select {
  3654  		case call := <-f.b.calls:
  3655  			f.T().Fatalf("%s\ncall:\n%s", msg, spew.Sdump(call))
  3656  		case <-time.After(200 * time.Millisecond):
  3657  			return
  3658  		}
  3659  	}
  3660  }
  3661  
  3662  func (f *testFixture) lastDeployedUID(manifestName model.ManifestName) types.UID {
  3663  	var manifest model.Manifest
  3664  	f.withManifestTarget(manifestName, func(mt store.ManifestTarget) {
  3665  		manifest = mt.Manifest
  3666  	})
  3667  	result := f.b.resultsByID[manifest.K8sTarget().ID()]
  3668  	k8sResult, ok := result.(store.K8sBuildResult)
  3669  	if !ok {
  3670  		return ""
  3671  	}
  3672  	if len(k8sResult.DeployedRefs) > 0 {
  3673  		return k8sResult.DeployedRefs[0].UID
  3674  	}
  3675  	return ""
  3676  }
  3677  
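        // startPod injects the pod into the fake Kubernetes client and waits for it to become
        // the manifest's most recent pod.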
  3678  func (f *testFixture) startPod(pod *v1.Pod, manifestName model.ManifestName) {
  3679  	f.t.Helper()
  3680  	f.podEvent(pod)
  3681  	f.WaitUntilManifestState("pod appears", manifestName, func(ms store.ManifestState) bool {
  3682  		return ms.MostRecentPod().Name == pod.Name
  3683  	})
  3684  }
  3685  
  3686  func (f *testFixture) podLog(pod *v1.Pod, manifestName model.ManifestName, s string) {
  3687  	podID := k8s.PodID(pod.Name)
  3688  	f.upper.store.Dispatch(store.NewLogAction(manifestName, k8sconv.SpanIDForPod(manifestName, podID), logger.InfoLvl, nil, []byte(s+"\n")))
  3689  
  3690  	f.WaitUntil("pod log seen", func(es store.EngineState) bool {
  3691  		ms, _ := es.ManifestState(manifestName)
  3692  		spanID := k8sconv.SpanIDForPod(manifestName, k8s.PodID(ms.MostRecentPod().Name))
  3693  		return strings.Contains(es.LogStore.SpanLog(spanID), s)
  3694  	})
  3695  }
  3696  
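        // restartPod bumps the pod's restart count, re-emits the pod event, and waits for the
        // new restart count to be reflected in the manifest state.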
  3697  func (f *testFixture) restartPod(pb podbuilder.PodBuilder) podbuilder.PodBuilder {
  3698  	restartCount := pb.RestartCount() + 1
  3699  	pb = pb.WithRestartCount(restartCount)
  3700  
  3701  	f.podEvent(pb.Build())
  3702  
  3703  	f.WaitUntilManifestState("pod restart seen", pb.ManifestName(), func(ms store.ManifestState) bool {
  3704  		return store.AllPodContainerRestarts(ms.MostRecentPod()) == int32(restartCount)
  3705  	})
  3706  	return pb
  3707  }
  3708  
  3709  func (f *testFixture) notifyAndWaitForPodStatus(pod *v1.Pod, mn model.ManifestName, pred func(pod v1alpha1.Pod) bool) {
  3710  	f.podEvent(pod)
  3711  	f.WaitUntilManifestState("pod status change seen", mn, func(state store.ManifestState) bool {
  3712  		return pred(state.MostRecentPod())
  3713  	})
  3714  }
  3715  
  3716  func (f *testFixture) waitForCompletedBuildCount(count int) {
  3717  	f.t.Helper()
  3718  	f.WaitUntil(fmt.Sprintf("%d builds done", count), func(state store.EngineState) bool {
  3719  		return state.CompletedBuildCount >= count
  3720  	})
  3721  }
  3722  
  3723  func (f *testFixture) LogLines() []string {
  3724  	return strings.Split(f.log.String(), "\n")
  3725  }
  3726  
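        // TearDown dumps the log store if the test failed, closes the fake file watcher channels,
        // cancels the fixture context, and drains any pending Init result.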
  3727  func (f *testFixture) TearDown() {
  3728  	if f.T().Failed() {
  3729  		f.withState(func(es store.EngineState) {
  3730  			fmt.Println(es.LogStore.String())
  3731  		})
  3732  	}
  3733  	close(f.fsWatcher.Events)
  3734  	close(f.fsWatcher.Errors)
  3735  	f.cancel()
  3736  
  3737  	// If the test started an Init() in a goroutine, drain it.
  3738  	if f.upperInitResult != nil {
  3739  		<-f.upperInitResult
  3740  	}
  3741  }
  3742  
  3743  func (f *testFixture) registerForDeployer(manifest model.Manifest) podbuilder.PodBuilder {
  3744  	pb := podbuilder.New(f.t, manifest)
  3745  	f.b.targetObjectTree[manifest.K8sTarget().ID()] = pb.ObjectTreeEntities()
  3746  	return pb
  3747  }
  3748  
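        // podEvent upserts the pod into the fake Kubernetes client, logging a note if any of its
        // owner references are unknown to the client.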
  3749  func (f *testFixture) podEvent(pod *v1.Pod) {
  3750  	f.t.Helper()
  3751  	for _, ownerRef := range pod.OwnerReferences {
  3752  		_, err := f.kClient.GetMetaByReference(f.ctx, v1.ObjectReference{
  3753  			UID:  ownerRef.UID,
  3754  			Name: ownerRef.Name,
  3755  		})
  3756  		if err != nil {
  3757  			f.t.Logf("Owner reference uid[%s] name[%s] for pod[%s] does not exist in fake client",
  3758  				ownerRef.UID, ownerRef.Name, pod.Name)
  3759  		}
  3760  	}
  3761  
  3762  	f.kClient.UpsertPod(pod)
  3763  }
  3764  
  3765  func (f *testFixture) newManifest(name string) model.Manifest {
  3766  	iTarget := NewSanchoLiveUpdateImageTarget(f)
  3767  	return manifestbuilder.New(f, model.ManifestName(name)).
  3768  		WithK8sYAML(SanchoYAML).
  3769  		WithImageTarget(iTarget).
  3770  		Build()
  3771  }
  3772  
  3773  func (f *testFixture) newManifestWithRef(name string, ref reference.Named) model.Manifest {
  3774  	refSel := container.NewRefSelector(ref)
  3775  
  3776  	iTarget := NewSanchoLiveUpdateImageTarget(f)
  3777  	iTarget = iTarget.MustWithRef(refSel)
  3778  
  3779  	return manifestbuilder.New(f, model.ManifestName(name)).
  3780  		WithK8sYAML(SanchoYAML).
  3781  		WithImageTarget(iTarget).
  3782  		Build()
  3783  }
  3784  
  3785  func (f *testFixture) newDockerBuildManifestWithBuildPath(name string, path string) model.Manifest {
  3786  	db := v1alpha1.DockerImageSpec{DockerfileContents: "FROM alpine", Context: path}
  3787  	iTarget := NewSanchoDockerBuildImageTarget(f).WithDockerImage(db)
  3788  	iTarget = iTarget.MustWithRef(container.MustParseSelector(strings.ToLower(name))) // each target should have a unique ID
  3789  	return manifestbuilder.New(f, model.ManifestName(name)).
  3790  		WithK8sYAML(SanchoYAML).
  3791  		WithImageTarget(iTarget).
  3792  		Build()
  3793  }
  3794  
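        // assertAllBuildsConsumed fails the test if any BuildAndDeploy calls were left unread by the test.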
  3795  func (f *testFixture) assertAllBuildsConsumed() {
  3796  	f.t.Helper()
  3797  	close(f.b.calls)
  3798  
  3799  	for call := range f.b.calls {
  3800  		f.T().Fatalf("Build not consumed: %s", spew.Sdump(call))
  3801  	}
  3802  }
  3803  
  3804  func (f *testFixture) loadAndStart(initOptions ...initOption) {
  3805  	f.t.Helper()
  3806  	ia := InitAction{
  3807  		TiltfilePath: f.JoinPath("Tiltfile"),
  3808  		TerminalMode: store.TerminalModeHUD,
  3809  		StartTime:    f.Now(),
  3810  	}
  3811  	for _, opt := range initOptions {
  3812  		ia = opt(ia)
  3813  	}
  3814  	f.Init(ia)
  3815  }
  3816  
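        // WriteConfigFiles writes (filename, contents) pairs to disk and fires a file event for each,
        // so the changes flow through the normal watch pipeline.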
  3817  func (f *testFixture) WriteConfigFiles(args ...string) {
  3818  	f.t.Helper()
  3819  	if (len(args) % 2) != 0 {
  3820  		f.T().Fatalf("WriteConfigFiles needs an even number of arguments; got %d", len(args))
  3821  	}
  3822  
  3823  	for i := 0; i < len(args); i += 2 {
  3824  		filename := f.JoinPath(args[i])
  3825  		contents := args[i+1]
  3826  		f.WriteFile(filename, contents)
  3827  
  3828  		// Fire an FS event through the normal pipeline, so that manifests get marked dirty.
  3829  		f.fsWatcher.Events <- watch.NewFileEvent(filename)
  3830  	}
  3831  }
  3832  
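        // setupDCFixture copies the docker-compose test data into the fixture, loads it with the
        // real Tiltfile loader, and returns the resulting redis and server manifests.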
  3833  func (f *testFixture) setupDCFixture() (redis, server model.Manifest) {
  3834  	dcp := filepath.Join(originalWD, "testdata", "fixture_docker-config.yml")
  3835  	dcpc, err := os.ReadFile(dcp)
  3836  	if err != nil {
  3837  		f.T().Fatal(err)
  3838  	}
  3839  	f.WriteFile("docker-compose.yml", string(dcpc))
  3840  
  3841  	dfp := filepath.Join(originalWD, "testdata", "server.dockerfile")
  3842  	dfc, err := os.ReadFile(dfp)
  3843  	if err != nil {
  3844  		f.T().Fatal(err)
  3845  	}
  3846  	f.WriteFile("Dockerfile", string(dfc))
  3847  
  3848  	f.WriteFile("Tiltfile", `docker_compose('docker-compose.yml')`)
  3849  
  3850  	f.dcc.WorkDir = f.Path()
  3851  	f.dcc.ConfigOutput = string(dcpc)
  3852  
  3853  	tlr := f.realTFL.Load(f.ctx, apitiltfile.MainTiltfile(f.JoinPath("Tiltfile"), nil), nil)
  3854  	if tlr.Error != nil {
  3855  		f.T().Fatal(tlr.Error)
  3856  	}
  3857  
  3858  	if len(tlr.Manifests) != 2 {
  3859  		f.T().Fatalf("Expected two manifests. Actual: %v", tlr.Manifests)
  3860  	}
  3861  
  3862  	for _, m := range tlr.Manifests {
  3863  		require.NoError(f.t, m.InferImageProperties())
  3864  	}
  3865  
  3866  	return tlr.Manifests[0], tlr.Manifests[1]
  3867  }
  3868  
  3869  func (f *testFixture) setBuildLogOutput(id model.TargetID, output string) {
  3870  	f.b.buildLogOutput[id] = output
  3871  }
  3872  
  3873  func (f *testFixture) hudResource(name model.ManifestName) view.Resource {
  3874  	res, ok := f.fakeHud().LastView.Resource(name)
  3875  	if !ok {
  3876  		f.T().Fatalf("Resource not found: %s", name)
  3877  	}
  3878  	return res
  3879  }
  3880  
  3881  func (f *testFixture) completeBuildForManifest(m model.Manifest) {
  3882  	f.b.completeBuild(targetIDStringForManifest(m))
  3883  }
  3884  
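        // setDisableState flips the manifest's disable ConfigMap and waits for the change to show up
        // in the corresponding UIResource's DisableStatus.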
  3885  func (f *testFixture) setDisableState(mn model.ManifestName, isDisabled bool) {
  3886  	err := tiltconfigmap.UpsertDisableConfigMap(f.ctx, f.ctrlClient, fmt.Sprintf("%s-disable", mn), "isDisabled", isDisabled)
  3887  	require.NoError(f.t, err)
  3888  
  3889  	f.WaitUntil("new disable state reflected in UIResource", func(state store.EngineState) bool {
  3890  		if uir, ok := state.UIResources[mn.String()]; ok {
  3891  			return uir.Status.DisableStatus.DisabledCount > 0 == isDisabled
  3892  		}
  3893  		return false
  3894  	})
  3895  }
  3896  
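        // fixtureSub pings the test fixture on every store change so WaitUntil can re-evaluate its predicate.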
  3897  type fixtureSub struct {
  3898  	ch chan bool
  3899  }
  3900  
  3901  func (s fixtureSub) OnChange(ctx context.Context, st store.RStore, _ store.ChangeSummary) error {
  3902  	s.ch <- true
  3903  	return nil
  3904  }
  3905  
  3906  func (f *testFixture) ensureCluster() {
  3907  	f.ensureClusterNamed(v1alpha1.ClusterNameDefault)
  3908  }
  3909  
  3910  func (f *testFixture) ensureClusterNamed(name string) {
  3911  	f.t.Helper()
  3912  	err := f.ctrlClient.Create(f.ctx, &v1alpha1.Cluster{
  3913  		ObjectMeta: metav1.ObjectMeta{
  3914  			Name: name,
  3915  		},
  3916  		Spec: v1alpha1.ClusterSpec{
  3917  			Connection: &v1alpha1.ClusterConnection{
  3918  				Kubernetes: &v1alpha1.KubernetesClusterConnection{},
  3919  			},
  3920  		},
  3921  	})
  3922  	require.NoError(f.T(), err)
  3923  }
  3924  
  3925  func assertLineMatches(t *testing.T, lines []string, re *regexp.Regexp) {
  3926  	for _, line := range lines {
  3927  		if re.MatchString(line) {
  3928  			return
  3929  		}
  3930  	}
  3931  	t.Fatalf("Expected line to match: %s. Lines: %v", re.String(), lines)
  3932  }
  3933  
  3934  func assertContainsOnce(t *testing.T, s string, val string) {
  3935  	assert.Contains(t, s, val)
  3936  	assert.Equal(t, 1, strings.Count(s, val), "Expected string to appear only once")
  3937  }
  3938  
  3939  // stringifyTargetIDs attempts to make a unique string to identify any set of targets
  3940  // (order-agnostic) by sorting and then concatenating the target IDs.
  3941  func stringifyTargetIDs(targets []model.TargetSpec) string {
  3942  	ids := make([]string, len(targets))
  3943  	for i, t := range targets {
  3944  		ids[i] = t.ID().String()
  3945  	}
  3946  	sort.Strings(ids)
  3947  	return strings.Join(ids, "::")
  3948  }
  3949  
  3950  func targetIDStringForManifest(m model.Manifest) string {
  3951  	return stringifyTargetIDs(m.TargetSpecs())
  3952  }