github.com/tilt-dev/tilt@v0.36.0/internal/engine/upper_test.go (about)

     1  package engine
     2  
     3  import (
     4  	"context"
     5  	"errors"
     6  	"fmt"
     7  	"io"
     8  	"log"
     9  	"os"
    10  	"path"
    11  	"path/filepath"
    12  	"regexp"
    13  	"runtime"
    14  	"sort"
    15  	"strings"
    16  	"sync"
    17  	"testing"
    18  	"time"
    19  
    20  	"github.com/davecgh/go-spew/spew"
    21  	"github.com/distribution/reference"
    22  	dockertypes "github.com/docker/docker/api/types"
    23  	"github.com/google/uuid"
    24  	"github.com/jonboulle/clockwork"
    25  	"github.com/spf13/afero"
    26  	"github.com/stretchr/testify/assert"
    27  	"github.com/stretchr/testify/require"
    28  	v1 "k8s.io/api/core/v1"
    29  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    30  	"k8s.io/apimachinery/pkg/types"
    31  	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
    32  
    33  	"github.com/tilt-dev/clusterid"
    34  	tiltanalytics "github.com/tilt-dev/tilt/internal/analytics"
    35  	"github.com/tilt-dev/tilt/internal/build"
    36  	"github.com/tilt-dev/tilt/internal/cloud"
    37  	"github.com/tilt-dev/tilt/internal/container"
    38  	"github.com/tilt-dev/tilt/internal/containerupdate"
    39  	"github.com/tilt-dev/tilt/internal/controllers"
    40  	apitiltfile "github.com/tilt-dev/tilt/internal/controllers/apis/tiltfile"
    41  	"github.com/tilt-dev/tilt/internal/controllers/core/cluster"
    42  	"github.com/tilt-dev/tilt/internal/controllers/core/cmd"
    43  	"github.com/tilt-dev/tilt/internal/controllers/core/cmdimage"
    44  	"github.com/tilt-dev/tilt/internal/controllers/core/configmap"
    45  	"github.com/tilt-dev/tilt/internal/controllers/core/dockercomposelogstream"
    46  	"github.com/tilt-dev/tilt/internal/controllers/core/dockercomposeservice"
    47  	"github.com/tilt-dev/tilt/internal/controllers/core/dockerimage"
    48  	"github.com/tilt-dev/tilt/internal/controllers/core/extension"
    49  	"github.com/tilt-dev/tilt/internal/controllers/core/extensionrepo"
    50  	"github.com/tilt-dev/tilt/internal/controllers/core/filewatch"
    51  	"github.com/tilt-dev/tilt/internal/controllers/core/filewatch/fsevent"
    52  	"github.com/tilt-dev/tilt/internal/controllers/core/imagemap"
    53  	"github.com/tilt-dev/tilt/internal/controllers/core/kubernetesapply"
    54  	"github.com/tilt-dev/tilt/internal/controllers/core/kubernetesdiscovery"
    55  	"github.com/tilt-dev/tilt/internal/controllers/core/liveupdate"
    56  	"github.com/tilt-dev/tilt/internal/controllers/core/podlogstream"
    57  	apiportforward "github.com/tilt-dev/tilt/internal/controllers/core/portforward"
    58  	ctrlsession "github.com/tilt-dev/tilt/internal/controllers/core/session"
    59  	ctrltiltfile "github.com/tilt-dev/tilt/internal/controllers/core/tiltfile"
    60  	"github.com/tilt-dev/tilt/internal/controllers/core/togglebutton"
    61  	ctrluibutton "github.com/tilt-dev/tilt/internal/controllers/core/uibutton"
    62  	ctrluiresource "github.com/tilt-dev/tilt/internal/controllers/core/uiresource"
    63  	ctrluisession "github.com/tilt-dev/tilt/internal/controllers/core/uisession"
    64  	"github.com/tilt-dev/tilt/internal/docker"
    65  	"github.com/tilt-dev/tilt/internal/dockercompose"
    66  	engineanalytics "github.com/tilt-dev/tilt/internal/engine/analytics"
    67  	"github.com/tilt-dev/tilt/internal/engine/buildcontrol"
    68  	"github.com/tilt-dev/tilt/internal/engine/configs"
    69  	"github.com/tilt-dev/tilt/internal/engine/dockerprune"
    70  	"github.com/tilt-dev/tilt/internal/engine/k8srollout"
    71  	"github.com/tilt-dev/tilt/internal/engine/k8swatch"
    72  	"github.com/tilt-dev/tilt/internal/engine/local"
    73  	"github.com/tilt-dev/tilt/internal/engine/session"
    74  	"github.com/tilt-dev/tilt/internal/engine/telemetry"
    75  	"github.com/tilt-dev/tilt/internal/engine/uiresource"
    76  	"github.com/tilt-dev/tilt/internal/engine/uisession"
    77  	"github.com/tilt-dev/tilt/internal/feature"
    78  	"github.com/tilt-dev/tilt/internal/hud"
    79  	"github.com/tilt-dev/tilt/internal/hud/prompt"
    80  	"github.com/tilt-dev/tilt/internal/hud/server"
    81  	"github.com/tilt-dev/tilt/internal/hud/view"
    82  	"github.com/tilt-dev/tilt/internal/k8s"
    83  	"github.com/tilt-dev/tilt/internal/k8s/kubeconfig"
    84  	"github.com/tilt-dev/tilt/internal/k8s/testyaml"
    85  	"github.com/tilt-dev/tilt/internal/localexec"
    86  	"github.com/tilt-dev/tilt/internal/openurl"
    87  	"github.com/tilt-dev/tilt/internal/store"
    88  	"github.com/tilt-dev/tilt/internal/store/buildcontrols"
    89  	"github.com/tilt-dev/tilt/internal/store/k8sconv"
    90  	"github.com/tilt-dev/tilt/internal/store/tiltfiles"
    91  	"github.com/tilt-dev/tilt/internal/testutils"
    92  	"github.com/tilt-dev/tilt/internal/testutils/bufsync"
    93  	tiltconfigmap "github.com/tilt-dev/tilt/internal/testutils/configmap"
    94  	"github.com/tilt-dev/tilt/internal/testutils/httptest"
    95  	"github.com/tilt-dev/tilt/internal/testutils/manifestbuilder"
    96  	"github.com/tilt-dev/tilt/internal/testutils/podbuilder"
    97  	"github.com/tilt-dev/tilt/internal/testutils/servicebuilder"
    98  	"github.com/tilt-dev/tilt/internal/testutils/tempdir"
    99  	"github.com/tilt-dev/tilt/internal/tiltfile"
   100  	"github.com/tilt-dev/tilt/internal/tiltfile/cisettings"
   101  	"github.com/tilt-dev/tilt/internal/tiltfile/config"
   102  	"github.com/tilt-dev/tilt/internal/tiltfile/k8scontext"
   103  	"github.com/tilt-dev/tilt/internal/tiltfile/tiltextension"
   104  	"github.com/tilt-dev/tilt/internal/tiltfile/version"
   105  	"github.com/tilt-dev/tilt/internal/token"
   106  	"github.com/tilt-dev/tilt/internal/tracer"
   107  	"github.com/tilt-dev/tilt/internal/watch"
   108  	"github.com/tilt-dev/tilt/internal/xdg"
   109  	"github.com/tilt-dev/tilt/pkg/apis"
   110  	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
   111  	"github.com/tilt-dev/tilt/pkg/assets"
   112  	"github.com/tilt-dev/tilt/pkg/logger"
   113  	"github.com/tilt-dev/tilt/pkg/model"
   114  	"github.com/tilt-dev/wmclient/pkg/analytics"
   115  )
   116  
// originalWD is the working directory at process start, captured in init() —
// presumably so tests that change directories can still resolve repo-relative
// paths (TODO confirm against fixture usage).
var originalWD string

// stdTimeout bounds how long tests wait for asynchronous conditions.
const stdTimeout = 2 * time.Second

// buildCompletionChannel signals build completion when closed; see
// fakeBuildAndDeployer.buildCompletionChans.
type buildCompletionChannel chan bool
   122  
   123  func init() {
   124  	wd, err := os.Getwd()
   125  	if err != nil {
   126  		panic(err)
   127  	}
   128  	originalWD = wd
   129  }
   130  
const (
	// simpleTiltfile is a minimal Tiltfile: one docker_build paired with one
	// k8s_yaml resource.
	simpleTiltfile = `
docker_build('gcr.io/windmill-public-containers/servantes/snack', '.')
k8s_yaml('snack.yaml')
`
	// simpleYAML is the Kubernetes manifest that simpleTiltfile deploys.
	simpleYAML = testyaml.SnackYaml
)
   138  
// buildAndDeployCall represents a single call to `BuildAndDeploy`, recorded by
// fakeBuildAndDeployer so tests can assert on what was built.
type buildAndDeployCall struct {
	count int                // 1-based sequence number of this build
	specs []model.TargetSpec // the targets passed to BuildAndDeploy
	state store.BuildStateSet
}
   145  
   146  func (c buildAndDeployCall) firstImgTarg() model.ImageTarget {
   147  	iTargs := c.imageTargets()
   148  	if len(iTargs) > 0 {
   149  		return iTargs[0]
   150  	}
   151  	return model.ImageTarget{}
   152  }
   153  
   154  func (c buildAndDeployCall) imageTargets() []model.ImageTarget {
   155  	targs := make([]model.ImageTarget, 0, len(c.specs))
   156  	for _, spec := range c.specs {
   157  		t, ok := spec.(model.ImageTarget)
   158  		if ok {
   159  			targs = append(targs, t)
   160  		}
   161  	}
   162  	return targs
   163  }
   164  
   165  func (c buildAndDeployCall) k8s() model.K8sTarget {
   166  	for _, spec := range c.specs {
   167  		t, ok := spec.(model.K8sTarget)
   168  		if ok {
   169  			return t
   170  		}
   171  	}
   172  	return model.K8sTarget{}
   173  }
   174  
   175  func (c buildAndDeployCall) dc() model.DockerComposeTarget {
   176  	for _, spec := range c.specs {
   177  		t, ok := spec.(model.DockerComposeTarget)
   178  		if ok {
   179  			return t
   180  		}
   181  	}
   182  	return model.DockerComposeTarget{}
   183  }
   184  
   185  func (c buildAndDeployCall) local() model.LocalTarget {
   186  	for _, spec := range c.specs {
   187  		t, ok := spec.(model.LocalTarget)
   188  		if ok {
   189  			return t
   190  		}
   191  	}
   192  	return model.LocalTarget{}
   193  }
   194  
   195  func (c buildAndDeployCall) dcState() store.BuildState {
   196  	return c.state[c.dc().ID()]
   197  }
   198  
   199  func (c buildAndDeployCall) k8sState() store.BuildState {
   200  	return c.state[c.k8s().ID()]
   201  }
   202  
   203  func (c buildAndDeployCall) oneImageState() store.BuildState {
   204  	imageStates := make([]store.BuildState, 0)
   205  	for k, v := range c.state {
   206  		if k.Type == model.TargetTypeImage {
   207  			imageStates = append(imageStates, v)
   208  		}
   209  	}
   210  
   211  	if len(imageStates) != 1 {
   212  		panic(fmt.Sprintf("More than one state: %v", c.state))
   213  	}
   214  	return imageStates[0]
   215  }
   216  
// fakeBuildAndDeployer is a test double for buildcontrol.BuildAndDeployer.
// Each BuildAndDeploy invocation is recorded on the calls channel so tests
// can assert on the specs and state it received.
type fakeBuildAndDeployer struct {
	t     *testing.T
	mu    sync.Mutex
	calls chan buildAndDeployCall

	// When true, BuildAndDeploy blocks until the test closes the matching
	// channel (see completeBuild); when false, builds complete immediately.
	completeBuildsManually bool
	buildCompletionChans   sync.Map // map[string]buildCompletionChannel; close channel at buildCompletionChans[k(targs)] to
	// complete the build started for targs (where k(targs) generates a unique string key for the set of targets)

	// buildCount increments once per BuildAndDeploy call; it also seeds the
	// fake image tag ("tilt-<N>") in nextImageBuildResult.
	buildCount int

	// Inject the container ID of the container started by Docker Compose.
	// If not set, we will auto-generate an ID.
	nextDockerComposeContainerID    container.ID
	nextDockerComposeContainerState *dockertypes.ContainerState

	// targetObjectTree lets a test pre-register an explicit
	// Deployment/ReplicaSet/Pod tree to "deploy" for a target
	// (see nextK8sDeployResult). nextDeployedUID and
	// nextPodTemplateSpecHash are one-shot overrides, mutually
	// exclusive with targetObjectTree.
	targetObjectTree        map[model.TargetID]podbuilder.PodObjectTree
	nextDeployedUID         types.UID
	nextPodTemplateSpecHash k8s.PodTemplateSpecHash

	// Set this to simulate a build with no results and an error.
	// Do not set this directly, use fixture.SetNextBuildError
	nextBuildError error

	// buildLogOutput maps a target to extra log text emitted during its build.
	buildLogOutput map[model.TargetID]string

	// resultsByID accumulates the results of every successful build.
	resultsByID store.BuildResultSet

	// kClient registers deployed entities for subsequent retrieval.
	kClient  *k8s.FakeK8sClient
	dcClient *dockercompose.FakeDCClient

	ctrlClient ctrlclient.Client

	// Real reconcilers, driven directly so apply statuses get populated.
	kaReconciler *kubernetesapply.Reconciler
	dcReconciler *dockercomposeservice.Reconciler
}

// Compile-time check that the fake satisfies the real interface.
var _ buildcontrol.BuildAndDeployer = &fakeBuildAndDeployer{}
   256  
   257  func (b *fakeBuildAndDeployer) nextImageBuildResult(ctx context.Context, iTarget model.ImageTarget) (store.ImageBuildResult, error) {
   258  	var clusterNN types.NamespacedName
   259  	if iTarget.IsDockerBuild() {
   260  		clusterNN = types.NamespacedName{Name: iTarget.DockerBuildInfo().Cluster}
   261  	} else if iTarget.IsCustomBuild() {
   262  		clusterNN = types.NamespacedName{Name: iTarget.CustomBuildInfo().Cluster}
   263  	} else if iTarget.IsDockerComposeBuild() {
   264  		clusterNN = types.NamespacedName{Name: v1alpha1.ClusterNameDocker}
   265  	} else {
   266  		return store.ImageBuildResult{}, fmt.Errorf("Unknown build type. ImageTarget: %s", iTarget.ID().String())
   267  	}
   268  
   269  	if clusterNN.Name == "" {
   270  		clusterNN.Name = v1alpha1.ClusterNameDefault
   271  	}
   272  
   273  	var cluster v1alpha1.Cluster
   274  	err := b.ctrlClient.Get(ctx, clusterNN, &cluster)
   275  	if err != nil {
   276  		return store.ImageBuildResult{}, err
   277  	}
   278  	refs, err := iTarget.Refs(&cluster)
   279  	if err != nil {
   280  		return store.ImageBuildResult{}, fmt.Errorf("determining refs: %v", err)
   281  	}
   282  
   283  	tag := fmt.Sprintf("tilt-%d", b.buildCount)
   284  	localRefTagged := container.MustWithTag(refs.LocalRef(), tag)
   285  	clusterRefTagged := container.MustWithTag(refs.ClusterRef(), tag)
   286  	return store.NewImageBuildResult(iTarget.ID(), localRefTagged, clusterRefTagged), nil
   287  }
   288  
// BuildAndDeploy is the fake implementation of buildcontrol.BuildAndDeployer.
// It fakes image builds (publishing each target's ImageMap status), then
// drives the real kubernetesapply / dockercomposeservice reconcilers so the
// rest of the system observes a deploy. The call is pushed onto b.calls in a
// defer — after any manual-completion wait — so a test that receives from
// b.calls knows the related actions have already been dispatched.
func (b *fakeBuildAndDeployer) BuildAndDeploy(ctx context.Context, st store.RStore, specs []model.TargetSpec, state store.BuildStateSet) (brs store.BuildResultSet, err error) {
	b.t.Helper()

	b.mu.Lock()
	b.buildCount++
	buildKey := stringifyTargetIDs(specs)
	b.registerBuild(buildKey)

	if !b.completeBuildsManually {
		// i.e. we should complete builds automatically: mark the build for completion now,
		// so we return immediately at the end of BuildAndDeploy.
		b.completeBuild(buildKey)
	}

	call := buildAndDeployCall{count: b.buildCount, specs: specs, state: state}
	if call.dc().Empty() && call.k8s().Empty() && call.local().Empty() {
		b.t.Fatalf("Invalid call: %+v", call)
	}

	// Echo any test-configured log output for each target being built.
	ids := []model.TargetID{}
	for _, spec := range specs {
		id := spec.ID()
		ids = append(ids, id)
		output, ok := b.buildLogOutput[id]
		if ok {
			logger.Get(ctx).Infof("%s", output)
		}
	}

	// NOTE: the named return value err is re-assigned inside this defer, so a
	// failure while waiting for manual completion surfaces to the caller.
	defer func() {
		b.mu.Unlock()

		// block until we know we're supposed to resolve this build
		err2 := b.waitUntilBuildCompleted(ctx, buildKey)
		if err == nil {
			err = err2
		}

		// don't update b.calls until the end, to ensure appropriate actions have been dispatched first
		select {
		case b.calls <- call:
		default:
			b.t.Error("writing to fakeBuildAndDeployer would block. either there's a bug or the buffer size needs to be increased")
		}

		logger.Get(ctx).Infof("fake built %s. error: %v", ids, err)
	}()

	// nextBuildError is one-shot: consume it, then clear it.
	err = b.nextBuildError
	b.nextBuildError = nil
	if err != nil {
		return nil, err
	}

	iTargets := model.ExtractImageTargets(specs)
	// Pretend every previously-built image still exists, so the queue never
	// forces a rebuild on that account.
	fakeImageExistsCheck := func(ctx context.Context, iTarget model.ImageTarget, namedTagged reference.NamedTagged) (bool, error) {
		return true, nil
	}
	queue, err := buildcontrol.NewImageTargetQueue(ctx, iTargets, state, fakeImageExistsCheck)
	if err != nil {
		return nil, err
	}

	err = queue.RunBuilds(func(target model.TargetSpec, depResults []store.ImageBuildResult) (store.ImageBuildResult, error) {
		b.t.Helper()
		iTarget := target.(model.ImageTarget)
		ibr, err := b.nextImageBuildResult(ctx, iTarget)
		if err != nil {
			return store.ImageBuildResult{}, err
		}

		var im v1alpha1.ImageMap
		if err := b.ctrlClient.Get(ctx, types.NamespacedName{Name: iTarget.ImageMapName()}, &im); err != nil {
			return store.ImageBuildResult{}, err
		}

		// Publish the fake result on the ImageMap status, like a real build
		// would, so reconcilers can pick it up.
		im.Status = *ibr.ImageMapStatus.DeepCopy()
		buildStartTime := apis.NowMicro()
		im.Status.BuildStartTime = &buildStartTime

		if err := b.ctrlClient.Status().Update(ctx, &im); err != nil {
			return store.ImageBuildResult{}, err
		}

		return ibr, nil
	})
	// Collect partial results even on error, so callers see what did build.
	result := queue.NewResults().ToBuildResultSet()
	if err != nil {
		return result, err
	}

	if !call.dc().Empty() {
		dcContainerID := container.ID(fmt.Sprintf("dc-%s", path.Base(call.dc().ID().Name.String())))
		if b.nextDockerComposeContainerID != "" {
			dcContainerID = b.nextDockerComposeContainerID
		}
		b.dcClient.ContainerIDDefault = dcContainerID

		err = b.updateDockerComposeServiceStatus(ctx, call.dc(), iTargets)
		if err != nil {
			return result, err
		}

		dcContainerState := b.nextDockerComposeContainerState
		result[call.dc().ID()] = store.NewDockerComposeDeployResult(
			call.dc().ID(), dockercompose.ToServiceStatus(dcContainerID, string(dcContainerID), dcContainerState, nil))
	}

	if kTarg := call.k8s(); !kTarg.Empty() {
		nextK8sResult := b.nextK8sDeployResult(kTarg)
		err = b.updateKubernetesApplyStatus(ctx, kTarg, iTargets)
		if err != nil {
			return result, err
		}
		result[call.k8s().ID()] = nextK8sResult
	}

	// The injected Docker Compose container ID is one-shot.
	b.nextDockerComposeContainerID = ""

	for key, val := range result {
		b.resultsByID[key] = val
	}

	return result, nil
}
   414  
   415  func (b *fakeBuildAndDeployer) updateKubernetesApplyStatus(ctx context.Context, kTarg model.K8sTarget, iTargets []model.ImageTarget) error {
   416  	imageMapSet := make(map[types.NamespacedName]*v1alpha1.ImageMap, len(kTarg.ImageMaps))
   417  	for _, iTarget := range iTargets {
   418  		if iTarget.IsLiveUpdateOnly {
   419  			continue
   420  		}
   421  
   422  		var im v1alpha1.ImageMap
   423  		nn := types.NamespacedName{Name: iTarget.ImageMapName()}
   424  		err := b.ctrlClient.Get(ctx, nn, &im)
   425  		if err != nil {
   426  			return err
   427  		}
   428  		imageMapSet[nn] = &im
   429  	}
   430  
   431  	clusterName := kTarg.KubernetesApplySpec.Cluster
   432  	if clusterName == "" {
   433  		clusterName = v1alpha1.ClusterNameDefault
   434  	}
   435  
   436  	var cluster v1alpha1.Cluster
   437  	err := b.ctrlClient.Get(ctx, types.NamespacedName{Name: clusterName}, &cluster)
   438  	if err != nil {
   439  		return err
   440  	}
   441  
   442  	nn := types.NamespacedName{Name: kTarg.ID().Name.String()}
   443  	status := b.kaReconciler.ForceApply(ctx, nn, kTarg.KubernetesApplySpec, &cluster, imageMapSet)
   444  
   445  	// We want our fake stub to only propagate apiserver problems.
   446  	_ = status
   447  
   448  	return nil
   449  }
   450  
   451  func (b *fakeBuildAndDeployer) updateDockerComposeServiceStatus(ctx context.Context, dcTarg model.DockerComposeTarget, iTargets []model.ImageTarget) error {
   452  	imageMapSet := make(map[types.NamespacedName]*v1alpha1.ImageMap, len(dcTarg.Spec.ImageMaps))
   453  	for _, iTarget := range iTargets {
   454  		if iTarget.IsLiveUpdateOnly {
   455  			continue
   456  		}
   457  
   458  		var im v1alpha1.ImageMap
   459  		nn := types.NamespacedName{Name: iTarget.ImageMapName()}
   460  		err := b.ctrlClient.Get(ctx, nn, &im)
   461  		if err != nil {
   462  			return err
   463  		}
   464  		imageMapSet[nn] = &im
   465  	}
   466  
   467  	nn := types.NamespacedName{Name: dcTarg.ID().Name.String()}
   468  	status := b.dcReconciler.ForceApply(ctx, nn, dcTarg.Spec, imageMapSet, false)
   469  
   470  	// We want our fake stub to only propagate apiserver problems.
   471  	_ = status
   472  
   473  	return nil
   474  }
   475  
// nextK8sDeployResult fabricates the K8s deploy result for kTarg.
//
// If the test pre-registered an explicit object tree for this target (via
// targetObjectTree), that tree's Deployment is "deployed" and the Deployment +
// ReplicaSet are injected into the fake K8s client. Otherwise the target's
// YAML is parsed, and each entity gets a UID (the one-shot nextDeployedUID if
// set, else a random one) plus a pod-template-spec hash (the test-provided
// nextPodTemplateSpecHash if set, else an injected one).
func (b *fakeBuildAndDeployer) nextK8sDeployResult(kTarg model.K8sTarget) store.K8sBuildResult {
	var err error
	var deployed []k8s.K8sEntity

	explicitDeploymentEntities := b.targetObjectTree[kTarg.ID()]
	if len(explicitDeploymentEntities) != 0 {
		// The explicit-tree knob is mutually exclusive with the next-*
		// overrides; catch conflicting test setup loudly.
		if b.nextDeployedUID != "" {
			b.t.Fatalf("Cannot set both explicit deployed entities + next deployed UID")
		}
		if b.nextPodTemplateSpecHash != "" {
			b.t.Fatalf("Cannot set both explicit deployed entities + next pod template spec hashes")
		}

		// register Deployment + ReplicaSet so that other parts of the system can properly retrieve them
		b.kClient.Inject(
			explicitDeploymentEntities.Deployment(),
			explicitDeploymentEntities.ReplicaSet())

		// only return the Deployment entity as deployed since the ReplicaSet + Pod are created implicitly,
		// i.e. they are not returned in a normal apply call for a Deployment
		deployed = []k8s.K8sEntity{explicitDeploymentEntities.Deployment()}
	} else {
		deployed, err = k8s.ParseYAMLFromString(kTarg.YAML)
		require.NoError(b.t, err)

		for i := 0; i < len(deployed); i++ {
			uid := types.UID(uuid.New().String())
			if b.nextDeployedUID != "" {
				// nextDeployedUID is one-shot: used for the first entity,
				// then cleared; remaining entities get random UIDs.
				uid = b.nextDeployedUID
				b.nextDeployedUID = ""
			}
			deployed[i].SetUID(string(uid))
		}

		for i, e := range deployed {
			if b.nextPodTemplateSpecHash != "" {
				// Deep-copy before mutating labels so the parsed entity
				// isn't aliased.
				e = e.DeepCopy()
				templateSpecs, err := k8s.ExtractPodTemplateSpec(&e)
				require.NoError(b.t, err)
				for _, ts := range templateSpecs {
					ts.Labels = map[string]string{k8s.TiltPodTemplateHashLabel: string(b.nextPodTemplateSpecHash)}
				}
				deployed[i] = e
			} else {
				deployed[i], err = k8s.InjectPodTemplateSpecHashes(e)
				require.NoError(b.t, err)
			}
		}
	}

	resultYAML, err := k8s.SerializeSpecYAML(deployed)
	require.NoError(b.t, err)

	// Expose the deployed entities via the fake client so subsequent
	// operations can see them.
	b.kClient.UpsertResult = deployed

	filter, err := k8sconv.NewKubernetesApplyFilter(resultYAML)
	require.NoError(b.t, err)
	return store.NewK8sDeployResult(kTarg.ID(), filter)
}
   535  
   536  func (b *fakeBuildAndDeployer) getOrCreateBuildCompletionChannel(key string) buildCompletionChannel {
   537  	ch := make(buildCompletionChannel)
   538  	val, _ := b.buildCompletionChans.LoadOrStore(key, ch)
   539  
   540  	var ok bool
   541  	ch, ok = val.(buildCompletionChannel)
   542  	if !ok {
   543  		panic(fmt.Sprintf("expected map value of type: buildCompletionChannel, got %T", val))
   544  	}
   545  
   546  	return ch
   547  }
   548  
   549  func (b *fakeBuildAndDeployer) registerBuild(key string) {
   550  	b.getOrCreateBuildCompletionChannel(key)
   551  }
   552  
   553  func (b *fakeBuildAndDeployer) waitUntilBuildCompleted(ctx context.Context, key string) error {
   554  	ch := b.getOrCreateBuildCompletionChannel(key)
   555  
   556  	defer b.buildCompletionChans.Delete(key)
   557  
   558  	// wait until channel for this build is closed, or context is canceled/finished.
   559  	select {
   560  	case <-ch:
   561  		return nil
   562  	case <-ctx.Done():
   563  		return ctx.Err()
   564  	}
   565  }
   566  
   567  func newFakeBuildAndDeployer(t *testing.T, kClient *k8s.FakeK8sClient, dcClient *dockercompose.FakeDCClient, ctrlClient ctrlclient.Client, kaReconciler *kubernetesapply.Reconciler, dcReconciler *dockercomposeservice.Reconciler) *fakeBuildAndDeployer {
   568  	return &fakeBuildAndDeployer{
   569  		t:                t,
   570  		calls:            make(chan buildAndDeployCall, 20),
   571  		buildLogOutput:   make(map[model.TargetID]string),
   572  		resultsByID:      store.BuildResultSet{},
   573  		kClient:          kClient,
   574  		dcClient:         dcClient,
   575  		ctrlClient:       ctrlClient,
   576  		kaReconciler:     kaReconciler,
   577  		dcReconciler:     dcReconciler,
   578  		targetObjectTree: make(map[model.TargetID]podbuilder.PodObjectTree),
   579  	}
   580  }
   581  
   582  func (b *fakeBuildAndDeployer) completeBuild(key string) {
   583  	ch := b.getOrCreateBuildCompletionChannel(key)
   584  	close(ch)
   585  }
   586  
// TestUpper_Up runs a single manifest through Upper.Init, verifies the fake
// builder received it, then cancels the context (simulated Ctrl-C) and checks
// both the returned error and the build's log output.
func TestUpper_Up(t *testing.T) {
	f := newTestFixture(t)
	manifest := f.newManifest("foobar")

	f.setManifests([]model.Manifest{manifest})

	storeErr := make(chan error, 1)
	go func() {
		storeErr <- f.upper.Init(f.ctx, InitAction{
			TiltfilePath: f.JoinPath("Tiltfile"),
			StartTime:    f.Now(),
		})
	}()

	call := f.nextCallComplete()
	assert.Equal(t, manifest.K8sTarget().ID(), call.k8s().ID())
	// No further builds are expected; closing the channel would make a stray
	// later BuildAndDeploy panic on send rather than pass silently.
	close(f.b.calls)

	// cancel the context to simulate a Ctrl-C
	f.cancel()
	err := <-storeErr
	if assert.NotNil(t, err, "Store returned nil error (expected context canceled)") {
		assert.Contains(t, err.Error(), context.Canceled.Error(), "Store error was not as expected")
	}

	state := f.upper.store.RLockState()
	defer f.upper.store.RUnlockState()

	// The build's log span should contain the fake builder's "fake built"
	// line for this manifest.
	buildRecord := state.ManifestTargets[manifest.Name].Status().LastBuild()
	lines := strings.Split(state.LogStore.SpanLog(buildRecord.SpanID), "\n")
	assertLineMatches(t, lines, regexp.MustCompile("fake built .*foobar"))
}
   619  
// TestUpper_UpK8sEntityOrdering verifies that non-workload entities loaded via
// a real Tiltfile come out on the manifest's YAML in sorted apply order:
// PersistentVolume, then PersistentVolumeClaim, then ConfigMap.
func TestUpper_UpK8sEntityOrdering(t *testing.T) {
	f := newTestFixture(t, fixtureOptions{engineMode: &store.EngineModeCI})
	f.useRealTiltfileLoader()

	postgresEntities, err := k8s.ParseYAMLFromString(testyaml.PostgresYAML)
	require.NoError(t, err)
	yaml, err := k8s.SerializeSpecYAML(postgresEntities[:3]) // only take entities that don't belong to a workload
	require.NoError(t, err)
	f.WriteFile("Tiltfile", `k8s_yaml('postgres.yaml')`)
	f.WriteFile("postgres.yaml", yaml)

	storeErr := make(chan error, 1)
	go func() {
		storeErr <- f.upper.Init(f.ctx, InitAction{
			TiltfilePath: f.JoinPath("Tiltfile"),
			StartTime:    f.Now(),
		})
	}()

	call := f.nextCallComplete()
	entities, err := k8s.ParseYAMLFromString(call.k8s().YAML)
	require.NoError(t, err)
	expectedKindOrder := []string{"PersistentVolume", "PersistentVolumeClaim", "ConfigMap"}
	actualKindOrder := make([]string, len(entities))
	for i, e := range entities {
		actualKindOrder[i] = e.GVK().Kind
	}
	assert.Equal(t, expectedKindOrder, actualKindOrder,
		"YAML on the manifest should be in sorted order")

	// In CI mode Init is expected to return nil without an explicit cancel.
	f.assertAllBuildsConsumed()
	require.NoError(t, <-storeErr)
}
   653  
// TestUpper_CI checks the CI-mode happy path: one manifest builds, its pod
// reaches Running, and Init returns nil.
func TestUpper_CI(t *testing.T) {
	f := newTestFixture(t, fixtureOptions{engineMode: &store.EngineModeCI})

	manifest := f.newManifest("foobar")
	pb := f.registerForDeployer(manifest)
	f.setManifests([]model.Manifest{manifest})

	storeErr := make(chan error, 1)
	go func() {
		storeErr <- f.upper.Init(f.ctx, InitAction{
			TiltfilePath: f.JoinPath("Tiltfile"),
			UserArgs:     nil, // equivalent to `tilt up --watch=false` (i.e. not specifying any manifest names)
			StartTime:    f.Now(),
		})
	}()

	call := f.nextCallComplete()
	close(f.b.calls)
	assert.Equal(t, "foobar", call.k8s().ID().Name.String())

	// Driving the pod to Running lets CI mode finish successfully.
	f.startPod(pb.WithPhase(string(v1.PodRunning)).Build(), manifest.Name)
	require.NoError(t, <-storeErr)
}
   677  
   678  func TestFirstBuildFails_Up(t *testing.T) {
   679  	if runtime.GOOS == "windows" {
   680  		t.Skip("flaky on windows")
   681  	}
   682  
   683  	f := newTestFixture(t)
   684  	manifest := f.newManifest("foobar")
   685  	f.SetNextBuildError(errors.New("Build failed"))
   686  
   687  	f.Start([]model.Manifest{manifest})
   688  
   689  	call := f.nextCall()
   690  	assert.True(t, call.oneImageState().IsEmpty())
   691  
   692  	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("a.go"))
   693  
   694  	call = f.nextCall()
   695  	assert.True(t, call.oneImageState().IsEmpty())
   696  	assert.Equal(t, []string{f.JoinPath("a.go")}, call.oneImageState().FilesChanged())
   697  
   698  	err := f.Stop()
   699  	assert.NoError(t, err)
   700  	f.assertAllBuildsConsumed()
   701  }
   702  
   703  func TestFirstBuildCancels_Up(t *testing.T) {
   704  	f := newTestFixture(t)
   705  	manifest := f.newManifest("foobar")
   706  	f.SetNextBuildError(context.Canceled)
   707  
   708  	f.Start([]model.Manifest{manifest})
   709  
   710  	call := f.nextCall()
   711  	assert.True(t, call.oneImageState().IsEmpty())
   712  
   713  	err := f.Stop()
   714  	assert.NoError(t, err)
   715  	f.assertAllBuildsConsumed()
   716  }
   717  
// TestFirstBuildFails_CI verifies that in CI mode a failed build surfaces its
// error through Init's result and sets the store's exit signal.
func TestFirstBuildFails_CI(t *testing.T) {
	f := newTestFixture(t, fixtureOptions{engineMode: &store.EngineModeCI})
	manifest := f.newManifest("foobar")
	buildFailedToken := errors.New("doesn't compile")
	f.SetNextBuildError(buildFailedToken)

	f.setManifests([]model.Manifest{manifest})
	f.Init(InitAction{
		TiltfilePath: f.JoinPath("Tiltfile"),
		TerminalMode: store.TerminalModeHUD,
		StartTime:    f.Now(),
	})

	f.WaitUntilManifestState("build has failed", manifest.ManifestName(), func(st store.ManifestState) bool {
		return st.LastBuild().Error != nil
	})

	// The build error should propagate out through Init's result channel.
	select {
	case err := <-f.upperInitResult:
		require.NotNil(t, err)
		assert.Contains(t, err.Error(), "doesn't compile")
	case <-time.After(stdTimeout):
		t.Fatal("Timed out waiting for exit action")
	}

	f.withState(func(es store.EngineState) {
		assert.True(t, es.ExitSignal)
	})
}
   747  
// TestCIIgnoresDisabledResources: with only m1 in the Tiltfile result's
// enabled set, CI mode should build m1 alone and exit cleanly once m1's pod
// is running — m2 is never built.
func TestCIIgnoresDisabledResources(t *testing.T) {
	f := newTestFixture(t, fixtureOptions{engineMode: &store.EngineModeCI})

	m1 := f.newManifest("m1")
	pb := f.registerForDeployer(m1)
	m2 := f.newManifest("m2")
	f.setManifests([]model.Manifest{m1, m2})
	// Only m1 is enabled; m2 stays disabled.
	f.tfl.Result.EnabledManifests = []model.ManifestName{m1.Name}

	storeErr := make(chan error, 1)
	go func() {
		storeErr <- f.upper.Init(f.ctx, InitAction{
			TiltfilePath: f.JoinPath("Tiltfile"),
			StartTime:    f.Now(),
		})
	}()

	call := f.nextCallComplete()
	close(f.b.calls)
	assert.Equal(t, "m1", call.k8s().ID().Name.String())

	f.startPod(pb.WithPhase(string(v1.PodRunning)).Build(), m1.Name)
	require.NoError(t, <-storeErr)
}
   772  
// TestConfigFileChangeClearsBuildStateToForceImageBuild verifies that editing
// a config file (the Dockerfile) produces a new manifest and that the next
// build starts from cleared state — forcing a full image build rather than a
// live update.
func TestConfigFileChangeClearsBuildStateToForceImageBuild(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	f.WriteFile("Tiltfile", `
docker_build('gcr.io/windmill-public-containers/servantes/snack', '.', live_update=[sync('.', '/app')])
k8s_yaml('snack.yaml')
	`)
	f.WriteFile("Dockerfile", `FROM iron/go:prod`)
	f.WriteFile("snack.yaml", simpleYAML)

	f.loadAndStart()

	// First call: with the old manifest
	call := f.nextCall("old manifest")
	assert.Equal(t, `FROM iron/go:prod`, call.firstImgTarg().DockerBuildInfo().DockerfileContents)

	f.WriteConfigFiles("Dockerfile", `FROM iron/go:dev`)

	// Second call: new manifest!
	call = f.nextCall("new manifest")
	assert.Equal(t, "FROM iron/go:dev", call.firstImgTarg().DockerBuildInfo().DockerfileContents)
	assert.Equal(t, testyaml.SnackYAMLPostConfig, call.k8s().YAML)

	// Since the manifest changed, we cleared the previous build state to force an image build
	// (i.e. check that we called BuildAndDeploy with no pre-existing state)
	assert.False(t, call.oneImageState().HasLastResult())

	err := f.Stop()
	assert.NoError(t, err)
	f.assertAllBuildsConsumed()
}
   805  
// TestMultipleChangesOnlyDeployOneManifest verifies that when several config
// files change at once, each affected manifest rebuilds, but a subsequent change
// to only ONE Dockerfile rebuilds only the manifest that depends on it.
func TestMultipleChangesOnlyDeployOneManifest(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	f.WriteFile("Tiltfile", `
# ensure builds happen in deterministic order
update_settings(max_parallel_updates=1)

docker_build("gcr.io/windmill-public-containers/servantes/snack", "./snack", dockerfile="Dockerfile1")
docker_build("gcr.io/windmill-public-containers/servantes/doggos", "./doggos", dockerfile="Dockerfile2")

k8s_yaml(['snack.yaml', 'doggos.yaml'])
k8s_resource('snack', new_name='baz')
k8s_resource('doggos', new_name='quux')
`)
	f.WriteFile("snack.yaml", simpleYAML)
	f.WriteFile("Dockerfile1", `FROM iron/go:prod`)
	f.WriteFile("Dockerfile2", `FROM iron/go:prod`)
	f.WriteFile("doggos.yaml", testyaml.DoggosDeploymentYaml)

	f.loadAndStart()

	// First call: with the old manifests
	call := f.nextCall("old manifest (baz)")
	assert.Equal(t, `FROM iron/go:prod`, call.firstImgTarg().DockerBuildInfo().DockerfileContents)
	assert.Equal(t, "baz", string(call.k8s().Name))

	call = f.nextCall("old manifest (quux)")
	assert.Equal(t, `FROM iron/go:prod`, call.firstImgTarg().DockerBuildInfo().DockerfileContents)
	assert.Equal(t, "quux", string(call.k8s().Name))

	// rewrite the dockerfiles
	f.WriteConfigFiles(
		"Dockerfile1", `FROM iron/go:dev1`,
		"Dockerfile2", "FROM iron/go:dev2")

	// Builds triggered by config file changes
	call = f.nextCall("manifest from config files (baz)")
	assert.Equal(t, `FROM iron/go:dev1`, call.firstImgTarg().DockerBuildInfo().DockerfileContents)
	assert.Equal(t, "baz", string(call.k8s().Name))

	call = f.nextCall("manifest from config files (quux)")
	assert.Equal(t, `FROM iron/go:dev2`, call.firstImgTarg().DockerBuildInfo().DockerfileContents)
	assert.Equal(t, "quux", string(call.k8s().Name))

	// Now change (only one) dockerfile
	f.WriteConfigFiles("Dockerfile1", `FROM node:10`)

	// Second call: one new manifest!
	call = f.nextCall("changed config file --> new manifest")

	assert.Equal(t, "baz", string(call.k8s().Name))
	assert.ElementsMatch(t, []string{}, call.oneImageState().FilesChanged())

	// Since the manifest changed, we cleared the previous build state to force an image build
	assert.False(t, call.oneImageState().HasLastResult())

	// Importantly the other manifest, quux, is _not_ called -- the DF change didn't affect its manifest
	err := f.Stop()
	assert.Nil(t, err)
	f.assertAllBuildsConsumed()
}
   868  
// TestSecondResourceIsBuilt verifies that adding a brand-new resource to the
// Tiltfile triggers a build of that resource (and only that resource) on reload.
func TestSecondResourceIsBuilt(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	f.WriteFile("Tiltfile", `
docker_build("gcr.io/windmill-public-containers/servantes/snack", "./snack", dockerfile="Dockerfile1")

k8s_yaml('snack.yaml')
k8s_resource('snack', new_name='baz')  # rename "snack" --> "baz"
`)
	f.WriteFile("snack.yaml", simpleYAML)
	f.WriteFile("Dockerfile1", `FROM iron/go:dev1`)
	// Dockerfile2/doggos.yaml are written up front so they exist when the
	// updated Tiltfile below references them.
	f.WriteFile("Dockerfile2", `FROM iron/go:dev2`)
	f.WriteFile("doggos.yaml", testyaml.DoggosDeploymentYaml)

	f.loadAndStart()

	// First call: with one resource
	call := f.nextCall("old manifest (baz)")
	assert.Equal(t, "FROM iron/go:dev1", call.firstImgTarg().DockerBuildInfo().DockerfileContents)
	assert.Equal(t, "baz", string(call.k8s().Name))

	// Now add a second resource
	f.WriteConfigFiles("Tiltfile", `
docker_build("gcr.io/windmill-public-containers/servantes/snack", "./snack", dockerfile="Dockerfile1")
docker_build("gcr.io/windmill-public-containers/servantes/doggos", "./doggos", dockerfile="Dockerfile2")

k8s_yaml(['snack.yaml', 'doggos.yaml'])
k8s_resource('snack', new_name='baz')  # rename "snack" --> "baz"
k8s_resource('doggos', new_name='quux')  # rename "doggos" --> "quux"
`)

	// Expect a build of quux, the new resource
	call = f.nextCall("changed config file --> new manifest")
	assert.Equal(t, "quux", string(call.k8s().Name))
	assert.ElementsMatch(t, []string{}, call.oneImageState().FilesChanged())

	err := f.Stop()
	assert.Nil(t, err)
	f.assertAllBuildsConsumed()
}
   910  
// TestConfigChange_NoOpChange verifies that rewriting a config file with
// identical contents does NOT trigger a rebuild, and that a later source-file
// change still produces an incremental build (build state preserved).
func TestConfigChange_NoOpChange(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	f.WriteFile("Tiltfile", `
docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile')
k8s_yaml('snack.yaml')`)
	f.WriteFile("Dockerfile", `FROM iron/go:dev1`)
	f.WriteFile("snack.yaml", simpleYAML)
	f.WriteFile("src/main.go", "hello")

	f.loadAndStart()

	// First call: with the old manifests
	call := f.nextCall("initial call")
	assert.Equal(t, "FROM iron/go:dev1", call.firstImgTarg().DockerBuildInfo().DockerfileContents)
	assert.Equal(t, "snack", string(call.k8s().Name))

	// Write same contents to Dockerfile -- an "edit" event for a config file,
	// but it doesn't change the manifest at all.
	f.WriteConfigFiles("Dockerfile", `FROM iron/go:dev1`)
	f.assertNoCall("Dockerfile hasn't changed, so there shouldn't be any builds")

	// Second call: Editing the Dockerfile means we have to reevaluate the Tiltfile.
	// Editing the random file means we have to do a rebuild. BUT! The Dockerfile
	// hasn't changed, so the manifest hasn't changed, so we can do an incremental build.
	changed := f.WriteFile("src/main.go", "goodbye")
	f.fsWatcher.Events <- watch.NewFileEvent(changed)

	call = f.nextCall("build from file change")
	assert.Equal(t, "snack", string(call.k8s().Name))
	assert.ElementsMatch(t, []string{
		f.JoinPath("src/main.go"),
	}, call.oneImageState().FilesChanged())
	assert.True(t, call.oneImageState().HasLastResult(), "Unchanged manifest --> we do NOT clear the build state")

	err := f.Stop()
	assert.Nil(t, err)
	f.assertAllBuildsConsumed()
}
   951  
// TestConfigChange_TiltfileErrorAndFixWithNoChanges verifies that a broken
// Tiltfile blocks builds, and that restoring the original Tiltfile clears the
// error without queuing a new build (nothing about the manifest changed).
func TestConfigChange_TiltfileErrorAndFixWithNoChanges(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	origTiltfile := `
docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile')
k8s_yaml('snack.yaml')`
	f.WriteFile("Tiltfile", origTiltfile)
	f.WriteFile("Dockerfile", `FROM iron/go:dev`)
	f.WriteFile("snack.yaml", simpleYAML)

	f.loadAndStart()

	// First call: all is well
	_ = f.nextCall("first call")

	// Second call: change Tiltfile, break manifest
	f.WriteConfigFiles("Tiltfile", "broken")
	f.WaitUntil("tiltfile error set", func(st store.EngineState) bool {
		return st.LastMainTiltfileError() != nil
	})
	f.assertNoCall("Tiltfile error should prevent BuildAndDeploy from being called")

	// Third call: put Tiltfile back. No change to manifest or to synced files, so expect no build.
	f.WriteConfigFiles("Tiltfile", origTiltfile)
	f.WaitUntil("tiltfile error cleared", func(st store.EngineState) bool {
		return st.LastMainTiltfileError() == nil
	})

	// Empty manifest name means nothing is queued to build.
	f.withState(func(state store.EngineState) {
		assert.Equal(t, "", buildcontrol.NextManifestNameToBuild(state).String())
	})
}
   985  
// TestConfigChange_TiltfileErrorAndFixWithFileChange verifies that fixing a
// broken Tiltfile with a DIFFERENT manifest definition triggers a fresh build
// with cleared build state, and that the new config propagates to the manifest.
func TestConfigChange_TiltfileErrorAndFixWithFileChange(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	// tiltfileWithCmd builds a Tiltfile whose live_update run command is `cmd`.
	tiltfileWithCmd := func(cmd string) string {
		return fmt.Sprintf(`
docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile',
    live_update=[
        sync('./src', '/src'),
        run('%s')
    ]
)
k8s_yaml('snack.yaml')
`, cmd)
	}

	f.WriteFile("Tiltfile", tiltfileWithCmd("original"))
	f.WriteFile("Dockerfile", `FROM iron/go:dev`)
	f.WriteFile("snack.yaml", simpleYAML)

	f.loadAndStart()

	// First call: all is well
	_ = f.nextCall("first call")

	// Second call: change Tiltfile, break manifest
	f.WriteConfigFiles("Tiltfile", "broken")
	f.WaitUntil("tiltfile error set", func(st store.EngineState) bool {
		return st.LastMainTiltfileError() != nil
	})

	f.assertNoCall("Tiltfile error should prevent BuildAndDeploy from being called")

	// Third call: put Tiltfile back. manifest changed, so expect a build
	f.WriteConfigFiles("Tiltfile", tiltfileWithCmd("changed"))

	call := f.nextCall("fixed broken config and rebuilt manifest")
	assert.False(t, call.oneImageState().HasLastResult(),
		"expected this call to have NO image (since we should have cleared it to force an image build)")

	f.WaitUntil("tiltfile error cleared", func(state store.EngineState) bool {
		return state.LastMainTiltfileError() == nil
	})

	// The new run('changed') command should be visible on the manifest's live-update spec.
	f.withManifestTarget("snack", func(mt store.ManifestTarget) {
		assert.Equal(t,
			model.ToUnixCmd("changed").Argv,
			mt.Manifest.ImageTargetAt(0).LiveUpdateSpec.Execs[0].Args,
			"Tiltfile change should have propagated to manifest")
	})

	err := f.Stop()
	assert.Nil(t, err)
	f.assertAllBuildsConsumed()
}
  1041  
// TestConfigChange_TriggerModeChangePropagatesButDoesntInvalidateBuild verifies
// that changing only the trigger mode in the Tiltfile updates the manifest but
// does NOT invalidate the existing build (no rebuild is triggered).
func TestConfigChange_TriggerModeChangePropagatesButDoesntInvalidateBuild(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	origTiltfile := `
docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile')
k8s_yaml('snack.yaml')`
	f.WriteFile("Tiltfile", origTiltfile)
	f.WriteFile("Dockerfile", `FROM iron/go:dev1`)
	f.WriteFile("snack.yaml", simpleYAML)

	f.loadAndStart()

	_ = f.nextCall("initial build")
	f.WaitUntilManifest("manifest has triggerMode = auto (default)", "snack", func(mt store.ManifestTarget) bool {
		return mt.Manifest.TriggerMode == model.TriggerModeAuto
	})

	// Update Tiltfile to change the trigger mode of the manifest
	tiltfileWithTriggerMode := fmt.Sprintf(`%s

trigger_mode(TRIGGER_MODE_MANUAL)`, origTiltfile)
	f.WriteConfigFiles("Tiltfile", tiltfileWithTriggerMode)

	f.assertNoCall("A change to TriggerMode shouldn't trigger an update (doesn't invalidate current build)")
	f.WaitUntilManifest("triggerMode has changed on manifest", "snack", func(mt store.ManifestTarget) bool {
		return mt.Manifest.TriggerMode == model.TriggerModeManualWithAutoInit
	})

	err := f.Stop()
	assert.Nil(t, err)
	f.assertAllBuildsConsumed()
}
  1075  
// TestConfigChange_ManifestWithPendingChangesBuildsIfTriggerModeChangedToAuto
// verifies that pending file changes held back by TRIGGER_MODE_MANUAL are built
// automatically once the Tiltfile switches the resource to TRIGGER_MODE_AUTO —
// and that the existing build state is preserved (trigger mode change alone does
// not invalidate a build).
func TestConfigChange_ManifestWithPendingChangesBuildsIfTriggerModeChangedToAuto(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	baseTiltfile := `trigger_mode(%s)
docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile')
k8s_yaml('snack.yaml')`
	triggerManualTiltfile := fmt.Sprintf(baseTiltfile, "TRIGGER_MODE_MANUAL")
	f.WriteFile("Tiltfile", triggerManualTiltfile)
	f.WriteFile("Dockerfile", `FROM iron/go:dev1`)
	f.WriteFile("snack.yaml", simpleYAML)

	f.loadAndStart()

	// First call: with the old manifests
	_ = f.nextCall("initial build")
	var imageTargetID model.TargetID
	f.WaitUntilManifest("manifest has triggerMode = manual_after_initial", "snack", func(mt store.ManifestTarget) bool {
		imageTargetID = mt.Manifest.ImageTargetAt(0).ID() // grab for later
		return mt.Manifest.TriggerMode == model.TriggerModeManualWithAutoInit
	})

	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("src/main.go"))
	f.WaitUntil("pending change appears", func(st store.EngineState) bool {
		return st.BuildStatus(imageTargetID).HasPendingFileChanges()
	})
	f.assertNoCall("even tho there are pending changes, manual manifest shouldn't build w/o explicit trigger")

	// Update Tiltfile to change the trigger mode of the manifest
	triggerAutoTiltfile := fmt.Sprintf(baseTiltfile, "TRIGGER_MODE_AUTO")
	f.WriteConfigFiles("Tiltfile", triggerAutoTiltfile)

	call := f.nextCall("manifest updated b/c it's now TriggerModeAuto")
	assert.True(t, call.oneImageState().HasLastResult(),
		"we did NOT clear the build state (b/c a change to Manifest.TriggerMode does NOT invalidate the build")
	f.WaitUntilManifest("triggerMode has changed on manifest", "snack", func(mt store.ManifestTarget) bool {
		return mt.Manifest.TriggerMode == model.TriggerModeAuto
	})
	f.WaitUntil("manifest is no longer in trigger queue", func(st store.EngineState) bool {
		return len(st.TriggerQueue) == 0
	})

	err := f.Stop()
	assert.Nil(t, err)
	f.assertAllBuildsConsumed()
}
  1122  
// TestConfigChange_ManifestIncludingInitialBuildsIfTriggerModeChangedToManualAfterInitial
// verifies that a fully-manual manifest skipped at startup gets built when a
// config reload changes its trigger mode to one that includes the initial build.
func TestConfigChange_ManifestIncludingInitialBuildsIfTriggerModeChangedToManualAfterInitial(t *testing.T) {
	f := newTestFixture(t)

	foo := f.newManifest("foo").WithTriggerMode(model.TriggerModeManual)
	bar := f.newManifest("bar")

	f.Start([]model.Manifest{foo, bar})

	// foo should be skipped, and just bar built
	call := f.nextCallComplete("initial build")
	require.Equal(t, bar.ImageTargetAt(0), call.firstImgTarg())

	// since foo is "Manual", it should not be built on startup
	// make sure there's nothing waiting to build
	f.withState(func(state store.EngineState) {
		n := buildcontrol.NextManifestNameToBuild(state)
		require.Equal(t, model.ManifestName(""), n)
	})

	// change the trigger mode
	// (simulate a Tiltfile reload by dispatching ConfigsReloadedAction directly)
	foo = foo.WithTriggerMode(model.TriggerModeManualWithAutoInit)
	f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{
		Name:       model.MainTiltfileManifestName,
		FinishTime: f.Now(),
		Manifests:  []model.Manifest{foo, bar},
	})

	// now that it is a trigger mode that should build on startup, a build should kick off
	// even though we didn't trigger anything
	call = f.nextCallComplete("second build")
	require.Equal(t, foo.ImageTargetAt(0), call.firstImgTarg())

	err := f.Stop()
	assert.Nil(t, err)
	f.assertAllBuildsConsumed()
}
  1159  
// TestConfigChange_FilenamesLoggedInManifestBuild verifies that when a config
// change triggers a rebuild, the changed filename is recorded in the manifest's
// log output.
func TestConfigChange_FilenamesLoggedInManifestBuild(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	f.WriteFile("Tiltfile", `
k8s_yaml('snack.yaml')
docker_build('gcr.io/windmill-public-containers/servantes/snack', './src')`)
	f.WriteFile("src/Dockerfile", `FROM iron/go:dev`)
	f.WriteFile("snack.yaml", simpleYAML)

	f.loadAndStart()

	f.WaitUntilManifestState("snack loaded", "snack", func(ms store.ManifestState) bool {
		return len(ms.BuildHistory) == 1
	})

	// make a config file change to kick off a new build
	// (note: plain WriteFile plus a manually-dispatched fs event, rather than WriteConfigFiles)
	f.WriteFile("Tiltfile", `
k8s_yaml('snack.yaml')
docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', ignore='Dockerfile')`)
	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("Tiltfile"))

	f.WaitUntilManifestState("snack reloaded", "snack", func(ms store.ManifestState) bool {
		return len(ms.BuildHistory) == 2
	})

	// The rebuild's log should name the changed Tiltfile.
	f.withState(func(es store.EngineState) {
		expected := fmt.Sprintf("1 File Changed: [%s]", f.JoinPath("Tiltfile"))
		require.Contains(t, es.LogStore.ManifestLog("snack"), expected)
	})

	err := f.Stop()
	assert.Nil(t, err)
}
  1194  
// TestConfigChange_LocalResourceChange verifies that editing a local_resource's
// command in the Tiltfile produces an updated manifest and triggers a rebuild
// with the new command.
func TestConfigChange_LocalResourceChange(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	f.WriteFile("Tiltfile", `print('tiltfile 1')
local_resource('local', 'echo one fish two fish', deps='foo.bar')`)

	f.loadAndStart()

	// First call: with the old manifests
	call := f.nextCall("initial call")
	assert.Equal(t, "local", string(call.local().Name))
	assert.Equal(t, "echo one fish two fish", model.ArgListToString(call.local().UpdateCmdSpec.Args))

	// Change the definition of the resource -- this changes the manifest which should trigger an updated
	f.WriteConfigFiles("Tiltfile", `print('tiltfile 2')
local_resource('local', 'echo red fish blue fish', deps='foo.bar')`)
	call = f.nextCall("rebuild from config change")
	assert.Equal(t, "echo red fish blue fish", model.ArgListToString(call.local().UpdateCmdSpec.Args))

	err := f.Stop()
	assert.Nil(t, err)
	f.assertAllBuildsConsumed()
}
  1219  
  1220  func TestDockerRebuildWithChangedFiles(t *testing.T) {
  1221  	f := newTestFixture(t)
  1222  	df := `FROM golang
  1223  ADD ./ ./
  1224  go build ./...
  1225  `
  1226  	manifest := f.newManifest("foobar")
  1227  	iTarget := manifest.ImageTargetAt(0).
  1228  		WithLiveUpdateSpec("foobar", v1alpha1.LiveUpdateSpec{}).
  1229  		WithDockerImage(v1alpha1.DockerImageSpec{
  1230  			DockerfileContents: df,
  1231  			Context:            f.Path(),
  1232  		})
  1233  	manifest = manifest.WithImageTarget(iTarget)
  1234  
  1235  	f.Start([]model.Manifest{manifest})
  1236  
  1237  	call := f.nextCallComplete("first build")
  1238  	assert.True(t, call.oneImageState().IsEmpty())
  1239  
  1240  	// Simulate a change to main.go
  1241  	mainPath := filepath.Join(f.Path(), "main.go")
  1242  	f.fsWatcher.Events <- watch.NewFileEvent(mainPath)
  1243  
  1244  	// Check that this triggered a rebuild.
  1245  	call = f.nextCallComplete("rebuild triggered")
  1246  	assert.Equal(t, []string{mainPath}, call.oneImageState().FilesChanged())
  1247  
  1248  	err := f.Stop()
  1249  	assert.NoError(t, err)
  1250  	f.assertAllBuildsConsumed()
  1251  }
  1252  
  1253  func TestHudUpdated(t *testing.T) {
  1254  	f := newTestFixture(t)
  1255  
  1256  	manifest := f.newManifest("foobar")
  1257  
  1258  	f.Start([]model.Manifest{manifest})
  1259  	call := f.nextCall()
  1260  	assert.True(t, call.oneImageState().IsEmpty())
  1261  
  1262  	f.WaitUntilHUD("hud update", func(v view.View) bool {
  1263  		return len(v.Resources) == 2
  1264  	})
  1265  
  1266  	err := f.Stop()
  1267  	assert.Equal(t, nil, err)
  1268  
  1269  	assert.Equal(t, 2, len(f.fakeHud().LastView.Resources))
  1270  	assert.Equal(t, store.MainTiltfileManifestName, f.fakeHud().LastView.Resources[0].Name)
  1271  	rv := f.fakeHud().LastView.Resources[1]
  1272  	assert.Equal(t, manifest.Name, rv.Name)
  1273  	f.assertAllBuildsConsumed()
  1274  }
  1275  
  1276  func TestDisabledHudUpdated(t *testing.T) {
  1277  	if runtime.GOOS == "windows" {
  1278  		t.Skip("TODO(nick): Investigate")
  1279  	}
  1280  	f := newTestFixture(t)
  1281  
  1282  	manifest := f.newManifest("foobar")
  1283  	opt := func(ia InitAction) InitAction {
  1284  		ia.TerminalMode = store.TerminalModeStream
  1285  		return ia
  1286  	}
  1287  
  1288  	f.Start([]model.Manifest{manifest}, opt)
  1289  	call := f.nextCall()
  1290  	assert.True(t, call.oneImageState().IsEmpty())
  1291  
  1292  	// Make sure we're done logging stuff, then grab # processed bytes
  1293  	f.WaitUntil("foobar logs appear", func(es store.EngineState) bool {
  1294  		return strings.Contains(f.log.String(), "Initial Build")
  1295  	})
  1296  
  1297  	assert.True(t, f.ts.ProcessedLogs > 0)
  1298  	oldCheckpoint := f.ts.ProcessedLogs
  1299  
  1300  	// Log something new, make sure it's reflected
  1301  	msg := []byte("hello world!\n")
  1302  	f.store.Dispatch(store.NewGlobalLogAction(logger.InfoLvl, msg))
  1303  
  1304  	f.WaitUntil("hello world logs appear", func(es store.EngineState) bool {
  1305  		return strings.Contains(f.log.String(), "hello world!")
  1306  	})
  1307  
  1308  	assert.True(t, f.ts.ProcessedLogs > oldCheckpoint)
  1309  
  1310  	err := f.Stop()
  1311  	assert.Equal(t, nil, err)
  1312  
  1313  	f.assertAllBuildsConsumed()
  1314  }
  1315  
// TestPodEvent verifies that a pod status event (here CrashLoopBackOff) for a
// deployed manifest propagates to the HUD's resource view.
func TestPodEvent(t *testing.T) {
	f := newTestFixture(t)
	manifest := f.newManifest("foobar")
	pb := f.registerForDeployer(manifest)
	f.Start([]model.Manifest{manifest})

	call := f.nextCall()
	assert.True(t, call.oneImageState().IsEmpty())

	pod := pb.WithPhase("CrashLoopBackOff").Build()
	f.podEvent(pod)

	f.WaitUntilHUDResource("hud update", "foobar", func(res view.Resource) bool {
		return res.K8sInfo().PodName == pod.Name
	})

	rv := f.hudResource("foobar")
	assert.Equal(t, pod.Name, rv.K8sInfo().PodName)
	assert.Equal(t, "CrashLoopBackOff", rv.K8sInfo().PodStatus)

	assert.NoError(t, f.Stop())
	f.assertAllBuildsConsumed()
}
  1339  
  1340  func TestPodEventContainerStatus(t *testing.T) {
  1341  	f := newTestFixture(t)
  1342  	manifest := f.newManifest("foobar")
  1343  	pb := f.registerForDeployer(manifest)
  1344  	f.Start([]model.Manifest{manifest})
  1345  
  1346  	var ref reference.NamedTagged
  1347  	f.WaitUntilManifestState("image appears", "foobar", func(ms store.ManifestState) bool {
  1348  
  1349  		bs, ok := ms.BuildStatus(manifest.ImageTargetAt(0).ID())
  1350  		if !ok {
  1351  			return false
  1352  		}
  1353  		result := bs.LastResult
  1354  		ref, _ = container.ParseNamedTagged(store.ClusterImageRefFromBuildResult(result))
  1355  		return ref != nil
  1356  	})
  1357  
  1358  	pod := pb.Build()
  1359  	pod.Status = k8s.FakePodStatus(ref, "Running")
  1360  	pod.Status.ContainerStatuses[0].ContainerID = ""
  1361  	pod.Spec = k8s.FakePodSpec(ref)
  1362  	f.podEvent(pod)
  1363  
  1364  	podState := v1alpha1.Pod{}
  1365  	f.WaitUntilManifestState("container status", "foobar", func(ms store.ManifestState) bool {
  1366  		podState = ms.MostRecentPod()
  1367  		return podState.Name == pod.Name && len(podState.Containers) > 0
  1368  	})
  1369  
  1370  	container := podState.Containers[0]
  1371  	assert.Equal(t, "", container.ID)
  1372  	assert.Equal(t, "main", container.Name)
  1373  	assert.Equal(t, []int32{8080}, container.Ports)
  1374  
  1375  	err := f.Stop()
  1376  	assert.Nil(t, err)
  1377  }
  1378  
  1379  func TestPodEventContainerStatusWithoutImage(t *testing.T) {
  1380  	f := newTestFixture(t)
  1381  	manifest := model.Manifest{
  1382  		Name: model.ManifestName("foobar"),
  1383  	}.WithDeployTarget(k8s.MustTarget("foobar", SanchoYAML))
  1384  	pb := f.registerForDeployer(manifest)
  1385  	ref := container.MustParseNamedTagged("dockerhub/we-didnt-build-this:foo")
  1386  	f.Start([]model.Manifest{manifest})
  1387  
  1388  	f.WaitUntilManifestState("first build complete", "foobar", func(ms store.ManifestState) bool {
  1389  		return len(ms.BuildHistory) > 0
  1390  	})
  1391  
  1392  	pod := pb.Build()
  1393  	pod.Status = k8s.FakePodStatus(ref, "Running")
  1394  
  1395  	// If we have no image target to match container status by image ref,
  1396  	// we should just take the first one, i.e. this one
  1397  	pod.Status.ContainerStatuses[0].Name = "first-container"
  1398  	pod.Status.ContainerStatuses[0].ContainerID = "docker://great-container-id"
  1399  
  1400  	pod.Spec = v1.PodSpec{
  1401  		Containers: []v1.Container{
  1402  			{
  1403  				Name:  "second-container",
  1404  				Image: "gcr.io/windmill-public-containers/tilt-synclet:latest",
  1405  				Ports: []v1.ContainerPort{{ContainerPort: 9999}},
  1406  			},
  1407  			// we match container spec by NAME, so we'll get this one even tho it comes second.
  1408  			{
  1409  				Name:  "first-container",
  1410  				Image: ref.Name(),
  1411  				Ports: []v1.ContainerPort{{ContainerPort: 8080}},
  1412  			},
  1413  		},
  1414  	}
  1415  
  1416  	f.podEvent(pod)
  1417  
  1418  	podState := v1alpha1.Pod{}
  1419  	f.WaitUntilManifestState("container status", "foobar", func(ms store.ManifestState) bool {
  1420  		podState = ms.MostRecentPod()
  1421  		return podState.Name == pod.Name && len(podState.Containers) > 0
  1422  	})
  1423  
  1424  	// If we have no image target to match container by image ref, we just take the first one
  1425  	container := podState.Containers[0]
  1426  	assert.Equal(t, "great-container-id", container.ID)
  1427  	assert.Equal(t, "first-container", container.Name)
  1428  	assert.Equal(t, []int32{8080}, store.AllPodContainerPorts(podState))
  1429  
  1430  	err := f.Stop()
  1431  	assert.Nil(t, err)
  1432  }
  1433  
// TestPodEventUpdateByTimestamp verifies that a newer pod (later creation time,
// different name) replaces an older crashing pod in the HUD's resource view.
func TestPodEventUpdateByTimestamp(t *testing.T) {
	f := newTestFixture(t)
	manifest := f.newManifest("foobar")
	pb := f.registerForDeployer(manifest)
	f.Start([]model.Manifest{manifest})

	call := f.nextCall()
	assert.True(t, call.oneImageState().IsEmpty())

	firstCreationTime := f.Now()
	pod := pb.
		WithCreationTime(firstCreationTime).
		WithPhase("CrashLoopBackOff").
		Build()
	f.podEvent(pod)
	f.WaitUntilHUDResource("hud update crash", "foobar", func(res view.Resource) bool {
		return res.K8sInfo().PodStatus == "CrashLoopBackOff"
	})

	// A fresh pod, created 2 minutes later, should supersede the crashing one.
	pb = podbuilder.New(t, manifest).
		WithPodName("my-new-pod").
		WithCreationTime(firstCreationTime.Add(time.Minute * 2))
	newPod := pb.Build()
	f.podEvent(newPod)
	f.WaitUntilHUDResource("hud update running", "foobar", func(res view.Resource) bool {
		return res.K8sInfo().PodStatus == "Running"
	})

	rv := f.hudResource("foobar")
	assert.Equal(t, newPod.Name, rv.K8sInfo().PodName)
	assert.Equal(t, "Running", rv.K8sInfo().PodStatus)

	assert.NoError(t, f.Stop())
	f.assertAllBuildsConsumed()
}
  1469  
// TestPodForgottenOnDisable verifies that disabling a resource removes its
// tracked pods from the manifest's runtime state.
func TestPodForgottenOnDisable(t *testing.T) {
	f := newTestFixture(t)
	manifest := f.newManifest("foobar")
	pb := f.registerForDeployer(manifest)
	f.Start([]model.Manifest{manifest})

	call := f.nextCall()
	assert.True(t, call.oneImageState().IsEmpty())

	pod := pb.WithPhase("CrashLoopBackOff").Build()
	f.podEvent(pod)

	f.WaitUntilManifestState("pod seen", "foobar", func(ms store.ManifestState) bool {
		return ms.K8sRuntimeState().MostRecentPod().Status == "CrashLoopBackOff"
	})

	// Disabling the resource should drop its pods from runtime state.
	f.setDisableState("foobar", true)

	f.WaitUntilManifestState("pod unseen", "foobar", func(ms store.ManifestState) bool {
		return ms.K8sRuntimeState().PodLen() == 0
	})

	assert.NoError(t, f.Stop())
	f.assertAllBuildsConsumed()
}
  1495  
  1496  func TestPodEventUpdateByPodName(t *testing.T) {
  1497  	f := newTestFixture(t)
  1498  	manifest := f.newManifest("foobar")
  1499  	pb := f.registerForDeployer(manifest)
  1500  	f.Start([]model.Manifest{manifest})
  1501  
  1502  	call := f.nextCallComplete()
  1503  	assert.True(t, call.oneImageState().IsEmpty())
  1504  
  1505  	creationTime := f.Now()
  1506  	pb = pb.
  1507  		WithCreationTime(creationTime).
  1508  		WithPhase("CrashLoopBackOff")
  1509  	f.podEvent(pb.Build())
  1510  
  1511  	f.WaitUntilHUDResource("pod crashes", "foobar", func(res view.Resource) bool {
  1512  		return res.K8sInfo().PodStatus == "CrashLoopBackOff"
  1513  	})
  1514  
  1515  	f.podEvent(pb.WithPhase("Running").Build())
  1516  
  1517  	f.WaitUntilHUDResource("pod comes back", "foobar", func(res view.Resource) bool {
  1518  		return res.K8sInfo().PodStatus == "Running"
  1519  	})
  1520  
  1521  	rv := f.hudResource("foobar")
  1522  	assert.Equal(t, pb.Build().Name, rv.K8sInfo().PodName)
  1523  	assert.Equal(t, "Running", rv.K8sInfo().PodStatus)
  1524  
  1525  	err := f.Stop()
  1526  	if err != nil {
  1527  		t.Fatal(err)
  1528  	}
  1529  
  1530  	f.assertAllBuildsConsumed()
  1531  }
  1532  
// TestPodEventIgnoreOlderPod verifies that an event for a pod with an OLDER
// creation time than the currently-tracked pod is ignored.
func TestPodEventIgnoreOlderPod(t *testing.T) {
	f := newTestFixture(t)
	manifest := f.newManifest("foobar")
	pb := f.registerForDeployer(manifest)
	f.Start([]model.Manifest{manifest})

	call := f.nextCall()
	assert.True(t, call.oneImageState().IsEmpty())

	creationTime := f.Now()
	pb = pb.
		WithPodName("my-new-pod").
		WithPhase("CrashLoopBackOff").
		WithCreationTime(creationTime)
	pod := pb.Build()
	f.podEvent(pod)
	f.WaitUntilHUDResource("hud update", "foobar", func(res view.Resource) bool {
		return res.K8sInfo().PodStatus == "CrashLoopBackOff"
	})

	// An event for a pod created a minute EARLIER should be ignored.
	pb = pb.WithCreationTime(creationTime.Add(time.Minute * -1))
	oldPod := pb.Build()
	f.podEvent(oldPod)
	// Brief pause to let any (incorrect) state update happen before we assert
	// that nothing changed; there is no positive condition to wait on here.
	time.Sleep(10 * time.Millisecond)

	assert.NoError(t, f.Stop())
	f.assertAllBuildsConsumed()

	rv := f.hudResource("foobar")
	assert.Equal(t, pod.Name, rv.K8sInfo().PodName)
	assert.Equal(t, "CrashLoopBackOff", rv.K8sInfo().PodStatus)
}
  1565  
// TestPodContainerStatus verifies that once a pod's spec/status carry the
// deployed image ref, the container's ports become visible in the manifest's
// most recent pod state.
func TestPodContainerStatus(t *testing.T) {
	f := newTestFixture(t)
	manifest := f.newManifest("fe")
	pb := f.registerForDeployer(manifest)
	f.Start([]model.Manifest{manifest})

	_ = f.nextCall()

	// Wait until the build produces a deployed image ref we can fake a pod for.
	var ref reference.NamedTagged
	f.WaitUntilManifestState("image appears", "fe", func(ms store.ManifestState) bool {
		bs, ok := ms.BuildStatus(manifest.ImageTargetAt(0).ID())
		if !ok {
			return false
		}
		result := bs.LastResult
		ref, _ = container.ParseNamedTagged(store.ClusterImageRefFromBuildResult(result))
		return ref != nil
	})

	startedAt := f.Now()
	pb = pb.WithCreationTime(startedAt)
	pod := pb.Build()
	f.podEvent(pod)
	f.WaitUntilManifestState("pod appears", "fe", func(ms store.ManifestState) bool {
		return ms.MostRecentPod().Name == pod.Name
	})

	// Second event for the same pod, now with a full fake spec/status.
	pod = pb.Build()
	pod.Spec = k8s.FakePodSpec(ref)
	pod.Status = k8s.FakePodStatus(ref, "Running")
	f.podEvent(pod)

	f.WaitUntilManifestState("container is ready", "fe", func(ms store.ManifestState) bool {
		ports := store.AllPodContainerPorts(ms.MostRecentPod())
		return len(ports) == 1 && ports[0] == 8080
	})

	err := f.Stop()
	assert.NoError(t, err)

	f.assertAllBuildsConsumed()
}
  1608  
// TestUpper_WatchDockerIgnoredFiles verifies that a file-change event matching
// an image target's ignore patterns does not trigger a build.
func TestUpper_WatchDockerIgnoredFiles(t *testing.T) {
	f := newTestFixture(t)
	manifest := f.newManifest("foobar")
	// Ignore "dignore.txt" under the test dir on the image target.
	manifest = manifest.WithImageTarget(manifest.ImageTargetAt(0).
		WithIgnores([]v1alpha1.IgnoreDef{
			{
				BasePath: f.Path(),
				Patterns: []string{"dignore.txt"},
			},
		}))

	f.Start([]model.Manifest{manifest})

	call := f.nextCall()
	assert.Equal(t, manifest.ImageTargetAt(0), call.firstImgTarg())

	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("dignore.txt"))
	f.assertNoCall("event for ignored file should not trigger build")

	err := f.Stop()
	assert.NoError(t, err)
	f.assertAllBuildsConsumed()
}
  1632  
// TestUpper_ShowErrorPodLog verifies that pod log lines emitted before and
// after a rebuild both land in the same pod log span when the pod stays up.
func TestUpper_ShowErrorPodLog(t *testing.T) {
	f := newTestFixture(t)

	name := model.ManifestName("foobar")
	manifest := f.newManifest(name.String())
	pb := f.registerForDeployer(manifest)

	f.Start([]model.Manifest{manifest})
	f.waitForCompletedBuildCount(1)

	pod := pb.Build()
	f.startPod(pod, name)
	f.podLog(pod, name, "first string")

	// Trigger a second build via a file change; the pod itself stays up.
	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("go/a"))

	f.waitForCompletedBuildCount(2)
	f.podLog(pod, name, "second string")

	// Both log lines should appear in the span keyed by the current pod.
	f.withState(func(state store.EngineState) {
		ms, _ := state.ManifestState(name)
		spanID := k8sconv.SpanIDForPod(name, k8s.PodID(ms.MostRecentPod().Name))
		assert.Equal(t, "first string\nsecond string\n", state.LogStore.SpanLog(spanID))
	})

	err := f.Stop()
	assert.NoError(t, err)
}
  1661  
  1662  func TestUpperPodLogInCrashLoopThirdInstanceStillUp(t *testing.T) {
  1663  	f := newTestFixture(t)
  1664  
  1665  	name := model.ManifestName("foobar")
  1666  	manifest := f.newManifest(name.String())
  1667  	pb := f.registerForDeployer(manifest)
  1668  
  1669  	f.Start([]model.Manifest{manifest})
  1670  	f.waitForCompletedBuildCount(1)
  1671  
  1672  	f.startPod(pb.Build(), name)
  1673  	f.podLog(pb.Build(), name, "first string")
  1674  	pb = f.restartPod(pb)
  1675  	f.podLog(pb.Build(), name, "second string")
  1676  	pb = f.restartPod(pb)
  1677  	f.podLog(pb.Build(), name, "third string")
  1678  
  1679  	// the third instance is still up, so we want to show the log from the last crashed pod plus the log from the current pod
  1680  	f.withState(func(es store.EngineState) {
  1681  		ms, _ := es.ManifestState(name)
  1682  		spanID := k8sconv.SpanIDForPod(name, k8s.PodID(ms.MostRecentPod().Name))
  1683  		assert.Contains(t, es.LogStore.SpanLog(spanID), "third string\n")
  1684  		assert.Contains(t, es.LogStore.ManifestLog(name), "second string\n")
  1685  		assert.Contains(t, es.LogStore.ManifestLog(name), "third string\n")
  1686  		assert.Contains(t, es.LogStore.ManifestLog(name),
  1687  			"WARNING: Detected container restart. Pod: foobar-fakePodID. Container: sancho.\n")
  1688  		assert.Contains(t, es.LogStore.SpanLog(spanID), "third string\n")
  1689  	})
  1690  
  1691  	err := f.Stop()
  1692  	assert.NoError(t, err)
  1693  }
  1694  
// TestUpperPodLogInCrashLoopPodCurrentlyDown verifies that when the restarted
// container goes not-ready, the pod's span still shows the pre-restart log,
// the restart warning, and the post-restart log, in order.
func TestUpperPodLogInCrashLoopPodCurrentlyDown(t *testing.T) {
	f := newTestFixture(t)

	name := model.ManifestName("foobar")
	manifest := f.newManifest(name.String())
	pb := f.registerForDeployer(manifest)

	f.Start([]model.Manifest{manifest})
	f.waitForCompletedBuildCount(1)

	// One restart: each incarnation logs one line.
	f.startPod(pb.Build(), name)
	f.podLog(pb.Build(), name, "first string")
	pb = f.restartPod(pb)
	f.podLog(pb.Build(), name, "second string")

	// Mark the restarted container not-ready and wait for the status to land.
	pod := pb.Build()
	pod.Status.ContainerStatuses[0].Ready = false
	f.notifyAndWaitForPodStatus(pod, name, func(pod v1alpha1.Pod) bool {
		return !store.AllPodContainersReady(pod)
	})

	// The span log preserves the full crash-loop narrative.
	f.withState(func(state store.EngineState) {
		ms, _ := state.ManifestState(name)
		spanID := k8sconv.SpanIDForPod(name, k8s.PodID(ms.MostRecentPod().Name))
		assert.Equal(t, "first string\nWARNING: Detected container restart. Pod: foobar-fakePodID. Container: sancho.\nsecond string\n",
			state.LogStore.SpanLog(spanID))
	})

	err := f.Stop()
	assert.NoError(t, err)
}
  1726  
// TestUpperRecordPodWithMultipleContainers verifies that a pod with a second
// (sidecar) container gets both containers recorded in the manifest's pod
// state, with per-container name, ID, and readiness.
func TestUpperRecordPodWithMultipleContainers(t *testing.T) {
	f := newTestFixture(t)

	name := model.ManifestName("foobar")
	manifest := f.newManifest(name.String())
	pb := f.registerForDeployer(manifest)

	f.Start([]model.Manifest{manifest})
	f.waitForCompletedBuildCount(1)

	// Add a not-ready sidecar alongside the pod builder's default container.
	pod := pb.Build()
	pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, v1.ContainerStatus{
		Name:        "sidecar",
		Image:       "sidecar-image",
		Ready:       false,
		ContainerID: "docker://sidecar",
	})

	f.startPod(pod, manifest.Name)
	f.notifyAndWaitForPodStatus(pod, manifest.Name, func(pod v1alpha1.Pod) bool {
		if len(pod.Containers) != 2 {
			return false
		}

		// Primary container: ready, with the pod builder's fake container ID.
		c1 := pod.Containers[0]
		require.Equal(t, container.Name("sancho").String(), c1.Name)
		require.Equal(t, podbuilder.FakeContainerID().String(), c1.ID)
		require.True(t, c1.Ready)

		// Sidecar: recorded as not ready with its docker container ID.
		c2 := pod.Containers[1]
		require.Equal(t, container.Name("sidecar").String(), c2.Name)
		require.Equal(t, container.ID("sidecar").String(), c2.ID)
		require.False(t, c2.Ready)

		return true
	})

	err := f.Stop()
	assert.NoError(t, err)
}
  1767  
// TestUpperProcessOtherContainersIfOneErrors verifies that a container whose
// ID fails to parse is dropped without preventing the other containers from
// being recorded on the pod.
func TestUpperProcessOtherContainersIfOneErrors(t *testing.T) {
	f := newTestFixture(t)

	name := model.ManifestName("foobar")
	manifest := f.newManifest(name.String())
	pb := f.registerForDeployer(manifest)

	f.Start([]model.Manifest{manifest})
	f.waitForCompletedBuildCount(1)

	pod := pb.Build()
	pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, v1.ContainerStatus{
		Name:  "extra1",
		Image: "extra1-image",
		Ready: false,
		// when populating container info for this pod, we'll error when we try to parse
		// this cID -- we should still populate info for the other containers, though.
		ContainerID: "malformed",
	}, v1.ContainerStatus{
		Name:        "extra2",
		Image:       "extra2-image",
		Ready:       false,
		ContainerID: "docker://extra2",
	})

	f.startPod(pod, manifest.Name)
	f.notifyAndWaitForPodStatus(pod, manifest.Name, func(pod v1alpha1.Pod) bool {
		// Only the primary container and "extra2" survive; "extra1" is
		// skipped because its container ID is malformed.
		if len(pod.Containers) != 2 {
			return false
		}

		require.Equal(t, container.Name("sancho").String(), pod.Containers[0].Name)
		require.Equal(t, container.Name("extra2").String(), pod.Containers[1].Name)

		return true
	})

	err := f.Stop()
	assert.NoError(t, err)
}
  1808  
  1809  func TestUpper_ServiceEvent(t *testing.T) {
  1810  	f := newTestFixture(t)
  1811  
  1812  	manifest := f.newManifest("foobar")
  1813  
  1814  	f.Start([]model.Manifest{manifest})
  1815  	f.waitForCompletedBuildCount(1)
  1816  
  1817  	result := f.b.resultsByID[manifest.K8sTarget().ID()]
  1818  	uid := result.(store.K8sBuildResult).DeployedRefs[0].UID
  1819  	svc := servicebuilder.New(t, manifest).WithUID(uid).WithPort(8080).WithIP("1.2.3.4").Build()
  1820  	err := k8swatch.DispatchServiceChange(f.store, svc, manifest.Name, "")
  1821  	require.NoError(t, err)
  1822  
  1823  	f.WaitUntilManifestState("lb updated", "foobar", func(ms store.ManifestState) bool {
  1824  		return len(ms.K8sRuntimeState().LBs) > 0
  1825  	})
  1826  
  1827  	err = f.Stop()
  1828  	assert.NoError(t, err)
  1829  
  1830  	ms, _ := f.upper.store.RLockState().ManifestState(manifest.Name)
  1831  	defer f.upper.store.RUnlockState()
  1832  	lbs := ms.K8sRuntimeState().LBs
  1833  	assert.Equal(t, 1, len(lbs))
  1834  	url, ok := lbs[k8s.ServiceName(svc.Name)]
  1835  	if !ok {
  1836  		t.Fatalf("%v did not contain key 'myservice'", lbs)
  1837  	}
  1838  	assert.Equal(t, "http://1.2.3.4:8080/", url.String())
  1839  }
  1840  
// TestUpper_ServiceEventRemovesURL verifies that when a service loses its IP,
// the previously recorded load-balancer URL is removed from runtime state.
func TestUpper_ServiceEventRemovesURL(t *testing.T) {
	f := newTestFixture(t)

	manifest := f.newManifest("foobar")

	f.Start([]model.Manifest{manifest})
	f.waitForCompletedBuildCount(1)

	// First service event: service has an IP, so a URL should be recorded.
	result := f.b.resultsByID[manifest.K8sTarget().ID()]
	uid := result.(store.K8sBuildResult).DeployedRefs[0].UID
	sb := servicebuilder.New(t, manifest).WithUID(uid).WithPort(8080).WithIP("1.2.3.4")
	svc := sb.Build()
	err := k8swatch.DispatchServiceChange(f.store, svc, manifest.Name, "")
	require.NoError(t, err)

	f.WaitUntilManifestState("lb url added", "foobar", func(ms store.ManifestState) bool {
		url := ms.K8sRuntimeState().LBs[k8s.ServiceName(svc.Name)]
		if url == nil {
			return false
		}
		return "http://1.2.3.4:8080/" == url.String()
	})

	// Second service event: same service with an empty IP drops the URL.
	svc = sb.WithIP("").Build()
	err = k8swatch.DispatchServiceChange(f.store, svc, manifest.Name, "")
	require.NoError(t, err)

	f.WaitUntilManifestState("lb url removed", "foobar", func(ms store.ManifestState) bool {
		url := ms.K8sRuntimeState().LBs[k8s.ServiceName(svc.Name)]
		return url == nil
	})

	err = f.Stop()
	assert.NoError(t, err)
}
  1876  
  1877  func TestUpper_PodLogs(t *testing.T) {
  1878  	f := newTestFixture(t)
  1879  
  1880  	name := model.ManifestName("fe")
  1881  	manifest := f.newManifest(string(name))
  1882  	pb := f.registerForDeployer(manifest)
  1883  
  1884  	f.Start([]model.Manifest{manifest})
  1885  	f.waitForCompletedBuildCount(1)
  1886  
  1887  	pod := pb.Build()
  1888  	f.startPod(pod, name)
  1889  	f.podLog(pod, name, "Hello world!\n")
  1890  
  1891  	err := f.Stop()
  1892  	assert.NoError(t, err)
  1893  }
  1894  
  1895  func TestK8sEventGlobalLogAndManifestLog(t *testing.T) {
  1896  	f := newTestFixture(t)
  1897  
  1898  	name := model.ManifestName("fe")
  1899  	manifest := f.newManifest(string(name))
  1900  
  1901  	f.Start([]model.Manifest{manifest})
  1902  	f.waitForCompletedBuildCount(1)
  1903  
  1904  	objRef := v1.ObjectReference{UID: f.lastDeployedUID(name)}
  1905  	warnEvt := &v1.Event{
  1906  		InvolvedObject: objRef,
  1907  		Message:        "something has happened zomg",
  1908  		Type:           v1.EventTypeWarning,
  1909  		ObjectMeta: metav1.ObjectMeta{
  1910  			CreationTimestamp: apis.NewTime(f.Now()),
  1911  			Namespace:         k8s.DefaultNamespace.String(),
  1912  		},
  1913  	}
  1914  	f.kClient.UpsertEvent(warnEvt)
  1915  
  1916  	f.WaitUntil("event message appears in manifest log", func(st store.EngineState) bool {
  1917  		return strings.Contains(st.LogStore.ManifestLog(name), "something has happened zomg")
  1918  	})
  1919  
  1920  	f.withState(func(st store.EngineState) {
  1921  		assert.Contains(t, st.LogStore.String(), "something has happened zomg", "event message not in global log")
  1922  	})
  1923  
  1924  	err := f.Stop()
  1925  	assert.NoError(t, err)
  1926  }
  1927  
  1928  func TestK8sEventNotLoggedIfNoManifestForUID(t *testing.T) {
  1929  	f := newTestFixture(t)
  1930  
  1931  	name := model.ManifestName("fe")
  1932  	manifest := f.newManifest(string(name))
  1933  
  1934  	f.Start([]model.Manifest{manifest})
  1935  	f.waitForCompletedBuildCount(1)
  1936  
  1937  	warnEvt := &v1.Event{
  1938  		InvolvedObject: v1.ObjectReference{UID: types.UID("someRandomUID")},
  1939  		Message:        "something has happened zomg",
  1940  		Type:           v1.EventTypeWarning,
  1941  		ObjectMeta: metav1.ObjectMeta{
  1942  			CreationTimestamp: apis.NewTime(f.Now()),
  1943  			Namespace:         k8s.DefaultNamespace.String(),
  1944  		},
  1945  	}
  1946  	f.kClient.UpsertEvent(warnEvt)
  1947  
  1948  	time.Sleep(10 * time.Millisecond)
  1949  
  1950  	assert.NotContains(t, f.log.String(), "something has happened zomg",
  1951  		"should not log event message b/c it doesn't have a UID -> Manifest mapping")
  1952  }
  1953  
  1954  func TestHudExitNoError(t *testing.T) {
  1955  	f := newTestFixture(t)
  1956  	f.Start([]model.Manifest{})
  1957  	f.store.Dispatch(hud.NewExitAction(nil))
  1958  	err := f.WaitForExit()
  1959  	assert.NoError(t, err)
  1960  }
  1961  
  1962  func TestHudExitWithError(t *testing.T) {
  1963  	f := newTestFixture(t)
  1964  	f.Start([]model.Manifest{})
  1965  	e := errors.New("helllllo")
  1966  	f.store.Dispatch(hud.NewExitAction(e))
  1967  	_ = f.WaitForNoExit()
  1968  }
  1969  
  1970  func TestDockerComposeUp(t *testing.T) {
  1971  	f := newTestFixture(t)
  1972  	redis, server := f.setupDCFixture()
  1973  
  1974  	f.Start([]model.Manifest{redis, server})
  1975  	call := f.nextCall()
  1976  	assert.True(t, call.dcState().IsEmpty())
  1977  	assert.False(t, call.dc().ID().Empty())
  1978  	assert.Equal(t, redis.DockerComposeTarget().ID(), call.dc().ID())
  1979  	call = f.nextCall()
  1980  	assert.True(t, call.dcState().IsEmpty())
  1981  	assert.False(t, call.dc().ID().Empty())
  1982  	assert.Equal(t, server.DockerComposeTarget().ID(), call.dc().ID())
  1983  }
  1984  
  1985  func TestDockerComposeRedeployFromFileChange(t *testing.T) {
  1986  	if runtime.GOOS == "windows" {
  1987  		t.Skip("flaky on windows")
  1988  	}
  1989  	f := newTestFixture(t)
  1990  	r, m := f.setupDCFixture()
  1991  
  1992  	f.Start([]model.Manifest{r, m})
  1993  	_ = f.nextCall()
  1994  	_ = f.nextCall()
  1995  
  1996  	// Change a file -- should trigger build
  1997  	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("package.json"))
  1998  	call := f.nextCall()
  1999  	assert.Equal(t, []string{f.JoinPath("package.json")}, call.oneImageState().FilesChanged())
  2000  }
  2001  
  2002  func TestDockerComposeRecordsBuildLogs(t *testing.T) {
  2003  	f := newTestFixture(t)
  2004  	f.useRealTiltfileLoader()
  2005  
  2006  	m, _ := f.setupDCFixture()
  2007  	expected := "yarn install"
  2008  	f.setBuildLogOutput(m.DockerComposeTarget().ID(), expected)
  2009  
  2010  	f.loadAndStart()
  2011  	f.waitForCompletedBuildCount(2)
  2012  
  2013  	// recorded in global log
  2014  	f.withState(func(st store.EngineState) {
  2015  		assert.Contains(t, st.LogStore.String(), expected)
  2016  
  2017  		ms, _ := st.ManifestState(m.ManifestName())
  2018  		spanID := ms.LastBuild().SpanID
  2019  		assert.Contains(t, st.LogStore.SpanLog(spanID), expected)
  2020  	})
  2021  }
  2022  
// TestDockerComposeBuildCompletedSetsStatusToUpIfSuccessful verifies that
// after a successful build with a running container, the manifest's runtime
// state is a dockercompose.State with the expected container ID and an OK
// runtime status.
func TestDockerComposeBuildCompletedSetsStatusToUpIfSuccessful(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	m1, _ := f.setupDCFixture()

	// Configure the fake builder to report this container ID...
	expected := container.ID("aaaaaa")
	f.b.nextDockerComposeContainerID = expected

	// ...in a running state.
	containerState := docker.NewRunningContainerState()
	f.b.nextDockerComposeContainerState = &containerState

	f.loadAndStart()

	f.waitForCompletedBuildCount(2)

	f.withManifestState(m1.ManifestName(), func(st store.ManifestState) {
		state, ok := st.RuntimeState.(dockercompose.State)
		if !ok {
			t.Fatal("expected RuntimeState to be docker compose, but it wasn't")
		}
		assert.Equal(t, expected, state.ContainerID)
		assert.Equal(t, v1alpha1.RuntimeStatusOK, state.RuntimeStatus())
	})
}
  2048  
// TestDockerComposeStopOnDisable verifies that disabling a docker-compose
// resource issues exactly one `rm` call for that service.
func TestDockerComposeStopOnDisable(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	m, _ := f.setupDCFixture()

	// Fake builder reports a running container for the service.
	expected := container.ID("aaaaaa")
	f.b.nextDockerComposeContainerID = expected

	containerState := docker.NewRunningContainerState()
	f.b.nextDockerComposeContainerState = &containerState

	f.loadAndStart()

	f.waitForCompletedBuildCount(2)

	// Disabling the resource should tear its container down.
	f.setDisableState(m.Name, true)

	require.Eventually(t, func() bool {
		return len(f.dcc.RmCalls()) > 0
	}, stdTimeout, time.Millisecond)

	// Exactly one rm call, targeting exactly this service.
	require.Len(t, f.dcc.RmCalls(), 1)
	require.Len(t, f.dcc.RmCalls()[0].Specs, 1)
	require.Equal(t, m.Name.String(), f.dcc.RmCalls()[0].Specs[0].Service)
}
  2075  
// TestDockerComposeStartOnReenable verifies that re-enabling a previously
// disabled docker-compose resource triggers another build/deploy.
func TestDockerComposeStartOnReenable(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	m, _ := f.setupDCFixture()

	// Fake builder reports a running container for the service.
	expected := container.ID("aaaaaa")
	f.b.nextDockerComposeContainerID = expected

	containerState := docker.NewRunningContainerState()
	f.b.nextDockerComposeContainerState = &containerState

	f.loadAndStart()

	f.waitForCompletedBuildCount(2)

	// Disable and wait for the teardown (rm) to happen...
	f.setDisableState(m.Name, true)

	require.Eventually(t, func() bool {
		return len(f.dcc.RmCalls()) > 0
	}, stdTimeout, time.Millisecond, "DC rm")

	// ...then re-enable; a third completed build signals the restart.
	f.setDisableState(m.Name, false)

	f.waitForCompletedBuildCount(3)
}
  2102  
// TestEmptyTiltfile verifies that loading an empty Tiltfile produces a
// "No resources found" build error, and that the message is logged exactly
// once in the global, manifest, and span logs.
func TestEmptyTiltfile(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()
	f.WriteFile("Tiltfile", "")

	// Run upper.Start in a goroutine; its error is collected after cancel.
	closeCh := make(chan error)
	go func() {
		err := f.upper.Start(f.ctx, []string{}, model.TiltBuild{},
			f.JoinPath("Tiltfile"), store.TerminalModeHUD,
			analytics.OptIn, token.Token("unit test token"),
			"nonexistent.example.com")
		closeCh <- err
	}()
	f.WaitUntil("build is set", func(st store.EngineState) bool {
		return !st.TiltfileStates[model.MainTiltfileManifestName].LastBuild().Empty()
	})
	f.withState(func(st store.EngineState) {
		// The error surfaces on the tiltfile build record and is logged
		// once per log view (global, manifest, span).
		assert.Contains(t, st.TiltfileStates[model.MainTiltfileManifestName].LastBuild().Error.Error(), "No resources found. Check out ")
		assertContainsOnce(t, st.LogStore.String(), "No resources found. Check out ")
		assertContainsOnce(t, st.LogStore.ManifestLog(store.MainTiltfileManifestName), "No resources found. Check out ")

		buildRecord := st.TiltfileStates[model.MainTiltfileManifestName].LastBuild()
		assertContainsOnce(t, st.LogStore.SpanLog(buildRecord.SpanID), "No resources found. Check out ")
	})

	// Cancel the context and make sure Start exits with (at most) a
	// cancellation error.
	f.cancel()

	err := <-closeCh
	testutils.FailOnNonCanceledErr(t, err, "upper.Start failed")
}
  2133  
// TestUpperStart verifies that the arguments passed to upper.Start (args,
// tiltfile path, token, analytics opt, cloud address) are recorded on the
// engine state by the init action.
func TestUpperStart(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	tok := token.Token("unit test token")
	cloudAddress := "nonexistent.example.com"

	closeCh := make(chan error)

	// Run upper.Start in a goroutine; its error is collected after cancel.
	f.WriteFile("Tiltfile", "")
	go func() {
		err := f.upper.Start(f.ctx, []string{"foo", "bar"}, model.TiltBuild{},
			f.JoinPath("Tiltfile"), store.TerminalModeHUD,
			analytics.OptIn, tok, cloudAddress)
		closeCh <- err
	}()
	// TiltStartTime is set when the init action is processed.
	f.WaitUntil("init action processed", func(state store.EngineState) bool {
		return !state.TiltStartTime.IsZero()
	})

	f.withState(func(state store.EngineState) {
		require.Equal(t, []string{"foo", "bar"}, state.UserConfigState.Args)
		require.Equal(t, f.JoinPath("Tiltfile"), state.DesiredTiltfilePath)
		require.Equal(t, tok, state.Token)
		require.Equal(t, analytics.OptIn, state.AnalyticsEffectiveOpt())
		require.Equal(t, cloudAddress, state.CloudAddress)
	})

	f.cancel()

	err := <-closeCh
	testutils.FailOnNonCanceledErr(t, err, "upper.Start failed")
}
  2167  
// TestWatchManifestsWithCommonAncestor verifies build ordering and image
// reuse when two manifests share a base image: a change to the shared
// directory rebuilds both, reusing the common image for the second manifest
// and marking its dependent image as deps-changed.
func TestWatchManifestsWithCommonAncestor(t *testing.T) {
	f := newTestFixture(t)
	m1, m2 := NewManifestsWithCommonAncestor(f)
	f.Start([]model.Manifest{m1, m2})

	f.waitForCompletedBuildCount(2)

	// Initial builds: m1 first, then m2.
	call := f.nextCall("m1 build1")
	assert.Equal(t, m1.K8sTarget(), call.k8s())

	call = f.nextCall("m2 build1")
	assert.Equal(t, m2.K8sTarget(), call.k8s())

	// Change a file under the shared "common" directory.
	f.WriteFile(filepath.Join("common", "a.txt"), "hello world")

	aPath := f.JoinPath("common", "a.txt")
	f.fsWatcher.Events <- watch.NewFileEvent(aPath)

	f.waitForCompletedBuildCount(4)

	// Make sure that both builds are triggered, and that they
	// are triggered in a particular order.
	call = f.nextCall("m1 build2")
	assert.Equal(t, m1.K8sTarget(), call.k8s())

	// m1's image build sees the changed file.
	state := call.state[m1.ImageTargets[0].ID()]
	assert.Equal(t, map[string]bool{aPath: true}, state.FilesChangedSet)

	// Make sure that when the second build is triggered, we did the bookkeeping
	// correctly around reusing the image and propagating DepsChanged when
	// we deploy the second k8s target.
	call = f.nextCall("m2 build2")
	assert.Equal(t, m2.K8sTarget(), call.k8s())

	// The shared base image is reused: same result, no changed files.
	id := m2.ImageTargets[0].ID()
	result := f.b.resultsByID[id]
	assert.Equal(t, result, call.state[id].LastResult)
	assert.Equal(t, 0, len(call.state[id].FilesChangedSet))

	id = m2.ImageTargets[1].ID()
	result = f.b.resultsByID[id]

	// Assert the 2nd image was not re-used from the previous result.
	assert.NotEqual(t, result, call.state[id].LastResult)
	assert.Equal(t, map[model.TargetID]bool{m2.ImageTargets[0].ID(): true},
		call.state[id].DepsChangedSet)

	err := f.Stop()
	assert.NoError(t, err)
	f.assertAllBuildsConsumed()
}
  2219  
// TestConfigChangeThatChangesManifestIsIncludedInManifestsChangedFile checks
// that when a config file (yaml or Dockerfile) changes, the changed file is
// recorded as an edit on the resulting build. Currently skipped as flaky.
func TestConfigChangeThatChangesManifestIsIncludedInManifestsChangedFile(t *testing.T) {
	// https://app.clubhouse.io/windmill/story/5701/test-testconfigchangethatchangesmanifestisincludedinmanifestschangedfile-is-flaky
	t.Skip("TODO(nick): fix this")

	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	tiltfile := `
docker_build('gcr.io/windmill-public-containers/servantes/snack', '.')
k8s_yaml('snack.yaml')`
	f.WriteFile("Tiltfile", tiltfile)
	f.WriteFile("Dockerfile", `FROM iron/go:dev`)
	f.WriteFile("snack.yaml", testyaml.Deployment("snack", "gcr.io/windmill-public-containers/servantes/snack:old"))

	f.loadAndStart()

	f.waitForCompletedBuildCount(1)

	// A yaml change should show up as the edit for build 2.
	f.WriteFile("snack.yaml", testyaml.Deployment("snack", "gcr.io/windmill-public-containers/servantes/snack:new"))
	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("snack.yaml"))

	f.waitForCompletedBuildCount(2)

	f.withManifestState("snack", func(ms store.ManifestState) {
		require.Equal(t, []string{f.JoinPath("snack.yaml")}, ms.LastBuild().Edits)
	})

	// A Dockerfile change should show up as the edit for build 3.
	f.WriteFile("Dockerfile", `FROM iron/go:foobar`)
	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("Dockerfile"))

	f.waitForCompletedBuildCount(3)

	f.withManifestState("snack", func(ms store.ManifestState) {
		require.Equal(t, []string{f.JoinPath("Dockerfile")}, ms.LastBuild().Edits)
	})
}
  2256  
// TestSetAnalyticsOpt verifies that analytics opt changes (in -> out -> in)
// propagate to engine state and are observed by the opter in order.
func TestSetAnalyticsOpt(t *testing.T) {
	f := newTestFixture(t)

	// Start with the user opted in.
	opt := func(ia InitAction) InitAction {
		ia.AnalyticsUserOpt = analytics.OptIn
		return ia
	}

	f.Start([]model.Manifest{}, opt)
	f.store.Dispatch(store.AnalyticsUserOptAction{Opt: analytics.OptOut})
	f.WaitUntil("opted out", func(state store.EngineState) bool {
		return state.AnalyticsEffectiveOpt() == analytics.OptOut
	})

	// if we don't wait for 1 here, it's possible the state flips to out and back to in before the subscriber sees it,
	// and we end up with no events
	f.opter.WaitUntilCount(t, 1)

	f.store.Dispatch(store.AnalyticsUserOptAction{Opt: analytics.OptIn})
	f.WaitUntil("opted in", func(state store.EngineState) bool {
		return state.AnalyticsEffectiveOpt() == analytics.OptIn
	})

	f.opter.WaitUntilCount(t, 2)

	err := f.Stop()
	if !assert.NoError(t, err) {
		return
	}
	// The opter should have seen exactly the two transitions, in order.
	assert.Equal(t, []analytics.Opt{analytics.OptOut, analytics.OptIn}, f.opter.Calls())
}
  2288  
  2289  func TestFeatureFlagsStoredOnState(t *testing.T) {
  2290  	f := newTestFixture(t)
  2291  
  2292  	f.Start([]model.Manifest{})
  2293  	f.ensureCluster()
  2294  
  2295  	f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{
  2296  		Name:       model.MainTiltfileManifestName,
  2297  		FinishTime: f.Now(),
  2298  		Features:   map[string]bool{"foo": true},
  2299  	})
  2300  
  2301  	f.WaitUntil("feature is enabled", func(state store.EngineState) bool {
  2302  		return state.Features["foo"] == true
  2303  	})
  2304  
  2305  	f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{
  2306  		Name:       model.MainTiltfileManifestName,
  2307  		FinishTime: f.Now(),
  2308  		Features:   map[string]bool{"foo": false},
  2309  	})
  2310  
  2311  	f.WaitUntil("feature is disabled", func(state store.EngineState) bool {
  2312  		return state.Features["foo"] == false
  2313  	})
  2314  }
  2315  
// TestTeamIDStoredOnState verifies that the team ID from a tiltfile reload
// is written to engine state, and that a later reload overwrites it.
func TestTeamIDStoredOnState(t *testing.T) {
	f := newTestFixture(t)

	f.Start([]model.Manifest{})
	f.ensureCluster()

	// First reload sets the team ID.
	f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{
		Name:       model.MainTiltfileManifestName,
		FinishTime: f.Now(),
		TeamID:     "sharks",
	})

	f.WaitUntil("teamID is set to sharks", func(state store.EngineState) bool {
		return state.TeamID == "sharks"
	})

	// A subsequent reload replaces the previous value.
	f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{
		Name:       model.MainTiltfileManifestName,
		FinishTime: f.Now(),
		TeamID:     "jets",
	})

	f.WaitUntil("teamID is set to jets", func(state store.EngineState) bool {
		return state.TeamID == "jets"
	})
}
  2342  
// TestBuildLogAction verifies that log lines dispatched for an in-progress
// build land in the build's span and are rendered in the global log with
// the (truncated) manifest-name prefix on every line.
func TestBuildLogAction(t *testing.T) {
	f := newTestFixture(t)
	// Disable the build controller so the manually-dispatched build below
	// is the only one in flight.
	f.bc.DisableForTesting()

	manifest := f.newManifest("alert-injester")
	f.Start([]model.Manifest{manifest})

	// Manually start a build so there is a current span to log into.
	f.store.Dispatch(buildcontrols.BuildStartedAction{
		ManifestName: manifest.Name,
		StartTime:    f.Now(),
		SpanID:       SpanIDForBuildLog(1),
		Source:       "buildcontrol",
	})

	f.store.Dispatch(store.NewLogAction(manifest.Name, SpanIDForBuildLog(1), logger.InfoLvl, nil, []byte(`a
bc
def
ghij`)))

	f.WaitUntil("log appears", func(es store.EngineState) bool {
		ms, _ := es.ManifestState("alert-injester")
		spanID := ms.EarliestCurrentBuild().SpanID
		return spanID != "" && len(es.LogStore.SpanLog(spanID)) > 0
	})

	// Each line is prefixed with the elided manifest name.
	f.withState(func(s store.EngineState) {
		assert.Contains(t, s.LogStore.String(), `alert-injest… │ a
alert-injest… │ bc
alert-injest… │ def
alert-injest… │ ghij`)
	})

	err := f.Stop()
	assert.Nil(t, err)
}
  2378  
  2379  func TestBuildErrorLoggedOnceByUpper(t *testing.T) {
  2380  	f := newTestFixture(t)
  2381  
  2382  	manifest := f.newManifest("alert-injester")
  2383  	err := errors.New("cats and dogs, living together")
  2384  	f.SetNextBuildError(err)
  2385  
  2386  	f.Start([]model.Manifest{manifest})
  2387  
  2388  	f.waitForCompletedBuildCount(1)
  2389  
  2390  	// so the test name says "once", but the fake builder also logs once, so we get it twice
  2391  	f.withState(func(state store.EngineState) {
  2392  		require.Equal(t, 2, strings.Count(state.LogStore.String(), err.Error()))
  2393  	})
  2394  }
  2395  
// TestTiltfileChangedFilesOnlyLoggedAfterFirstBuild verifies that the
// "N File Changed" message is suppressed for the initial build and only
// logged for subsequent rebuilds.
func TestTiltfileChangedFilesOnlyLoggedAfterFirstBuild(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	f.WriteFile("Tiltfile", `
docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile')
k8s_yaml('snack.yaml')`)
	f.WriteFile("Dockerfile", `FROM iron/go:dev1`)
	f.WriteFile("snack.yaml", simpleYAML)
	f.WriteFile("src/main.go", "hello")

	f.loadAndStart()

	f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool {
		return len(state.MainTiltfileState().BuildHistory) == 1
	})
	f.waitForCompletedBuildCount(1)

	// we shouldn't log changes for first build
	f.withState(func(state store.EngineState) {
		require.NotContains(t, state.LogStore.String(), "changed: [")
	})

	// Edit the Tiltfile to force a reload and a second build.
	f.WriteFile("Tiltfile", `
docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile', ignore='foo')
k8s_yaml('snack.yaml')`)
	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("Tiltfile"))

	f.WaitUntil("Tiltfile reloaded", func(state store.EngineState) bool {
		return len(state.MainTiltfileState().BuildHistory) == 2
	})
	f.waitForCompletedBuildCount(2)

	// The rebuild should name the changed Tiltfile in the log.
	f.withState(func(state store.EngineState) {
		expectedMessage := fmt.Sprintf("1 File Changed: [%s]", f.JoinPath("Tiltfile"))
		require.Contains(t, state.LogStore.String(), expectedMessage)
	})
}
  2434  
  2435  func TestDeployUIDsInEngineState(t *testing.T) {
  2436  	f := newTestFixture(t)
  2437  
  2438  	uid := types.UID("fake-uid")
  2439  	f.b.nextDeployedUID = uid
  2440  
  2441  	manifest := f.newManifest("fe")
  2442  	f.Start([]model.Manifest{manifest})
  2443  
  2444  	_ = f.nextCall()
  2445  	f.WaitUntilManifestState("UID in ManifestState", "fe", func(state store.ManifestState) bool {
  2446  		return k8sconv.ContainsUID(state.K8sRuntimeState().ApplyFilter, uid)
  2447  	})
  2448  
  2449  	err := f.Stop()
  2450  	assert.NoError(t, err)
  2451  	f.assertAllBuildsConsumed()
  2452  }
  2453  
  2454  func TestEnableFeatureOnFail(t *testing.T) {
  2455  	f := newTestFixture(t)
  2456  	f.useRealTiltfileLoader()
  2457  
  2458  	f.WriteFile("Tiltfile", `
  2459  enable_feature('snapshots')
  2460  fail('goodnight moon')
  2461  `)
  2462  
  2463  	f.loadAndStart()
  2464  
  2465  	f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool {
  2466  		return len(state.MainTiltfileState().BuildHistory) == 1
  2467  	})
  2468  	f.withState(func(state store.EngineState) {
  2469  		assert.True(t, state.Features["snapshots"])
  2470  	})
  2471  }
  2472  
  2473  func TestSecretScrubbed(t *testing.T) {
  2474  	f := newTestFixture(t)
  2475  	f.useRealTiltfileLoader()
  2476  
  2477  	tiltfile := `
  2478  print('about to print secret')
  2479  print('aGVsbG8=')
  2480  k8s_yaml('secret.yaml')`
  2481  	f.WriteFile("Tiltfile", tiltfile)
  2482  	f.WriteFile("secret.yaml", `
  2483  apiVersion: v1
  2484  kind: Secret
  2485  metadata:
  2486    name: my-secret
  2487  data:
  2488    client-secret: aGVsbG8=
  2489  `)
  2490  
  2491  	f.loadAndStart()
  2492  
  2493  	f.waitForCompletedBuildCount(1)
  2494  
  2495  	f.withState(func(state store.EngineState) {
  2496  		log := state.LogStore.String()
  2497  		assert.Contains(t, log, "about to print secret")
  2498  		assert.NotContains(t, log, "aGVsbG8=")
  2499  		assert.Contains(t, log, "[redacted secret my-secret:client-secret]")
  2500  	})
  2501  }
  2502  
  2503  func TestShortSecretNotScrubbed(t *testing.T) {
  2504  	f := newTestFixture(t)
  2505  	f.useRealTiltfileLoader()
  2506  
  2507  	tiltfile := `
  2508  print('about to print secret: s')
  2509  k8s_yaml('secret.yaml')`
  2510  	f.WriteFile("Tiltfile", tiltfile)
  2511  	f.WriteFile("secret.yaml", `
  2512  apiVersion: v1
  2513  kind: Secret
  2514  metadata:
  2515    name: my-secret
  2516  stringData:
  2517    client-secret: s
  2518  `)
  2519  
  2520  	f.loadAndStart()
  2521  
  2522  	f.waitForCompletedBuildCount(1)
  2523  
  2524  	f.withState(func(state store.EngineState) {
  2525  		log := state.LogStore.String()
  2526  		assert.Contains(t, log, "about to print secret: s")
  2527  		assert.NotContains(t, log, "redacted")
  2528  	})
  2529  }
  2530  
  2531  func TestDisableDockerPrune(t *testing.T) {
  2532  	f := newTestFixture(t)
  2533  	f.useRealTiltfileLoader()
  2534  
  2535  	f.WriteFile("Dockerfile", `FROM iron/go:prod`)
  2536  	f.WriteFile("snack.yaml", simpleYAML)
  2537  
  2538  	f.WriteFile("Tiltfile", `
  2539  docker_prune_settings(disable=True)
  2540  `+simpleTiltfile)
  2541  
  2542  	f.loadAndStart()
  2543  
  2544  	f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool {
  2545  		return len(state.MainTiltfileState().BuildHistory) == 1
  2546  	})
  2547  	f.withState(func(state store.EngineState) {
  2548  		assert.False(t, state.DockerPruneSettings.Enabled)
  2549  	})
  2550  }
  2551  
  2552  func TestDockerPruneEnabledByDefault(t *testing.T) {
  2553  	f := newTestFixture(t)
  2554  	f.useRealTiltfileLoader()
  2555  
  2556  	f.WriteFile("Tiltfile", simpleTiltfile)
  2557  	f.WriteFile("Dockerfile", `FROM iron/go:prod`)
  2558  	f.WriteFile("snack.yaml", simpleYAML)
  2559  
  2560  	f.loadAndStart()
  2561  
  2562  	f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool {
  2563  		return len(state.MainTiltfileState().BuildHistory) == 1
  2564  	})
  2565  	f.withState(func(state store.EngineState) {
  2566  		assert.True(t, state.DockerPruneSettings.Enabled)
  2567  		assert.Equal(t, model.DockerPruneDefaultMaxAge, state.DockerPruneSettings.MaxAge)
  2568  		assert.Equal(t, model.DockerPruneDefaultInterval, state.DockerPruneSettings.Interval)
  2569  	})
  2570  }
  2571  
// TestHasEverBeenReadyK8s verifies that a K8s manifest's runtime state only
// reports HasEverBeenReadyOrSucceeded() after a pod's container becomes
// ready — a completed build/deploy alone is not enough.
func TestHasEverBeenReadyK8s(t *testing.T) {
	f := newTestFixture(t)

	m := f.newManifest("foobar")
	// Register a pod builder for this manifest so pod events can be injected.
	pb := f.registerForDeployer(m)
	f.Start([]model.Manifest{m})

	// After the initial deploy, before any pod readiness, the flag is false.
	f.waitForCompletedBuildCount(1)
	f.withManifestState(m.Name, func(ms store.ManifestState) {
		require.False(t, ms.RuntimeState.HasEverBeenReadyOrSucceeded())
	})

	// Inject a pod event with a ready container and wait for the flag to flip.
	f.podEvent(pb.WithContainerReady(true).Build())
	f.WaitUntilManifestState("flagged ready", m.Name, func(state store.ManifestState) bool {
		return state.RuntimeState.HasEverBeenReadyOrSucceeded()
	})
}
  2589  
// TestHasEverBeenCompleteK8s verifies that HasEverBeenReadyOrSucceeded() also
// flips to true when the pod terminates successfully (PodSucceeded), i.e. for
// job-like workloads that never report a ready container.
func TestHasEverBeenCompleteK8s(t *testing.T) {
	f := newTestFixture(t)

	m := f.newManifest("foobar")
	// Register a pod builder for this manifest so pod events can be injected.
	pb := f.registerForDeployer(m)
	f.Start([]model.Manifest{m})

	// After the initial deploy, before any pod completion, the flag is false.
	f.waitForCompletedBuildCount(1)
	f.withManifestState(m.Name, func(ms store.ManifestState) {
		require.False(t, ms.RuntimeState.HasEverBeenReadyOrSucceeded())
	})

	// Inject a pod event in phase Succeeded and wait for the flag to flip.
	f.podEvent(pb.WithPhase(string(v1.PodSucceeded)).Build())
	f.WaitUntilManifestState("flagged ready", m.Name, func(state store.ManifestState) bool {
		return state.RuntimeState.HasEverBeenReadyOrSucceeded()
	})
}
  2607  
// TestHasEverBeenReadyLocal verifies HasEverBeenReadyOrSucceeded() for a
// local resource: false after a failed build, true once a later build
// succeeds.
func TestHasEverBeenReadyLocal(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("flaky on windows")
	}

	f := newTestFixture(t)

	m := manifestbuilder.New(f, "foobar").WithLocalResource("foo", []string{f.Path()}).Build()
	f.SetNextBuildError(errors.New("failure!"))
	f.Start([]model.Manifest{m})

	// first build will fail, HasEverBeenReadyOrSucceeded should be false
	f.waitForCompletedBuildCount(1)
	f.withManifestState(m.Name, func(ms store.ManifestState) {
		require.False(t, ms.RuntimeState.HasEverBeenReadyOrSucceeded())
	})

	// second build will succeed, HasEverBeenReadyOrSucceeded should be true
	// (a file event under the watched path triggers the rebuild)
	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("bar", "main.go"))
	f.WaitUntilManifestState("flagged ready", m.Name, func(state store.ManifestState) bool {
		return state.RuntimeState.HasEverBeenReadyOrSucceeded()
	})
}
  2631  
// TestVersionSettingsStoredOnState verifies that VersionSettings carried by a
// ConfigsReloadedAction are copied onto the engine state.
func TestVersionSettingsStoredOnState(t *testing.T) {
	f := newTestFixture(t)

	f.Start([]model.Manifest{})
	f.ensureCluster()

	vs := model.VersionSettings{
		CheckUpdates: false,
	}
	f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{
		Name:            model.MainTiltfileManifestName,
		FinishTime:      f.Now(),
		VersionSettings: vs,
	})

	f.WaitUntil("CheckVersionUpdates is set to false", func(state store.EngineState) bool {
		return state.VersionSettings.CheckUpdates == false
	})

	// NOTE(review): this walk only logs paths and asserts nothing — it looks
	// like leftover debug output. Consider removing it (its error return is
	// also silently discarded); kept here to avoid touching file-level
	// imports (log, filepath, os) that may have no other users.
	filepath.Walk(f.Path(), func(path string, info os.FileInfo, err error) error {
		log.Printf("path: %s", path)
		return nil
	})
}
  2656  
  2657  func TestAnalyticsTiltfileOpt(t *testing.T) {
  2658  	f := newTestFixture(t)
  2659  
  2660  	f.Start([]model.Manifest{})
  2661  	f.ensureCluster()
  2662  
  2663  	f.withState(func(state store.EngineState) {
  2664  		assert.Equal(t, analytics.OptDefault, state.AnalyticsEffectiveOpt())
  2665  	})
  2666  
  2667  	f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{
  2668  		Name:                 model.MainTiltfileManifestName,
  2669  		FinishTime:           f.Now(),
  2670  		AnalyticsTiltfileOpt: analytics.OptIn,
  2671  	})
  2672  
  2673  	f.WaitUntil("analytics tiltfile opt-in", func(state store.EngineState) bool {
  2674  		return state.AnalyticsTiltfileOpt == analytics.OptIn
  2675  	})
  2676  
  2677  	f.withState(func(state store.EngineState) {
  2678  		assert.Equal(t, analytics.OptIn, state.AnalyticsEffectiveOpt())
  2679  	})
  2680  }
  2681  
// TestConfigArgsChangeCausesTiltfileRerun verifies that changing the
// Tiltfile's user args via the apiserver triggers a re-execution of the
// Tiltfile with the new args.
func TestConfigArgsChangeCausesTiltfileRerun(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	f.WriteFile("Tiltfile", `
print('hello')
config.define_string_list('foo')
cfg = config.parse()
print('foo=', cfg['foo'])`)

	// Start with --foo bar on the command line.
	opt := func(ia InitAction) InitAction {
		ia.UserArgs = []string{"--foo", "bar"}
		return ia
	}

	f.loadAndStart(opt)

	// Wait for both EngineState and apiserver state updates,
	// so we can write back to the apiserver.
	f.WaitUntil("first tiltfile build finishes", func(state store.EngineState) bool {
		var tf v1alpha1.Tiltfile
		_ = f.ctrlClient.Get(f.ctx,
			types.NamespacedName{Name: model.MainTiltfileManifestName.String()}, &tf)
		return len(state.MainTiltfileState().BuildHistory) == 1 &&
			tf.Status.Terminated != nil
	})

	// The first run saw the original args.
	f.withState(func(state store.EngineState) {
		spanID := state.MainTiltfileState().LastBuild().SpanID
		require.Contains(t, state.LogStore.SpanLog(spanID), `foo= ["bar"]`)
	})
	// Rewrite the args through the apiserver; this should trigger a rerun.
	err := tiltfiles.SetTiltfileArgs(f.ctx, f.ctrlClient, []string{"--foo", "baz", "--foo", "quu"})
	require.NoError(t, err)

	f.WaitUntil("second tiltfile build finishes", func(state store.EngineState) bool {
		return len(state.MainTiltfileState().BuildHistory) == 2
	})

	// The second run saw the updated args.
	f.withState(func(state store.EngineState) {
		spanID := state.MainTiltfileState().LastBuild().SpanID
		require.Contains(t, state.LogStore.SpanLog(spanID), `foo= ["baz", "quu"]`)
	})
}
  2725  
  2726  func TestTelemetryLogAction(t *testing.T) {
  2727  	f := newTestFixture(t)
  2728  
  2729  	f.Start([]model.Manifest{})
  2730  
  2731  	f.store.Dispatch(store.NewLogAction(model.MainTiltfileManifestName, "0", logger.InfoLvl, nil, []byte("testing")))
  2732  
  2733  	f.WaitUntil("log is stored", func(state store.EngineState) bool {
  2734  		l := state.LogStore.ManifestLog(store.MainTiltfileManifestName)
  2735  		return strings.Contains(l, "testing")
  2736  	})
  2737  }
  2738  
// TestLocalResourceServeChangeCmd verifies that editing a local resource's
// serve_cmd in the Tiltfile stops the old server process and starts the new
// one.
func TestLocalResourceServeChangeCmd(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	f.WriteFile("Tiltfile", "local_resource('foo', serve_cmd='true')")

	f.loadAndStart()

	f.WaitUntil("true is served", func(state store.EngineState) bool {
		return strings.Contains(state.LogStore.ManifestLog("foo"), "Starting cmd true")
	})

	// Change the serve_cmd and trigger a Tiltfile reload.
	f.WriteFile("Tiltfile", "local_resource('foo', serve_cmd='false')")
	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("Tiltfile"))

	f.WaitUntil("false is served", func(state store.EngineState) bool {
		return strings.Contains(state.LogStore.ManifestLog("foo"), "Starting cmd false")
	})

	// The old server process must have been torn down.
	f.fe.RequireNoKnownProcess(t, "true")

	err := f.Stop()
	require.NoError(t, err)
}
  2763  
  2764  func TestDefaultUpdateSettings(t *testing.T) {
  2765  	f := newTestFixture(t)
  2766  	f.useRealTiltfileLoader()
  2767  
  2768  	f.WriteFile("Dockerfile", `FROM iron/go:prod`)
  2769  	f.WriteFile("snack.yaml", simpleYAML)
  2770  
  2771  	f.WriteFile("Tiltfile", simpleTiltfile)
  2772  
  2773  	f.loadAndStart()
  2774  
  2775  	f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool {
  2776  		return len(state.MainTiltfileState().BuildHistory) == 1
  2777  	})
  2778  	f.withState(func(state store.EngineState) {
  2779  		assert.Equal(t, model.DefaultUpdateSettings(), state.UpdateSettings)
  2780  	})
  2781  }
  2782  
  2783  func TestSetK8sUpsertTimeout(t *testing.T) {
  2784  	f := newTestFixture(t)
  2785  	f.useRealTiltfileLoader()
  2786  
  2787  	f.WriteFile("Dockerfile", `FROM iron/go:prod`)
  2788  	f.WriteFile("snack.yaml", simpleYAML)
  2789  
  2790  	f.WriteFile("Tiltfile", `
  2791  update_settings(k8s_upsert_timeout_secs=123)
  2792  `+simpleTiltfile)
  2793  	f.loadAndStart()
  2794  
  2795  	f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool {
  2796  		return len(state.MainTiltfileState().BuildHistory) == 1
  2797  	})
  2798  	f.withState(func(state store.EngineState) {
  2799  		assert.Equal(t, 123*time.Second, state.UpdateSettings.K8sUpsertTimeout())
  2800  	})
  2801  }
  2802  
  2803  func TestSetMaxBuildSlots(t *testing.T) {
  2804  	f := newTestFixture(t)
  2805  	f.useRealTiltfileLoader()
  2806  
  2807  	f.WriteFile("Dockerfile", `FROM iron/go:prod`)
  2808  	f.WriteFile("snack.yaml", simpleYAML)
  2809  
  2810  	f.WriteFile("Tiltfile", `
  2811  update_settings(max_parallel_updates=123)
  2812  `+simpleTiltfile)
  2813  	f.loadAndStart()
  2814  
  2815  	f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool {
  2816  		return len(state.MainTiltfileState().BuildHistory) == 1
  2817  	})
  2818  	f.withState(func(state store.EngineState) {
  2819  		assert.Equal(t, 123, state.UpdateSettings.MaxParallelUpdates())
  2820  	})
  2821  }
  2822  
// TestTiltignoreRespectedOnError verifies that .tiltignore patterns are
// propagated to the config FileWatch even when the Tiltfile itself fails,
// and that edits to .tiltignore are picked up on reload.
//
// https://github.com/tilt-dev/tilt/issues/3514
func TestTiltignoreRespectedOnError(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	// The Tiltfile reads a.txt and then fails; a.txt is also tiltignored.
	f.WriteFile("a.txt", "hello")
	f.WriteFile("Tiltfile", `read_file('a.txt')
fail('x')`)
	f.WriteFile(".tiltignore", "a.txt")

	f.Init(InitAction{
		TiltfilePath: f.JoinPath("Tiltfile"),
		TerminalMode: store.TerminalModeHUD,
		StartTime:    f.Now(),
	})

	// Despite the Tiltfile failure, the ignore pattern reaches the FileWatch.
	f.WaitUntil(".tiltignore processed", func(es store.EngineState) bool {
		var fw v1alpha1.FileWatch
		err := f.ctrlClient.Get(f.ctx, types.NamespacedName{Name: "configs:(Tiltfile)"}, &fw)
		if err != nil {
			return false
		}
		return strings.Contains(strings.Join(fw.Spec.Ignores[0].Patterns, "\n"), "a.txt")
	})

	// Edit .tiltignore and trigger a reload; the new pattern must propagate.
	f.WriteFile(".tiltignore", "a.txt\nb.txt\n")
	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("Tiltfile"))

	f.WaitUntil(".tiltignore processed", func(es store.EngineState) bool {
		var fw v1alpha1.FileWatch
		err := f.ctrlClient.Get(f.ctx, types.NamespacedName{Name: "configs:(Tiltfile)"}, &fw)
		if err != nil {
			return false
		}
		return strings.Contains(strings.Join(fw.Spec.Ignores[0].Patterns, "\n"), "b.txt")
	})

	err := f.Stop()
	assert.NoError(t, err)
}
  2863  
// TestHandleTiltfileTriggerQueue verifies that appending the Tiltfile to the
// trigger queue causes a Tiltfile rebuild and that the queue entry and
// trigger reason are cleared afterward.
func TestHandleTiltfileTriggerQueue(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	f.WriteFile("Tiltfile", `print("hello world")`)

	f.Init(InitAction{
		TiltfilePath: f.JoinPath("Tiltfile"),
		TerminalMode: store.TerminalModeHUD,
		StartTime:    f.Now(),
	})

	f.WaitUntil("init action processed", func(state store.EngineState) bool {
		return !state.TiltStartTime.IsZero()
	})

	f.withState(func(st store.EngineState) {
		assert.False(t, st.ManifestInTriggerQueue(model.MainTiltfileManifestName),
			"initial state should NOT have Tiltfile in trigger queue")
		assert.Equal(t, model.BuildReasonNone, st.MainTiltfileState().TriggerReason,
			"initial state should not have Tiltfile trigger reason")
	})
	// 123 is an arbitrary non-zero reason; the test only checks it round-trips.
	action := store.AppendToTriggerQueueAction{Name: model.MainTiltfileManifestName, Reason: 123}
	f.store.Dispatch(action)

	f.WaitUntil("Tiltfile trigger processed", func(st store.EngineState) bool {
		return st.ManifestInTriggerQueue(model.MainTiltfileManifestName) &&
			st.MainTiltfileState().TriggerReason == 123
	})

	f.WaitUntil("Tiltfile built and trigger cleared", func(st store.EngineState) bool {
		return len(st.MainTiltfileState().BuildHistory) == 2 && // Tiltfile built b/c it was triggered...

			// and the trigger was cleared
			!st.ManifestInTriggerQueue(model.MainTiltfileManifestName) &&
			st.MainTiltfileState().TriggerReason == model.BuildReasonNone
	})

	err := f.Stop()
	assert.NoError(t, err)
}
  2905  
  2906  func TestOverrideTriggerModeEvent(t *testing.T) {
  2907  	f := newTestFixture(t)
  2908  
  2909  	manifest := f.newManifest("foo")
  2910  	f.Start([]model.Manifest{manifest})
  2911  	_ = f.nextCall()
  2912  
  2913  	f.WaitUntilManifest("manifest has triggerMode = auto (default)", "foo", func(mt store.ManifestTarget) bool {
  2914  		return mt.Manifest.TriggerMode == model.TriggerModeAuto
  2915  	})
  2916  
  2917  	f.upper.store.Dispatch(server.OverrideTriggerModeAction{
  2918  		ManifestNames: []model.ManifestName{"foo"},
  2919  		TriggerMode:   model.TriggerModeManualWithAutoInit,
  2920  	})
  2921  
  2922  	f.WaitUntilManifest("triggerMode updated", "foo", func(mt store.ManifestTarget) bool {
  2923  		return mt.Manifest.TriggerMode == model.TriggerModeManualWithAutoInit
  2924  	})
  2925  
  2926  	err := f.Stop()
  2927  	require.NoError(t, err)
  2928  	f.assertAllBuildsConsumed()
  2929  }
  2930  
  2931  func TestOverrideTriggerModeBadManifestLogsError(t *testing.T) {
  2932  	f := newTestFixture(t)
  2933  
  2934  	manifest := f.newManifest("foo")
  2935  	f.Start([]model.Manifest{manifest})
  2936  	_ = f.nextCall()
  2937  
  2938  	f.WaitUntilManifest("manifest has triggerMode = auto (default)", "foo", func(mt store.ManifestTarget) bool {
  2939  		return mt.Manifest.TriggerMode == model.TriggerModeAuto
  2940  	})
  2941  
  2942  	f.upper.store.Dispatch(server.OverrideTriggerModeAction{
  2943  		ManifestNames: []model.ManifestName{"bar"},
  2944  		TriggerMode:   model.TriggerModeManualWithAutoInit,
  2945  	})
  2946  
  2947  	f.log.AssertEventuallyContains(t, "no such manifest", stdTimeout)
  2948  
  2949  	err := f.Stop()
  2950  	require.NoError(t, err)
  2951  	f.assertAllBuildsConsumed()
  2952  }
  2953  
// TestOverrideTriggerModeBadTriggerModeLogsError verifies that dispatching an
// OverrideTriggerModeAction with an invalid trigger mode value logs an error.
func TestOverrideTriggerModeBadTriggerModeLogsError(t *testing.T) {
	f := newTestFixture(t)

	manifest := f.newManifest("foo")
	f.Start([]model.Manifest{manifest})
	_ = f.nextCall()

	f.WaitUntilManifest("manifest has triggerMode = auto (default)", "foo", func(mt store.ManifestTarget) bool {
		return mt.Manifest.TriggerMode == model.TriggerModeAuto
	})

	// NOTE(review): "fooo" doesn't match the manifest "foo" above — presumably
	// harmless because the invalid trigger mode (12345) is rejected first, but
	// confirm this isn't a typo.
	f.upper.store.Dispatch(server.OverrideTriggerModeAction{
		ManifestNames: []model.ManifestName{"fooo"},
		TriggerMode:   12345,
	})

	f.log.AssertEventuallyContains(t, "invalid trigger mode", stdTimeout)

	err := f.Stop()
	require.NoError(t, err)
	f.assertAllBuildsConsumed()
}
  2976  
  2977  func TestDisableButtonIsCreated(t *testing.T) {
  2978  	f := newTestFixture(t)
  2979  	f.useRealTiltfileLoader()
  2980  
  2981  	f.WriteFile("Tiltfile", `
  2982  enable_feature('disable_resources')
  2983  local_resource('foo', 'echo hi')
  2984  `)
  2985  	f.loadAndStart()
  2986  
  2987  	f.waitForCompletedBuildCount(1)
  2988  
  2989  	var b v1alpha1.UIButton
  2990  	require.Eventually(t, func() bool {
  2991  		err := f.ctrlClient.Get(f.ctx, types.NamespacedName{Name: "toggle-foo-disable"}, &b)
  2992  		require.NoError(t, ctrlclient.IgnoreNotFound(err))
  2993  		return err == nil
  2994  	}, time.Second, time.Millisecond)
  2995  
  2996  	require.Equal(t, "DisableToggle", b.Annotations[v1alpha1.AnnotationButtonType])
  2997  	require.Equal(t, []v1alpha1.UIInputSpec{
  2998  		{
  2999  			Name:   "action",
  3000  			Hidden: &v1alpha1.UIHiddenInputSpec{Value: "on"},
  3001  		},
  3002  	}, b.Spec.Inputs)
  3003  }
  3004  
  3005  func TestCmdServerDoesntStartWhenDisabled(t *testing.T) {
  3006  	f := newTestFixture(t)
  3007  	f.useRealTiltfileLoader()
  3008  
  3009  	f.WriteFile("Tiltfile", `print('dummy tiltfile with no resources')`)
  3010  
  3011  	f.loadAndStart()
  3012  
  3013  	f.WriteFile("Tiltfile", `print('tiltfile 1')
  3014  local_resource('foo', serve_cmd='echo hi; sleep 10')
  3015  local_resource('bar', 'true')
  3016  config.set_enabled_resources(['bar'])
  3017  `)
  3018  	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("Tiltfile"))
  3019  
  3020  	// make sure we got to the point where we recognized the server is disabled without actually
  3021  	// running the command
  3022  	f.WaitUntil("disabled", func(state store.EngineState) bool {
  3023  		ds := f.localServerController.Get("foo").Status.DisableStatus
  3024  		return ds != nil && ds.Disabled
  3025  	})
  3026  
  3027  	require.Equal(t, f.log.String(), "")
  3028  }
  3029  
// TestDisabledResourceRemovedFromTriggerQueue verifies that disabling a
// resource removes any pending entry for it from the trigger queue.
func TestDisabledResourceRemovedFromTriggerQueue(t *testing.T) {
	f := newTestFixture(t)

	m := manifestbuilder.New(f, "foo").WithLocalResource("foo", []string{f.Path()}).Build()

	f.Start([]model.Manifest{m})

	f.waitForCompletedBuildCount(1)

	// Stop the build controller so the queued trigger isn't consumed by a
	// build before we can observe it.
	f.bc.DisableForTesting()

	f.store.Dispatch(store.AppendToTriggerQueueAction{Name: m.Name, Reason: model.BuildReasonFlagTriggerCLI})

	f.WaitUntil("in trigger queue", func(state store.EngineState) bool {
		return state.ManifestInTriggerQueue(m.Name)
	})

	// Disabling the resource should purge it from the queue.
	f.setDisableState(m.Name, true)

	f.WaitUntil("is removed from trigger queue", func(state store.EngineState) bool {
		return !state.ManifestInTriggerQueue(m.Name)
	})
}
  3053  
// TestLocalResourceNoServeCmdDeps verifies that an update-only local resource
// (no serve_cmd) counts as succeeded for resource_deps purposes, and that a
// readiness probe declared on it is ignored with a warning.
func TestLocalResourceNoServeCmdDeps(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("TODO(nick): fix this")
	}
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	// create a Tiltfile with 2 resources:
	// 	1. foo - update only, i.e. a job, with a readiness_probe also defined
	// 		(which should be ignored as there's no server to be ready!)
	// 	2. bar - local_resource w/ dep on foo
	f.WriteFile("Tiltfile", `
local_resource('foo', cmd='true', readiness_probe=probe(http_get=http_get_action(port=12345)))
local_resource('bar', serve_cmd='while true; do echo hi; sleep 30; done', resource_deps=['foo'])
`)
	f.loadAndStart()

	f.waitForCompletedBuildCount(2)

	// The Tiltfile load warns that foo's readiness probe is meaningless.
	f.withState(func(es store.EngineState) {
		require.True(t, strings.Contains(es.LogStore.ManifestLog("(Tiltfile)"),
			`WARNING: Ignoring readiness probe for local resource "foo" (no serve_cmd was defined)`),
			"Log did not contain ignored readiness probe warning")
	})

	// foo should indicate that it has succeeded since there is no serve_cmd and thus no runtime status
	f.withManifestState("foo", func(ms store.ManifestState) {
		require.True(t, ms.RuntimeState.HasEverBeenReadyOrSucceeded())
		require.Equal(t, v1alpha1.RuntimeStatusNotApplicable, ms.RuntimeState.RuntimeStatus())
	})

	// bar depends on foo; it should still come up and report OK.
	f.WaitUntilManifestState("bar ready", "bar", func(ms store.ManifestState) bool {
		return ms.RuntimeState.HasEverBeenReadyOrSucceeded() && ms.RuntimeState.RuntimeStatus() == v1alpha1.RuntimeStatusOK
	})
}
  3089  
// testFixture wires a real Upper, store, and controller manager to fake
// clients (Docker, K8s, docker-compose, execers) so engine behavior can be
// driven end-to-end from tests. Constructed by newTestFixture.
type testFixture struct {
	*tempdir.TempDirFixture
	t      *testing.T
	ctx    context.Context
	cancel func()
	clock  clockwork.Clock
	upper  Upper
	// b is the fake build-and-deployer; tests configure its next result/error.
	b *fakeBuildAndDeployer
	// fsWatcher delivers synthetic file events (send to fsWatcher.Events).
	fsWatcher *fsevent.FakeMultiWatcher
	docker    *docker.FakeClient
	kClient   *k8s.FakeK8sClient
	hud       hud.HeadsUpDisplay
	ts        *hud.TerminalStream
	// upperInitResult receives the error from the Upper's Init, if started.
	upperInitResult chan error
	// log captures all engine log output for assertions.
	log   *bufsync.ThreadSafeBuffer
	store *store.Store
	bc    *BuildController
	cc    *configs.ConfigsController
	dcc   *dockercompose.FakeDCClient
	// tfl is the fake Tiltfile loader used by default; realTFL is swapped in
	// by useRealTiltfileLoader for tests that execute real Tiltfile code.
	tfl                        *tiltfile.FakeTiltfileLoader
	realTFL                    tiltfile.TiltfileLoader
	opter                      *tiltanalytics.FakeOpter
	dp                         *dockerprune.DockerPruner
	fe                         *cmd.FakeExecer
	fpm                        *cmd.FakeProberManager
	overrideMaxParallelUpdates int
	// ctrlClient talks to the in-process Tilt apiserver.
	ctrlClient ctrlclient.Client
	engineMode store.EngineMode

	// onchangeCh receives a signal on every store change (via fixtureSub).
	onchangeCh            chan bool
	sessionController     *session.Controller
	localServerController *local.ServerController
	execer                *localexec.FakeExecer
}
  3124  
// fixtureOptions customizes newTestFixture; currently only the engine mode
// (defaults to store.EngineModeUp when nil).
type fixtureOptions struct {
	engineMode *store.EngineMode
}
  3128  
  3129  func newTestFixture(t *testing.T, options ...fixtureOptions) *testFixture {
  3130  	controllers.InitKlog(io.Discard)
  3131  	f := tempdir.NewTempDirFixture(t)
  3132  
  3133  	engineMode := store.EngineModeUp
  3134  	for _, o := range options {
  3135  		if o.engineMode != nil {
  3136  			engineMode = *o.engineMode
  3137  		}
  3138  	}
  3139  
  3140  	fs := afero.NewMemMapFs()
  3141  	base := xdg.NewFakeBase(f.Path(), fs)
  3142  	log := bufsync.NewThreadSafeBuffer()
  3143  	to := tiltanalytics.NewFakeOpter(analytics.OptIn)
  3144  	ctx, _, ta := testutils.ForkedCtxAndAnalyticsWithOpterForTest(log, to)
  3145  	ctx, cancel := context.WithTimeout(ctx, 15*time.Second)
  3146  
  3147  	cdc := controllers.ProvideDeferredClient()
  3148  	sch := v1alpha1.NewScheme()
  3149  
  3150  	watcher := fsevent.NewFakeMultiWatcher()
  3151  	kClient := k8s.NewFakeK8sClient(t)
  3152  	clusterClients := cluster.NewConnectionManager()
  3153  
  3154  	timerMaker := fsevent.MakeFakeTimerMaker(t)
  3155  
  3156  	dockerClient := docker.NewFakeClient()
  3157  
  3158  	fSub := fixtureSub{ch: make(chan bool, 1000)}
  3159  	st := store.NewStore(UpperReducer, store.LogActionsFlag(false))
  3160  	require.NoError(t, st.AddSubscriber(ctx, fSub))
  3161  
  3162  	err := os.Mkdir(f.JoinPath(".git"), os.FileMode(0777))
  3163  	if err != nil {
  3164  		t.Fatal(err)
  3165  	}
  3166  
  3167  	clock := clockwork.NewRealClock()
  3168  	env := clusterid.ProductDockerDesktop
  3169  	podSource := podlogstream.NewPodSource(ctx, kClient, v1alpha1.NewScheme(), clock)
  3170  	plsc := podlogstream.NewController(ctx, cdc, sch, st, kClient, podSource, clock)
  3171  	au := engineanalytics.NewAnalyticsUpdater(ta, engineanalytics.CmdTags{}, engineMode)
  3172  	ar := engineanalytics.ProvideAnalyticsReporter(ta, st, kClient, env, feature.MainDefaults)
  3173  	fakeDcc := dockercompose.NewFakeDockerComposeClient(t, ctx)
  3174  	k8sContextPlugin := k8scontext.NewPlugin("fake-context", "default", env)
  3175  	versionPlugin := version.NewPlugin(model.TiltBuild{Version: "0.5.0"})
  3176  	configPlugin := config.NewPlugin("up")
  3177  	execer := localexec.NewFakeExecer(t)
  3178  
  3179  	extPlugin := tiltextension.NewFakePlugin(
  3180  		tiltextension.NewFakeExtRepoReconciler(f.Path()),
  3181  		tiltextension.NewFakeExtReconciler(f.Path()))
  3182  	ciSettingsPlugin := cisettings.NewPlugin(0)
  3183  	realTFL := tiltfile.ProvideTiltfileLoader(ta,
  3184  		k8sContextPlugin, versionPlugin, configPlugin, extPlugin, ciSettingsPlugin,
  3185  		fakeDcc, "localhost", execer, feature.MainDefaults, env)
  3186  	tfl := tiltfile.NewFakeTiltfileLoader()
  3187  	cc := configs.NewConfigsController(cdc)
  3188  	tqs := configs.NewTriggerQueueSubscriber(cdc)
  3189  	serverOptions, err := server.ProvideTiltServerOptionsForTesting(ctx)
  3190  	require.NoError(t, err)
  3191  	webListener, err := server.ProvideWebListener("localhost", 0)
  3192  	require.NoError(t, err)
  3193  	hudsc := server.ProvideHeadsUpServerController(
  3194  		nil, "tilt-default", webListener, serverOptions,
  3195  		&server.HeadsUpServer{}, assets.NewFakeServer(), model.WebURL{})
  3196  	ns := k8s.Namespace("default")
  3197  	rd := kubernetesdiscovery.NewContainerRestartDetector()
  3198  	kdc := kubernetesdiscovery.NewReconciler(cdc, sch, clusterClients, rd, st)
  3199  	sw := k8swatch.NewServiceWatcher(clusterClients, ns)
  3200  	ewm := k8swatch.NewEventWatchManager(clusterClients, ns)
  3201  	tcum := cloud.NewStatusManager(httptest.NewFakeClientEmptyJSON(), clock)
  3202  	fe := cmd.NewFakeExecer()
  3203  	fpm := cmd.NewFakeProberManager()
  3204  	fwc := filewatch.NewController(cdc, st, watcher.NewSub, timerMaker.Maker(), v1alpha1.NewScheme(), clock)
  3205  	cmds := cmd.NewController(ctx, fe, fpm, cdc, st, clock, v1alpha1.NewScheme())
  3206  	lsc := local.NewServerController(cdc)
  3207  	sr := ctrlsession.NewReconciler(cdc, st, clock)
  3208  	sessionController := session.NewController(sr)
  3209  	ts := hud.NewTerminalStream(hud.NewIncrementalPrinter(log), hud.NewLogFilter(hud.FilterSourceAll, nil, hud.FilterLevel(logger.NoneLvl)), st)
  3210  	tp := prompt.NewTerminalPrompt(ta, prompt.TTYOpen, openurl.BrowserOpen,
  3211  		log, "localhost", model.WebURL{})
  3212  	h := hud.NewFakeHud()
  3213  
  3214  	uncached := controllers.UncachedObjects{}
  3215  	for _, obj := range v1alpha1.AllResourceObjects() {
  3216  		uncached = append(uncached, obj.(ctrlclient.Object))
  3217  	}
  3218  
  3219  	tscm, err := controllers.NewTiltServerControllerManager(
  3220  		serverOptions,
  3221  		sch,
  3222  		cdc,
  3223  		uncached)
  3224  	require.NoError(t, err, "Failed to create Tilt API server controller manager")
  3225  	pfr := apiportforward.NewReconciler(cdc, sch, st, clusterClients)
  3226  
  3227  	wsl := server.NewWebsocketList()
  3228  
  3229  	kar := kubernetesapply.NewReconciler(cdc, kClient, sch, st, execer)
  3230  	dcds := dockercomposeservice.NewDisableSubscriber(ctx, fakeDcc, clock)
  3231  	dcr := dockercomposeservice.NewReconciler(cdc, fakeDcc, dockerClient, st, sch, dcds)
  3232  
  3233  	tfr := ctrltiltfile.NewReconciler(st, tfl, dockerClient, cdc, sch, engineMode, "", "", 0)
  3234  	tbr := togglebutton.NewReconciler(cdc, sch)
  3235  	extr := extension.NewReconciler(cdc, sch, ta)
  3236  	extrr, err := extensionrepo.NewReconciler(cdc, st, base)
  3237  	require.NoError(t, err)
  3238  	cmr := configmap.NewReconciler(cdc, st)
  3239  
  3240  	cu := &containerupdate.FakeContainerUpdater{}
  3241  	lur := liveupdate.NewFakeReconciler(st, cu, cdc)
  3242  	dockerBuilder := build.NewDockerBuilder(dockerClient, nil)
  3243  	customBuilder := build.NewCustomBuilder(dockerClient, clock, cmds)
  3244  	kp := build.NewKINDLoader()
  3245  	ib := build.NewImageBuilder(dockerBuilder, customBuilder, kp)
  3246  	dir := dockerimage.NewReconciler(cdc, st, sch, dockerClient, ib)
  3247  	cir := cmdimage.NewReconciler(cdc, st, sch, dockerClient, ib)
  3248  	kubeconfigWriter := kubeconfig.NewWriter(base, fs, "tilt-default")
  3249  	localKubeconfigPathOnce := localexec.KubeconfigPathOnce(func() string {
  3250  		return "/path/to/kubeconfig-default.yaml"
  3251  	})
  3252  	clr := cluster.NewReconciler(ctx, cdc, st, clock, clusterClients, docker.LocalEnv{},
  3253  		cluster.FakeDockerClientOrError(dockerClient, nil),
  3254  		cluster.FakeKubernetesClientOrError(kClient, nil),
  3255  		wsl,
  3256  		kubeconfigWriter,
  3257  		localKubeconfigPathOnce)
  3258  	dclsr := dockercomposelogstream.NewReconciler(cdc, st, fakeDcc, dockerClient)
  3259  
  3260  	cb := controllers.NewControllerBuilder(tscm, controllers.ProvideControllers(
  3261  		fwc,
  3262  		cmds,
  3263  		plsc,
  3264  		kdc,
  3265  		kar,
  3266  		ctrluisession.NewReconciler(cdc, wsl),
  3267  		ctrluiresource.NewReconciler(cdc, wsl, st),
  3268  		ctrluibutton.NewReconciler(cdc, wsl, st),
  3269  		pfr,
  3270  		tfr,
  3271  		tbr,
  3272  		extr,
  3273  		extrr,
  3274  		lur,
  3275  		cmr,
  3276  		dir,
  3277  		cir,
  3278  		clr,
  3279  		dcr,
  3280  		imagemap.NewReconciler(cdc, st),
  3281  		dclsr,
  3282  		sr,
  3283  	))
  3284  
  3285  	dp := dockerprune.NewDockerPruner(dockerClient)
  3286  	dp.DisabledForTesting(true)
  3287  
  3288  	b := newFakeBuildAndDeployer(t, kClient, fakeDcc, cdc, kar, dcr)
  3289  	bc := NewBuildController(b)
  3290  
  3291  	ret := &testFixture{
  3292  		TempDirFixture:        f,
  3293  		t:                     t,
  3294  		ctx:                   ctx,
  3295  		cancel:                cancel,
  3296  		clock:                 clock,
  3297  		b:                     b,
  3298  		fsWatcher:             watcher,
  3299  		docker:                dockerClient,
  3300  		kClient:               b.kClient,
  3301  		hud:                   h,
  3302  		ts:                    ts,
  3303  		log:                   log,
  3304  		store:                 st,
  3305  		bc:                    bc,
  3306  		onchangeCh:            fSub.ch,
  3307  		cc:                    cc,
  3308  		dcc:                   fakeDcc,
  3309  		tfl:                   tfl,
  3310  		realTFL:               realTFL,
  3311  		opter:                 to,
  3312  		dp:                    dp,
  3313  		fe:                    fe,
  3314  		fpm:                   fpm,
  3315  		ctrlClient:            cdc,
  3316  		sessionController:     sessionController,
  3317  		localServerController: lsc,
  3318  		engineMode:            engineMode,
  3319  		execer:                execer,
  3320  	}
  3321  
  3322  	ret.disableEnvAnalyticsOpt()
  3323  
  3324  	tc := telemetry.NewController(clock, tracer.NewSpanCollector(ctx))
  3325  	podm := k8srollout.NewPodMonitor(clock)
  3326  
  3327  	uss := uisession.NewSubscriber(cdc)
  3328  	urs := uiresource.NewSubscriber(cdc)
  3329  
  3330  	subs := ProvideSubscribers(hudsc, tscm, cb, h, ts, tp, sw, bc, cc, tqs, ar, au, ewm, tcum, dp, tc, lsc, podm, sessionController, uss, urs)
  3331  	ret.upper, err = NewUpper(ctx, st, subs)
  3332  	require.NoError(t, err)
  3333  
  3334  	go func() {
  3335  		err := h.Run(ctx, ret.upper.Dispatch, hud.DefaultRefreshInterval)
  3336  		testutils.FailOnNonCanceledErr(t, err, "hud.Run failed")
  3337  	}()
  3338  
  3339  	t.Cleanup(ret.TearDown)
  3340  	return ret
  3341  }
  3342  
  3343  func (f *testFixture) Now() time.Time {
  3344  	return f.clock.Now()
  3345  }
  3346  
  3347  func (f *testFixture) fakeHud() *hud.FakeHud {
  3348  	fakeHud, ok := f.hud.(*hud.FakeHud)
  3349  	if !ok {
  3350  		f.t.Fatalf("called f.fakeHud() on a test fixure without a fakeHud (instead f.hud is of type: %T", f.hud)
  3351  	}
  3352  	return fakeHud
  3353  }
  3354  
  3355  // starts the upper with the given manifests, bypassing normal tiltfile loading
  3356  func (f *testFixture) Start(manifests []model.Manifest, initOptions ...initOption) {
  3357  	f.t.Helper()
  3358  	f.setManifests(manifests)
  3359  
  3360  	ia := InitAction{
  3361  		TiltfilePath: f.JoinPath("Tiltfile"),
  3362  		TerminalMode: store.TerminalModeHUD,
  3363  		StartTime:    f.Now(),
  3364  	}
  3365  	for _, o := range initOptions {
  3366  		ia = o(ia)
  3367  	}
  3368  	f.Init(ia)
  3369  }
  3370  
  3371  func (f *testFixture) useRealTiltfileLoader() {
  3372  	f.tfl.Delegate = f.realTFL
  3373  }
  3374  
  3375  func (f *testFixture) setManifests(manifests []model.Manifest) {
  3376  	f.tfl.Result.Manifests = manifests
  3377  	f.tfl.Result = f.tfl.Result.WithAllManifestsEnabled()
  3378  }
  3379  
  3380  func (f *testFixture) setMaxParallelUpdates(n int) {
  3381  	f.overrideMaxParallelUpdates = n
  3382  
  3383  	state := f.store.LockMutableStateForTesting()
  3384  	state.UpdateSettings = state.UpdateSettings.WithMaxParallelUpdates(n)
  3385  	f.store.UnlockMutableState()
  3386  }
  3387  
  3388  func (f *testFixture) disableEnvAnalyticsOpt() {
  3389  	state := f.store.LockMutableStateForTesting()
  3390  	state.AnalyticsEnvOpt = analytics.OptDefault
  3391  	f.store.UnlockMutableState()
  3392  }
  3393  
  3394  type initOption func(ia InitAction) InitAction
  3395  
  3396  func (f *testFixture) Init(action InitAction) {
  3397  	f.t.Helper()
  3398  
  3399  	ctx, cancel := context.WithCancel(f.ctx)
  3400  	defer cancel()
  3401  
  3402  	watchFiles := f.engineMode.WatchesFiles()
  3403  	f.upperInitResult = make(chan error, 10)
  3404  
  3405  	go func() {
  3406  		err := f.upper.Init(f.ctx, action)
  3407  		if err != nil && err != context.Canceled {
  3408  			// Print this out here in case the test never completes
  3409  			log.Printf("upper exited: %v\n", err)
  3410  			f.cancel()
  3411  		}
  3412  		cancel()
  3413  
  3414  		select {
  3415  		case f.upperInitResult <- err:
  3416  		default:
  3417  			fmt.Println("writing to upperInitResult would block!")
  3418  			panic(err)
  3419  		}
  3420  		close(f.upperInitResult)
  3421  	}()
  3422  
  3423  	f.WaitUntil("tiltfile build finishes", func(st store.EngineState) bool {
  3424  		return !st.MainTiltfileState().LastBuild().Empty()
  3425  	})
  3426  
  3427  	state := f.store.LockMutableStateForTesting()
  3428  	expectedFileWatches := ctrltiltfile.ToFileWatchObjects(ctrltiltfile.WatchInputs{
  3429  		TiltfileManifestName: model.MainTiltfileManifestName,
  3430  		Manifests:            state.Manifests(),
  3431  		ConfigFiles:          []string{action.TiltfilePath},
  3432  		TiltfilePath:         action.TiltfilePath,
  3433  	}, make(map[model.ManifestName]*v1alpha1.DisableSource))
  3434  	if f.overrideMaxParallelUpdates > 0 {
  3435  		state.UpdateSettings = state.UpdateSettings.WithMaxParallelUpdates(f.overrideMaxParallelUpdates)
  3436  	}
  3437  	f.store.UnlockMutableState()
  3438  
  3439  	f.PollUntil("watches set up", func() bool {
  3440  		if !watchFiles {
  3441  			return true
  3442  		}
  3443  
  3444  		// wait for FileWatch objects to exist AND have a status indicating they're running
  3445  		var fwList v1alpha1.FileWatchList
  3446  		if err := f.ctrlClient.List(ctx, &fwList); err != nil {
  3447  			// If the context was canceled but the file watches haven't been set up,
  3448  			// that's OK. Just continue executing the rest of the test.
  3449  			//
  3450  			// If the error wasn't intended, the error will be properly
  3451  			// handled in TearDown().
  3452  			if ctx.Done() != nil {
  3453  				return true
  3454  			}
  3455  
  3456  			return false
  3457  		}
  3458  
  3459  		remainingWatchNames := make(map[string]bool)
  3460  		for _, fw := range expectedFileWatches {
  3461  			remainingWatchNames[fw.GetName()] = true
  3462  		}
  3463  
  3464  		for _, fw := range fwList.Items {
  3465  			if !fw.Status.MonitorStartTime.IsZero() {
  3466  				delete(remainingWatchNames, fw.GetName())
  3467  			}
  3468  		}
  3469  		return len(remainingWatchNames) == 0
  3470  	})
  3471  }
  3472  
  3473  func (f *testFixture) Stop() error {
  3474  	f.cancel()
  3475  	err := <-f.upperInitResult
  3476  	if err == context.Canceled {
  3477  		return nil
  3478  	} else {
  3479  		return err
  3480  	}
  3481  }
  3482  
  3483  func (f *testFixture) WaitForExit() error {
  3484  	select {
  3485  	case <-time.After(stdTimeout):
  3486  		f.T().Fatalf("Timed out waiting for upper to exit")
  3487  		return nil
  3488  	case err := <-f.upperInitResult:
  3489  		return err
  3490  	}
  3491  }
  3492  
  3493  func (f *testFixture) WaitForNoExit() error {
  3494  	select {
  3495  	case <-time.After(stdTimeout):
  3496  		return nil
  3497  	case err := <-f.upperInitResult:
  3498  		f.T().Fatalf("upper exited when it shouldn't have")
  3499  		return err
  3500  	}
  3501  }
  3502  
// SetNextBuildError arranges for the next build executed by the fake
// buildAndDeployer to fail with err.
func (f *testFixture) SetNextBuildError(err error) {
	// Before setting the nextBuildError, make sure that any in-flight builds (state.BuildStartedCount)
	// have hit the buildAndDeployer (f.b.buildCount); by the time we've incremented buildCount and
	// the fakeBaD mutex is unlocked, we've already grabbed the nextBuildError for that build,
	// so we can freely set it here for a future build.
	f.WaitUntil("any in-flight builds have hit the buildAndDeployer", func(state store.EngineState) bool {
		f.b.mu.Lock()
		defer f.b.mu.Unlock()
		return f.b.buildCount == state.BuildControllerStartCount
	})

	// Hold the store's read lock while mutating the fake builder so no new
	// build can start (and consume the error) mid-update. The returned state
	// is deliberately unused.
	_ = f.store.RLockState()
	f.b.mu.Lock()
	f.b.nextBuildError = err
	f.b.mu.Unlock()
	f.store.RUnlockState()
}
  3520  
  3521  // Wait until the given view test passes.
  3522  func (f *testFixture) WaitUntilHUD(msg string, isDone func(view.View) bool) {
  3523  	f.fakeHud().WaitUntil(f.T(), f.ctx, msg, isDone)
  3524  }
  3525  
  3526  func (f *testFixture) WaitUntilHUDResource(msg string, name model.ManifestName, isDone func(view.Resource) bool) {
  3527  	f.fakeHud().WaitUntilResource(f.T(), f.ctx, msg, name, isDone)
  3528  }
  3529  
// Wait until the given engine state test passes.
//
// WaitUntil re-evaluates isDone each time the fixture's store subscriber
// signals a change (f.onchangeCh), fails immediately on a store FatalError,
// and on timeout (stdTimeout) runs isDone one last time before dumping the
// engine state and failing the test.
func (f *testFixture) WaitUntil(msg string, isDone func(store.EngineState) bool) {
	f.T().Helper()

	ctx, cancel := context.WithTimeout(f.ctx, stdTimeout)
	defer cancel()

	isCanceled := false

	for {
		state := f.upper.store.RLockState()
		done := isDone(state)
		fatalErr := state.FatalError
		f.upper.store.RUnlockState()
		if done {
			return
		}
		if fatalErr != nil {
			f.T().Fatalf("Store had fatal error: %v", fatalErr)
		}

		if isCanceled {
			// Timed out on the previous iteration and the final isDone check
			// still failed: dump state to aid debugging, then fail.
			_, _ = fmt.Fprintf(os.Stderr, "Test canceled. Dumping engine state:\n")
			encoder := store.CreateEngineStateEncoder(os.Stderr)
			require.NoError(f.T(), encoder.Encode(state))
			f.T().Fatalf("Timed out waiting for: %s", msg)
		}

		select {
		case <-ctx.Done():
			// Let the loop run the isDone test one more time
			isCanceled = true
		case <-f.onchangeCh:
		}
	}
}
  3566  
  3567  func (f *testFixture) withState(tf func(store.EngineState)) {
  3568  	state := f.upper.store.RLockState()
  3569  	defer f.upper.store.RUnlockState()
  3570  	tf(state)
  3571  }
  3572  
  3573  func (f *testFixture) withManifestTarget(name model.ManifestName, tf func(ms store.ManifestTarget)) {
  3574  	f.withState(func(es store.EngineState) {
  3575  		mt, ok := es.ManifestTargets[name]
  3576  		if !ok {
  3577  			f.T().Fatalf("no manifest state for name %s", name)
  3578  		}
  3579  		tf(*mt)
  3580  	})
  3581  }
  3582  
  3583  func (f *testFixture) withManifestState(name model.ManifestName, tf func(ms store.ManifestState)) {
  3584  	f.withManifestTarget(name, func(mt store.ManifestTarget) {
  3585  		tf(*mt.State)
  3586  	})
  3587  }
  3588  
  3589  // Poll until the given state passes. This should be used for checking things outside
  3590  // the state loop. Don't use this to check state inside the state loop.
  3591  func (f *testFixture) PollUntil(msg string, isDone func() bool) {
  3592  	f.t.Helper()
  3593  	ctx, cancel := context.WithTimeout(f.ctx, stdTimeout)
  3594  	defer cancel()
  3595  
  3596  	ticker := time.NewTicker(10 * time.Millisecond)
  3597  	for {
  3598  		done := isDone()
  3599  		if done {
  3600  			return
  3601  		}
  3602  
  3603  		select {
  3604  		case <-ctx.Done():
  3605  			f.T().Fatalf("Timed out waiting for: %s", msg)
  3606  		case <-ticker.C:
  3607  		}
  3608  	}
  3609  }
  3610  
  3611  func (f *testFixture) WaitUntilManifest(msg string, name model.ManifestName, isDone func(store.ManifestTarget) bool) {
  3612  	f.t.Helper()
  3613  	f.WaitUntil(msg, func(es store.EngineState) bool {
  3614  		mt, ok := es.ManifestTargets[name]
  3615  		if !ok {
  3616  			return false
  3617  		}
  3618  		return isDone(*mt)
  3619  	})
  3620  }
  3621  
  3622  func (f *testFixture) WaitUntilManifestState(msg string, name model.ManifestName, isDone func(store.ManifestState) bool) {
  3623  	f.t.Helper()
  3624  	f.WaitUntilManifest(msg, name, func(mt store.ManifestTarget) bool {
  3625  		return isDone(*(mt.State))
  3626  	})
  3627  }
  3628  
  3629  // gets the args for the next BaD call and blocks until that build is reflected in EngineState
  3630  func (f *testFixture) nextCallComplete(msgAndArgs ...interface{}) buildAndDeployCall {
  3631  	f.t.Helper()
  3632  	call := f.nextCall(msgAndArgs...)
  3633  	f.waitForCompletedBuildCount(call.count)
  3634  	return call
  3635  }
  3636  
  3637  // gets the args passed to the next call to the BaDer
  3638  // note that if you're using this to block until a build happens, it only blocks until the BaDer itself finishes
  3639  // so it can return before the build has actually been processed by the upper or the EngineState reflects
  3640  // the completed build.
  3641  // using `nextCallComplete` will ensure you block until the EngineState reflects the completed build.
  3642  func (f *testFixture) nextCall(msgAndArgs ...interface{}) buildAndDeployCall {
  3643  	f.t.Helper()
  3644  	msg := "timed out waiting for BuildAndDeployCall"
  3645  	if len(msgAndArgs) > 0 {
  3646  		format := msgAndArgs[0].(string)
  3647  		args := msgAndArgs[1:]
  3648  		msg = fmt.Sprintf("%s: %s", msg, fmt.Sprintf(format, args...))
  3649  	}
  3650  
  3651  	for {
  3652  		select {
  3653  		case call := <-f.b.calls:
  3654  			return call
  3655  		case <-time.After(stdTimeout):
  3656  
  3657  			// If we timed out, look at the current buildcontrol state.
  3658  			state := f.upper.store.RLockState()
  3659  			target, holds := buildcontrol.NextTargetToBuild(state)
  3660  			msg = fmt.Sprintf("%s\nbuild control state: %+v, holds: %+v\n", msg, target, holds)
  3661  			f.upper.store.RUnlockState()
  3662  
  3663  			f.T().Fatal(msg)
  3664  		}
  3665  	}
  3666  }
  3667  
  3668  func (f *testFixture) assertNoCall(msgAndArgs ...interface{}) {
  3669  	f.t.Helper()
  3670  	msg := "expected there to be no BuildAndDeployCalls, but found one"
  3671  	if len(msgAndArgs) > 0 {
  3672  		msg = fmt.Sprintf("expected there to be no BuildAndDeployCalls, but found one: %s", msgAndArgs...)
  3673  	}
  3674  	for {
  3675  		select {
  3676  		case call := <-f.b.calls:
  3677  			f.T().Fatalf("%s\ncall:\n%s", msg, spew.Sdump(call))
  3678  		case <-time.After(200 * time.Millisecond):
  3679  			return
  3680  		}
  3681  	}
  3682  }
  3683  
  3684  func (f *testFixture) lastDeployedUID(manifestName model.ManifestName) types.UID {
  3685  	var manifest model.Manifest
  3686  	f.withManifestTarget(manifestName, func(mt store.ManifestTarget) {
  3687  		manifest = mt.Manifest
  3688  	})
  3689  	result := f.b.resultsByID[manifest.K8sTarget().ID()]
  3690  	k8sResult, ok := result.(store.K8sBuildResult)
  3691  	if !ok {
  3692  		return ""
  3693  	}
  3694  	if len(k8sResult.DeployedRefs) > 0 {
  3695  		return k8sResult.DeployedRefs[0].UID
  3696  	}
  3697  	return ""
  3698  }
  3699  
  3700  func (f *testFixture) startPod(pod *v1.Pod, manifestName model.ManifestName) {
  3701  	f.t.Helper()
  3702  	f.podEvent(pod)
  3703  	f.WaitUntilManifestState("pod appears", manifestName, func(ms store.ManifestState) bool {
  3704  		return ms.MostRecentPod().Name == pod.Name
  3705  	})
  3706  }
  3707  
  3708  func (f *testFixture) podLog(pod *v1.Pod, manifestName model.ManifestName, s string) {
  3709  	podID := k8s.PodID(pod.Name)
  3710  	f.upper.store.Dispatch(store.NewLogAction(manifestName, k8sconv.SpanIDForPod(manifestName, podID), logger.InfoLvl, nil, []byte(s+"\n")))
  3711  
  3712  	f.WaitUntil("pod log seen", func(es store.EngineState) bool {
  3713  		ms, _ := es.ManifestState(manifestName)
  3714  		spanID := k8sconv.SpanIDForPod(manifestName, k8s.PodID(ms.MostRecentPod().Name))
  3715  		return strings.Contains(es.LogStore.SpanLog(spanID), s)
  3716  	})
  3717  }
  3718  
  3719  func (f *testFixture) restartPod(pb podbuilder.PodBuilder) podbuilder.PodBuilder {
  3720  	restartCount := pb.RestartCount() + 1
  3721  	pb = pb.WithRestartCount(restartCount)
  3722  
  3723  	f.podEvent(pb.Build())
  3724  
  3725  	f.WaitUntilManifestState("pod restart seen", pb.ManifestName(), func(ms store.ManifestState) bool {
  3726  		return store.AllPodContainerRestarts(ms.MostRecentPod()) == int32(restartCount)
  3727  	})
  3728  	return pb
  3729  }
  3730  
  3731  func (f *testFixture) notifyAndWaitForPodStatus(pod *v1.Pod, mn model.ManifestName, pred func(pod v1alpha1.Pod) bool) {
  3732  	f.podEvent(pod)
  3733  	f.WaitUntilManifestState("pod status change seen", mn, func(state store.ManifestState) bool {
  3734  		return pred(state.MostRecentPod())
  3735  	})
  3736  }
  3737  
  3738  func (f *testFixture) waitForCompletedBuildCount(count int) {
  3739  	f.t.Helper()
  3740  	f.WaitUntil(fmt.Sprintf("%d builds done", count), func(state store.EngineState) bool {
  3741  		return state.CompletedBuildCount >= count
  3742  	})
  3743  }
  3744  
  3745  func (f *testFixture) LogLines() []string {
  3746  	return strings.Split(f.log.String(), "\n")
  3747  }
  3748  
// TearDown shuts the fixture down: dumps the log store if the test failed,
// closes the fake filesystem watcher channels, cancels the root context, and
// drains the upper's exit result so its goroutine can finish.
func (f *testFixture) TearDown() {
	if f.T().Failed() {
		f.withState(func(es store.EngineState) {
			fmt.Println(es.LogStore.String())
		})
	}
	// Closing the watcher channels unblocks any loops consuming them.
	close(f.fsWatcher.Events)
	close(f.fsWatcher.Errors)
	f.cancel()

	// If the test started an Init() in a goroutine, drain it.
	if f.upperInitResult != nil {
		<-f.upperInitResult
	}
}
  3764  
  3765  func (f *testFixture) registerForDeployer(manifest model.Manifest) podbuilder.PodBuilder {
  3766  	pb := podbuilder.New(f.t, manifest)
  3767  	f.b.targetObjectTree[manifest.K8sTarget().ID()] = pb.ObjectTreeEntities()
  3768  	return pb
  3769  }
  3770  
  3771  func (f *testFixture) podEvent(pod *v1.Pod) {
  3772  	f.t.Helper()
  3773  	for _, ownerRef := range pod.OwnerReferences {
  3774  		_, err := f.kClient.GetMetaByReference(f.ctx, v1.ObjectReference{
  3775  			UID:  ownerRef.UID,
  3776  			Name: ownerRef.Name,
  3777  		})
  3778  		if err != nil {
  3779  			f.t.Logf("Owner reference uid[%s] name[%s] for pod[%s] does not exist in fake client",
  3780  				ownerRef.UID, ownerRef.Name, pod.Name)
  3781  		}
  3782  	}
  3783  
  3784  	f.kClient.UpsertPod(pod)
  3785  }
  3786  
  3787  func (f *testFixture) newManifest(name string) model.Manifest {
  3788  	iTarget := NewSanchoLiveUpdateImageTarget(f)
  3789  	return manifestbuilder.New(f, model.ManifestName(name)).
  3790  		WithK8sYAML(SanchoYAML).
  3791  		WithImageTarget(iTarget).
  3792  		Build()
  3793  }
  3794  
  3795  func (f *testFixture) newManifestWithRef(name string, ref reference.Named) model.Manifest {
  3796  	refSel := container.NewRefSelector(ref)
  3797  
  3798  	iTarget := NewSanchoLiveUpdateImageTarget(f)
  3799  	iTarget = iTarget.MustWithRef(refSel)
  3800  
  3801  	return manifestbuilder.New(f, model.ManifestName(name)).
  3802  		WithK8sYAML(SanchoYAML).
  3803  		WithImageTarget(iTarget).
  3804  		Build()
  3805  }
  3806  
  3807  func (f *testFixture) newDockerBuildManifestWithBuildPath(name string, path string) model.Manifest {
  3808  	db := v1alpha1.DockerImageSpec{DockerfileContents: "FROM alpine", Context: path}
  3809  	iTarget := NewSanchoDockerBuildImageTarget(f).WithDockerImage(db)
  3810  	iTarget = iTarget.MustWithRef(container.MustParseSelector(strings.ToLower(name))) // each target should have a unique ID
  3811  	return manifestbuilder.New(f, model.ManifestName(name)).
  3812  		WithK8sYAML(SanchoYAML).
  3813  		WithImageTarget(iTarget).
  3814  		Build()
  3815  }
  3816  
// assertAllBuildsConsumed fails the test if any BuildAndDeploy calls were
// recorded but never read via nextCall/nextCallComplete.
//
// Closing the calls channel lets the range drain anything still buffered;
// it also means no further builds may be issued after this call.
func (f *testFixture) assertAllBuildsConsumed() {
	f.t.Helper()
	close(f.b.calls)

	for call := range f.b.calls {
		f.T().Fatalf("Build not consumed: %s", spew.Sdump(call))
	}
}
  3825  
  3826  func (f *testFixture) loadAndStart(initOptions ...initOption) {
  3827  	f.t.Helper()
  3828  	ia := InitAction{
  3829  		TiltfilePath: f.JoinPath("Tiltfile"),
  3830  		TerminalMode: store.TerminalModeHUD,
  3831  		StartTime:    f.Now(),
  3832  	}
  3833  	for _, opt := range initOptions {
  3834  		ia = opt(ia)
  3835  	}
  3836  	f.Init(ia)
  3837  }
  3838  
  3839  func (f *testFixture) WriteConfigFiles(args ...string) {
  3840  	f.t.Helper()
  3841  	if (len(args) % 2) != 0 {
  3842  		f.T().Fatalf("WriteConfigFiles needs an even number of arguments; got %d", len(args))
  3843  	}
  3844  
  3845  	for i := 0; i < len(args); i += 2 {
  3846  		filename := f.JoinPath(args[i])
  3847  		contents := args[i+1]
  3848  		f.WriteFile(filename, contents)
  3849  
  3850  		// Fire an FS event thru the normal pipeline, so that manifests get marked dirty.
  3851  		f.fsWatcher.Events <- watch.NewFileEvent(filename)
  3852  	}
  3853  }
  3854  
// setupDCFixture copies a two-service docker-compose project (compose file,
// Dockerfile, Tiltfile) from testdata into the fixture's temp dir, loads it
// with the real tiltfile loader, and returns the two resulting manifests.
func (f *testFixture) setupDCFixture() (redis, server model.Manifest) {
	// Copy the fixture compose config into the temp dir.
	dcp := filepath.Join(originalWD, "testdata", "fixture_docker-config.yml")
	dcpc, err := os.ReadFile(dcp)
	if err != nil {
		f.T().Fatal(err)
	}
	f.WriteFile("docker-compose.yml", string(dcpc))

	// Copy the server Dockerfile referenced by the compose file.
	dfp := filepath.Join(originalWD, "testdata", "server.dockerfile")
	dfc, err := os.ReadFile(dfp)
	if err != nil {
		f.T().Fatal(err)
	}
	f.WriteFile("Dockerfile", string(dfc))

	f.WriteFile("Tiltfile", `docker_compose('docker-compose.yml')`)

	// Point the fake docker-compose client at the temp dir and stub its
	// `config` output with the raw compose file contents.
	f.dcc.WorkDir = f.Path()
	f.dcc.ConfigOutput = string(dcpc)

	// Run a real tiltfile load against the files written above.
	tlr := f.realTFL.Load(f.ctx, apitiltfile.MainTiltfile(f.JoinPath("Tiltfile"), nil), nil)
	if tlr.Error != nil {
		f.T().Fatal(tlr.Error)
	}

	if len(tlr.Manifests) != 2 {
		f.T().Fatalf("Expected two manifests. Actual: %v", tlr.Manifests)
	}

	require.NoError(f.t, model.InferImageProperties(tlr.Manifests))

	return tlr.Manifests[0], tlr.Manifests[1]
}
  3888  
  3889  func (f *testFixture) setBuildLogOutput(id model.TargetID, output string) {
  3890  	f.b.buildLogOutput[id] = output
  3891  }
  3892  
  3893  func (f *testFixture) hudResource(name model.ManifestName) view.Resource {
  3894  	res, ok := f.fakeHud().LastView.Resource(name)
  3895  	if !ok {
  3896  		f.T().Fatalf("Resource not found: %s", name)
  3897  	}
  3898  	return res
  3899  }
  3900  
  3901  func (f *testFixture) completeBuildForManifest(m model.Manifest) {
  3902  	f.b.completeBuild(targetIDStringForManifest(m))
  3903  }
  3904  
  3905  func (f *testFixture) setDisableState(mn model.ManifestName, isDisabled bool) {
  3906  	err := tiltconfigmap.UpsertDisableConfigMap(f.ctx, f.ctrlClient, fmt.Sprintf("%s-disable", mn), "isDisabled", isDisabled)
  3907  	require.NoError(f.t, err)
  3908  
  3909  	f.WaitUntil("new disable state reflected in UIResource", func(state store.EngineState) bool {
  3910  		if uir, ok := state.UIResources[mn.String()]; ok {
  3911  			return uir.Status.DisableStatus.DisabledCount > 0 == isDisabled
  3912  		}
  3913  		return false
  3914  	})
  3915  }
  3916  
// fixtureSub is a minimal store subscriber that signals on ch every time the
// store notifies it of a change; tests use it to wake WaitUntil loops.
type fixtureSub struct {
	// ch receives true on each store change notification.
	ch chan bool
}
  3920  
  3921  func (s fixtureSub) OnChange(ctx context.Context, st store.RStore, _ store.ChangeSummary) error {
  3922  	s.ch <- true
  3923  	return nil
  3924  }
  3925  
  3926  func (f *testFixture) ensureCluster() {
  3927  	f.ensureClusterNamed(v1alpha1.ClusterNameDefault)
  3928  }
  3929  
  3930  func (f *testFixture) ensureClusterNamed(name string) {
  3931  	f.t.Helper()
  3932  	err := f.ctrlClient.Create(f.ctx, &v1alpha1.Cluster{
  3933  		ObjectMeta: metav1.ObjectMeta{
  3934  			Name: name,
  3935  		},
  3936  		Spec: v1alpha1.ClusterSpec{
  3937  			Connection: &v1alpha1.ClusterConnection{
  3938  				Kubernetes: &v1alpha1.KubernetesClusterConnection{},
  3939  			},
  3940  		},
  3941  	})
  3942  	require.NoError(f.T(), err)
  3943  }
  3944  
  3945  func assertLineMatches(t *testing.T, lines []string, re *regexp.Regexp) {
  3946  	for _, line := range lines {
  3947  		if re.MatchString(line) {
  3948  			return
  3949  		}
  3950  	}
  3951  	t.Fatalf("Expected line to match: %s. Lines: %v", re.String(), lines)
  3952  }
  3953  
  3954  func assertContainsOnce(t *testing.T, s string, val string) {
  3955  	assert.Contains(t, s, val)
  3956  	assert.Equal(t, 1, strings.Count(s, val), "Expected string to appear only once")
  3957  }
  3958  
  3959  // stringifyTargetIDs attempts to make a unique string to identify any set of targets
  3960  // (order-agnostic) by sorting and then concatenating the target IDs.
  3961  func stringifyTargetIDs(targets []model.TargetSpec) string {
  3962  	ids := make([]string, len(targets))
  3963  	for i, t := range targets {
  3964  		ids[i] = t.ID().String()
  3965  	}
  3966  	sort.Strings(ids)
  3967  	return strings.Join(ids, "::")
  3968  }
  3969  
  3970  func targetIDStringForManifest(m model.Manifest) string {
  3971  	return stringifyTargetIDs(m.TargetSpecs())
  3972  }