github.com/tilt-dev/tilt@v0.36.0/internal/controllers/core/kubernetesapply/reconciler.go (about)

     1  package kubernetesapply
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"fmt"
     7  	"sync"
     8  
     9  	"github.com/pkg/errors"
    10  	batchv1 "k8s.io/api/batch/v1"
    11  	v1 "k8s.io/api/core/v1"
    12  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    13  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    14  	"k8s.io/apimachinery/pkg/runtime"
    15  	"k8s.io/apimachinery/pkg/runtime/schema"
    16  	"k8s.io/apimachinery/pkg/types"
    17  	ctrl "sigs.k8s.io/controller-runtime"
    18  	"sigs.k8s.io/controller-runtime/pkg/builder"
    19  	"sigs.k8s.io/controller-runtime/pkg/client"
    20  	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
    21  	"sigs.k8s.io/controller-runtime/pkg/handler"
    22  	"sigs.k8s.io/controller-runtime/pkg/reconcile"
    23  
    24  	"github.com/tilt-dev/tilt/internal/container"
    25  	"github.com/tilt-dev/tilt/internal/controllers/apicmp"
    26  	"github.com/tilt-dev/tilt/internal/controllers/apis/configmap"
    27  	"github.com/tilt-dev/tilt/internal/controllers/apis/imagemap"
    28  	"github.com/tilt-dev/tilt/internal/controllers/apis/trigger"
    29  	"github.com/tilt-dev/tilt/internal/controllers/indexer"
    30  	"github.com/tilt-dev/tilt/internal/k8s"
    31  	"github.com/tilt-dev/tilt/internal/localexec"
    32  	"github.com/tilt-dev/tilt/internal/store"
    33  	"github.com/tilt-dev/tilt/internal/store/kubernetesapplys"
    34  	"github.com/tilt-dev/tilt/internal/timecmp"
    35  	"github.com/tilt-dev/tilt/pkg/apis"
    36  	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
    37  	"github.com/tilt-dev/tilt/pkg/logger"
    38  	"github.com/tilt-dev/tilt/pkg/model"
    39  )
    40  
    41  type deleteSpec struct {
    42  	entities  []k8s.K8sEntity
    43  	deleteCmd *v1alpha1.KubernetesApplyCmd
    44  	cluster   *v1alpha1.Cluster
    45  }
    46  
    47  type Reconciler struct {
    48  	st         store.RStore
    49  	k8sClient  k8s.Client
    50  	ctrlClient ctrlclient.Client
    51  	indexer    *indexer.Indexer
    52  	execer     localexec.Execer
    53  	requeuer   *indexer.Requeuer
    54  
    55  	mu sync.Mutex
    56  
    57  	// Protected by the mutex.
    58  	results map[types.NamespacedName]*Result
    59  }
    60  
    61  func (r *Reconciler) CreateBuilder(mgr ctrl.Manager) (*builder.Builder, error) {
    62  	b := ctrl.NewControllerManagedBy(mgr).
    63  		For(&v1alpha1.KubernetesApply{}).
    64  		Owns(&v1alpha1.KubernetesDiscovery{}).
    65  		WatchesRawSource(r.requeuer).
    66  		Watches(&v1alpha1.ImageMap{},
    67  			handler.EnqueueRequestsFromMapFunc(r.indexer.Enqueue)).
    68  		Watches(&v1alpha1.ConfigMap{},
    69  			handler.EnqueueRequestsFromMapFunc(r.indexer.Enqueue)).
    70  		Watches(&v1alpha1.Cluster{},
    71  			handler.EnqueueRequestsFromMapFunc(r.indexer.Enqueue))
    72  
    73  	trigger.SetupControllerRestartOn(b, r.indexer, func(obj ctrlclient.Object) *v1alpha1.RestartOnSpec {
    74  		return obj.(*v1alpha1.KubernetesApply).Spec.RestartOn
    75  	})
    76  
    77  	return b, nil
    78  }
    79  
    80  func NewReconciler(ctrlClient ctrlclient.Client, k8sClient k8s.Client, scheme *runtime.Scheme, st store.RStore, execer localexec.Execer) *Reconciler {
    81  	return &Reconciler{
    82  		ctrlClient: ctrlClient,
    83  		k8sClient:  k8sClient,
    84  		indexer:    indexer.NewIndexer(scheme, indexKubernetesApply),
    85  		execer:     execer,
    86  		st:         st,
    87  		results:    make(map[types.NamespacedName]*Result),
    88  		requeuer:   indexer.NewRequeuer(),
    89  	}
    90  }
    91  
    92  // Reconcile applies the KubernetesApply's objects to the cluster and manages their lifecycle.
    93  func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
    94  	nn := request.NamespacedName
    95  
    96  	var ka v1alpha1.KubernetesApply
    97  	err := r.ctrlClient.Get(ctx, nn, &ka)
    98  	r.indexer.OnReconcile(nn, &ka)
    99  	if err != nil && !apierrors.IsNotFound(err) {
   100  		return ctrl.Result{}, err
   101  	}
   102  
   103  	if apierrors.IsNotFound(err) || !ka.ObjectMeta.DeletionTimestamp.IsZero() {
   104  		result, err := r.manageOwnedKubernetesDiscovery(ctx, nn, nil)
   105  		if err != nil {
   106  			return ctrl.Result{}, err
   107  		}
   108  
   109  		r.recordDelete(nn)
   110  		toDelete := r.garbageCollect(nn, true)
   111  		r.bestEffortDelete(ctx, nn, toDelete, "garbage collecting Kubernetes objects")
   112  		r.clearRecord(nn)
   113  
   114  		r.st.Dispatch(kubernetesapplys.NewKubernetesApplyDeleteAction(request.NamespacedName.Name))
   115  		return result, nil
   116  	}
   117  
   118  	// The apiserver is the source of truth, and will ensure the engine state is up to date.
   119  	r.st.Dispatch(kubernetesapplys.NewKubernetesApplyUpsertAction(&ka))
   120  
   121  	// Get the ConfigMap's disable status.
   122  	ctx = store.MustObjectLogHandler(ctx, r.st, &ka)
   123  	disableStatus, err := configmap.MaybeNewDisableStatus(ctx, r.ctrlClient, ka.Spec.DisableSource, ka.Status.DisableStatus)
   124  	if err != nil {
   125  		return ctrl.Result{}, err
   126  	}
   127  
   128  	r.recordDisableStatus(nn, ka.Spec, *disableStatus)
   129  
   130  	// Delete the applied Kubernetes objects if the KubernetesApply is disabled.
   131  	isDisabling := false
   132  	gcReason := "garbage collecting Kubernetes objects"
   133  	if disableStatus.State == v1alpha1.DisableStateDisabled {
   134  		gcReason = "deleting disabled Kubernetes objects"
   135  		isDisabling = true
   136  	} else {
   137  		// Fetch all the objects needed to apply this YAML.
   138  		var cluster v1alpha1.Cluster
   139  		if ka.Spec.Cluster != "" {
   140  			err := r.ctrlClient.Get(ctx, types.NamespacedName{Name: ka.Spec.Cluster}, &cluster)
   141  			if client.IgnoreNotFound(err) != nil {
   142  				return ctrl.Result{}, err
   143  			}
   144  		}
   145  
   146  		imageMaps, err := imagemap.NamesToObjects(ctx, r.ctrlClient, ka.Spec.ImageMaps)
   147  		if err != nil {
   148  			return ctrl.Result{}, err
   149  		}
   150  
   151  		lastRestartEvent, _, _, err := trigger.LastRestartEvent(ctx, r.ctrlClient, ka.Spec.RestartOn)
   152  		if err != nil {
   153  			return ctrl.Result{}, err
   154  		}
   155  
   156  		// Apply to the cluster if necessary.
   157  		//
   158  		// TODO(nick): Like with other reconcilers, there should always
   159  		// be a reason why we're not deploying, and we should update the
   160  		// Status field of KubernetesApply with that reason.
   161  		if r.shouldDeployOnReconcile(request.NamespacedName, &ka, &cluster, imageMaps, lastRestartEvent) {
   162  			_ = r.forceApplyHelper(ctx, nn, ka.Spec, &cluster, imageMaps)
   163  			gcReason = "garbage collecting removed Kubernetes objects"
   164  		}
   165  	}
   166  
   167  	toDelete := r.garbageCollect(nn, isDisabling)
   168  	r.bestEffortDelete(ctx, nn, toDelete, gcReason)
   169  
   170  	newKA, err := r.maybeUpdateStatus(ctx, nn, &ka)
   171  	if err != nil {
   172  		return ctrl.Result{}, err
   173  	}
   174  
   175  	return r.manageOwnedKubernetesDiscovery(ctx, nn, newKA)
   176  }
   177  
   178  // Determine if we should deploy the current YAML.
   179  //
   180  // Ensures:
   181  //  1. We have enough info to deploy, and
   182  //  2. Either we haven't deployed before,
   183  //     or one of the inputs has changed since the last deploy.
   184  func (r *Reconciler) shouldDeployOnReconcile(
   185  	nn types.NamespacedName,
   186  	ka *v1alpha1.KubernetesApply,
   187  	cluster *v1alpha1.Cluster,
   188  	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
   189  	lastRestartEvent metav1.MicroTime,
   190  ) bool {
   191  	if ka.Annotations[v1alpha1.AnnotationManagedBy] != "" {
   192  		// Until resource dependencies are expressed in the API,
   193  		// we can't use reconciliation to deploy KubernetesApply objects
   194  		// managed by the buildcontrol engine.
   195  		return false
   196  	}
   197  
   198  	if ka.Spec.Cluster != "" {
   199  		isClusterOK := cluster != nil && cluster.Name != "" &&
   200  			cluster.Status.Error == "" && cluster.Status.Connection != nil
   201  		if !isClusterOK {
   202  			// Wait for the cluster to start.
   203  			return false
   204  		}
   205  	}
   206  
   207  	for _, imageMapName := range ka.Spec.ImageMaps {
   208  		_, ok := imageMaps[types.NamespacedName{Name: imageMapName}]
   209  		if !ok {
   210  			// We haven't built the images to deploy yet.
   211  			return false
   212  		}
   213  	}
   214  
   215  	r.mu.Lock()
   216  	result, ok := r.results[nn]
   217  	r.mu.Unlock()
   218  
   219  	if !ok || result.Status.LastApplyTime.IsZero() {
   220  		// We've never successfully deployed before, so deploy now.
   221  		return true
   222  	}
   223  
   224  	if !apicmp.DeepEqual(ka.Spec, result.Spec) {
   225  		// The YAML to deploy changed.
   226  		return true
   227  	}
   228  
   229  	imageMapNames := ka.Spec.ImageMaps
   230  	if len(imageMapNames) != len(result.ImageMapSpecs) ||
   231  		len(imageMapNames) != len(result.ImageMapStatuses) {
   232  		return true
   233  	}
   234  
   235  	for i, name := range ka.Spec.ImageMaps {
   236  		im := imageMaps[types.NamespacedName{Name: name}]
   237  		if !apicmp.DeepEqual(im.Spec, result.ImageMapSpecs[i]) {
   238  			return true
   239  		}
   240  		if !apicmp.DeepEqual(im.Status, result.ImageMapStatuses[i]) {
   241  			return true
   242  		}
   243  	}
   244  
   245  	if timecmp.After(lastRestartEvent, result.Status.LastApplyTime) {
   246  		return true
   247  	}
   248  
   249  	return false
   250  }
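
// A minimal sketch of the first guard above, assuming a Reconciler r; the
// helper name and annotation value are hypothetical. Engine-managed objects
// are never deployed from Reconcile; they go through ForceApply instead.
func exampleManagedBySkipsDeploy(r *Reconciler) bool {
	ka := &v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Annotations: map[string]string{v1alpha1.AnnotationManagedBy: "buildcontrol"},
		},
	}
	// Returns false regardless of the other inputs.
	return r.shouldDeployOnReconcile(types.NamespacedName{Name: "example"}, ka, nil, nil, metav1.MicroTime{})
}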
   251  
   252  // Inject the images into the YAML and apply it to the cluster, unconditionally.
   253  //
   254  // Does not update the API server, but does trigger a re-reconcile
   255  // so that the reconciliation loop will handle it.
   256  //
   257  // We expose this as a public method as a hack! Currently, in Tilt, BuildController
   258  // handles dependencies between resources. The API server doesn't know about build
   259  // dependencies yet. So Tiltfile-owned resources are applied manually, rather than
   260  // going through the normal reconcile system.
   261  func (r *Reconciler) ForceApply(
   262  	ctx context.Context,
   263  	nn types.NamespacedName,
   264  	spec v1alpha1.KubernetesApplySpec,
   265  	cluster *v1alpha1.Cluster,
   266  	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap) v1alpha1.KubernetesApplyStatus {
   267  	status := r.forceApplyHelper(ctx, nn, spec, cluster, imageMaps)
   268  	r.requeuer.Add(nn)
   269  	return status
   270  }
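
// A sketch of how the buildcontrol engine might call ForceApply, assuming the
// caller already fetched the Cluster and ImageMaps; the helper name is
// hypothetical. The status comes back synchronously, while the requeue keeps
// the apiserver copy in sync.
func exampleForceApply(ctx context.Context, r *Reconciler, nn types.NamespacedName,
	spec v1alpha1.KubernetesApplySpec, cluster *v1alpha1.Cluster,
	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap) error {
	status := r.ForceApply(ctx, nn, spec, cluster, imageMaps)
	if status.Error != "" {
		return fmt.Errorf("apply failed: %s", status.Error)
	}
	return nil
}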
   271  
   272  // A helper that applies the given specs to the cluster,
   273  // tracking the state of the deploy in the results map.
   274  func (r *Reconciler) forceApplyHelper(
   275  	ctx context.Context,
   276  	nn types.NamespacedName,
   277  	spec v1alpha1.KubernetesApplySpec,
   278  	cluster *v1alpha1.Cluster,
   279  	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
   280  ) v1alpha1.KubernetesApplyStatus {
   281  
   282  	startTime := apis.NowMicro()
   283  	status := applyResult{
   284  		LastApplyStartTime: startTime,
   285  	}
   286  
   287  	recordErrorStatus := func(err error) v1alpha1.KubernetesApplyStatus {
   288  		status.LastApplyTime = apis.NowMicro()
   289  		status.Error = err.Error()
   290  		return r.recordApplyResult(nn, spec, cluster, imageMaps, status)
   291  	}
   292  
   293  	inputHash, err := ComputeInputHash(spec, imageMaps)
   294  	if err != nil {
   295  		return recordErrorStatus(err)
   296  	}
   297  
   298  	var deployed []k8s.K8sEntity
   299  	deployCtx := r.indentLogger(ctx)
   300  	if spec.YAML != "" {
   301  		deployed, err = r.runYAMLDeploy(deployCtx, spec, imageMaps)
   302  		if err != nil {
   303  			return recordErrorStatus(err)
   304  		}
   305  	} else {
   306  		deployed, err = r.runCmdDeploy(deployCtx, spec, cluster, imageMaps)
   307  		if err != nil {
   308  			return recordErrorStatus(err)
   309  		}
   310  	}
   311  
   312  	status.LastApplyTime = apis.NowMicro()
   313  	status.AppliedInputHash = inputHash
   314  	for _, d := range deployed {
   315  		d.Clean()
   316  	}
   317  
   318  	resultYAML, err := k8s.SerializeSpecYAML(deployed)
   319  	if err != nil {
   320  		return recordErrorStatus(err)
   321  	}
   322  
   323  	status.ResultYAML = resultYAML
   324  	status.Objects = deployed
   325  	return r.recordApplyResult(nn, spec, cluster, imageMaps, status)
   326  }
   327  
   328  func (r *Reconciler) printAppliedReport(ctx context.Context, msg string, deployed []k8s.K8sEntity) {
   329  	l := logger.Get(ctx)
   330  	l.Infof("%s", msg)
   331  
   332  	// Use a min component count of 2 when computing names,
   333  	// so that the resource type appears in the display name.
   334  	displayNames := k8s.UniqueNames(deployed, 2)
   335  	for _, displayName := range displayNames {
   336  		l.Infof("  → %s", displayName)
   337  	}
   338  }
   339  
   340  func (r *Reconciler) runYAMLDeploy(ctx context.Context, spec v1alpha1.KubernetesApplySpec, imageMaps map[types.NamespacedName]*v1alpha1.ImageMap) ([]k8s.K8sEntity, error) {
   341  	// Create API objects.
   342  	newK8sEntities, err := r.createEntitiesToDeploy(ctx, imageMaps, spec)
   343  	if err != nil {
   344  		return newK8sEntities, err
   345  	}
   346  
   347  	logger.Get(ctx).Infof("Applying YAML to cluster")
   348  
   349  	timeout := spec.Timeout.Duration
   350  	if timeout == 0 {
   351  		timeout = v1alpha1.KubernetesApplyTimeoutDefault
   352  	}
   353  
   354  	deployed, err := r.k8sClient.Upsert(ctx, newK8sEntities, timeout)
   355  	if err != nil {
   356  		r.printAppliedReport(ctx, "Tried to apply objects to cluster:", newK8sEntities)
   357  		return nil, err
   358  	}
   359  	r.printAppliedReport(ctx, "Objects applied to cluster:", deployed)
   360  
   361  	return deployed, nil
   362  }
   363  
   364  func (r *Reconciler) maybeInjectKubeconfig(cmd *model.Cmd, cluster *v1alpha1.Cluster) error {
   365  	if cluster == nil ||
   366  		cluster.Status.Connection == nil ||
   367  		cluster.Status.Connection.Kubernetes == nil {
   368  		return errors.New("no kubernetes connection")
   369  	}
   370  	kubeconfig := cluster.Status.Connection.Kubernetes.ConfigPath
   371  	if kubeconfig == "" {
   372  		return fmt.Errorf("missing kubeconfig in cluster %s", cluster.Name)
   373  	}
   374  	cmd.Env = append(cmd.Env, fmt.Sprintf("KUBECONFIG=%s", kubeconfig))
   375  	return nil
   376  }
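
// A small sketch of the guard above (hypothetical helper): a Cluster with no
// Kubernetes connection yields an error and leaves the command env untouched.
func exampleKubeconfigGuard(r *Reconciler) error {
	var cmd model.Cmd
	// Returns "no kubernetes connection"; cmd.Env gains no KUBECONFIG entry.
	return r.maybeInjectKubeconfig(&cmd, &v1alpha1.Cluster{})
}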
   377  
   378  func (r *Reconciler) runCmdDeploy(ctx context.Context, spec v1alpha1.KubernetesApplySpec,
   379  	cluster *v1alpha1.Cluster,
   380  	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap) ([]k8s.K8sEntity, error) {
   381  	timeout := spec.Timeout.Duration
   382  	if timeout == 0 {
   383  		timeout = v1alpha1.KubernetesApplyTimeoutDefault
   384  	}
   385  	ctx, cancel := context.WithTimeout(ctx, timeout)
   386  	defer cancel()
   387  
   388  	var stdoutBuf bytes.Buffer
   389  	runIO := localexec.RunIO{
   390  		Stdout: &stdoutBuf,
   391  		Stderr: logger.Get(ctx).Writer(logger.InfoLvl),
   392  	}
   393  
   394  	cmd := toModelCmd(*spec.ApplyCmd)
   395  	err := imagemap.InjectIntoDeployEnv(&cmd, spec.ImageMaps, imageMaps)
   396  	if err != nil {
   397  		return nil, err
   398  	}
   399  	err = r.maybeInjectKubeconfig(&cmd, cluster)
   400  	if err != nil {
   401  		return nil, err
   402  	}
   403  
   404  	logger.Get(ctx).Infof("Running cmd: %s", cmd.String())
   405  	exitCode, err := r.execer.Run(ctx, cmd, runIO)
   406  	if err != nil {
   407  		return nil, fmt.Errorf("apply command failed: %v", err)
   408  	}
   409  
   410  	if exitCode != 0 {
   411  		var stdoutLog string
   412  		if stdoutBuf.Len() != 0 {
   413  			stdoutLog = fmt.Sprintf("\nstdout:\n%s\n", overflowEllipsis(stdoutBuf.String()))
   414  		}
   415  		if ctx.Err() != nil {
   416  			// process returned a non-zero exit code (generally 137) because it was killed by us
   417  			return nil, fmt.Errorf("apply command timed out after %s - see https://docs.tilt.dev/api.html#api.update_settings for how to increase%s", timeout.String(), stdoutLog)
   418  		}
   419  		return nil, fmt.Errorf("apply command exited with status %d%s", exitCode, stdoutLog)
   420  	}
   421  
   422  	// Don't pass the bytes.Buffer directly to the YAML parser: the parser would consume it, and then we couldn't print it on failure.
   423  	stdout := stdoutBuf.Bytes()
   424  	entities, err := k8s.ParseYAML(bytes.NewReader(stdout))
   425  	if err != nil {
   426  		return nil, fmt.Errorf("apply command returned malformed YAML: %v\nstdout:\n%s\n", err, overflowEllipsis(string(stdout)))
   427  	}
   428  
   429  	r.printAppliedReport(ctx, "Objects applied to cluster:", entities)
   430  
   431  	return entities, nil
   432  }
   433  
   434  const maxOverflow = 500
   435  
   436  // The stdout of a well-behaved apply command can be 100K+ (especially for CRDs), so truncate the middle.
   437  func overflowEllipsis(str string) string {
   438  	if len(str) > maxOverflow {
   439  		return fmt.Sprintf("%s\n... [truncated by Tilt] ...\n%s", str[0:maxOverflow/2], str[len(str)-maxOverflow/2:])
   440  	}
   441  	return str
   442  }
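
// A quick illustration of the truncation above, with hypothetical input:
// anything longer than maxOverflow keeps only the first and last 250 bytes.
func exampleOverflowEllipsis() string {
	long := string(bytes.Repeat([]byte("x"), 2*maxOverflow))
	// Returns the head, "... [truncated by Tilt] ...", then the tail.
	return overflowEllipsis(long)
}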
   443  
   444  func (r *Reconciler) indentLogger(ctx context.Context) context.Context {
   445  	l := logger.Get(ctx)
   446  	newL := logger.NewPrefixedLogger(logger.Blue(l).Sprint("     "), l)
   447  	return logger.WithLogger(ctx, newL)
   448  }
   449  
   450  type injectResult struct {
   451  	meta     k8s.EntityMeta
   452  	imageMap *v1alpha1.ImageMap
   453  }
   454  
   455  func (r *Reconciler) createEntitiesToDeploy(ctx context.Context,
   456  	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
   457  	spec v1alpha1.KubernetesApplySpec) ([]k8s.K8sEntity, error) {
   458  	newK8sEntities := []k8s.K8sEntity{}
   459  
   460  	entities, err := k8s.ParseYAMLFromString(spec.YAML)
   461  	if err != nil {
   462  		return nil, err
   463  	}
   464  
   465  	locators, err := k8s.ParseImageLocators(spec.ImageLocators)
   466  	if err != nil {
   467  		return nil, err
   468  	}
   469  
   470  	var injectResults []injectResult
   471  	imageMapNames := spec.ImageMaps
   472  	injectedImageMaps := map[string]bool{}
   473  	for _, e := range entities {
   474  		e, err = k8s.InjectLabels(e, []model.LabelPair{
   475  			k8s.TiltManagedByLabel(),
   476  		})
   477  		if err != nil {
   478  			return nil, errors.Wrap(err, "deploy")
   479  		}
   480  
   481  		// If we're redeploying these workloads in response to image
   482  		// changes, we make sure the image pull policy isn't set to "Always".
   483  		// Frequent applies don't work well with that setting, and it makes
   484  		// things slower. See discussion:
   485  		// https://github.com/tilt-dev/tilt/issues/3209
   486  		if len(imageMaps) > 0 {
   487  			e, err = k8s.InjectImagePullPolicy(e, v1.PullIfNotPresent)
   488  			if err != nil {
   489  				return nil, err
   490  			}
   491  		}
   492  
   493  		if len(imageMaps) > 0 {
   494  			// StatefulSet pods should be managed in parallel when we're doing iterative
   495  			// development. See discussion:
   496  			// https://github.com/tilt-dev/tilt/issues/1962
   497  			// https://github.com/tilt-dev/tilt/issues/3906
   498  			e = k8s.InjectParallelPodManagementPolicy(e)
   499  		}
   500  
   501  		// Set the pull policy to IfNotPresent, to ensure that
   502  		// we get a locally built image instead of the remote one.
   503  		policy := v1.PullIfNotPresent
   504  		for _, imageMapName := range imageMapNames {
   505  			imageMap := imageMaps[types.NamespacedName{Name: imageMapName}]
   506  			imageMapSpec := imageMap.Spec
   507  			selector, err := container.SelectorFromImageMap(imageMapSpec)
   508  			if err != nil {
   509  				return nil, err
   510  			}
   511  			matchInEnvVars := imageMapSpec.MatchInEnvVars
   512  
   513  			if imageMap.Status.Image == "" {
   514  				return nil, fmt.Errorf("internal error: missing image status")
   515  			}
   516  
   517  			ref, err := container.ParseNamed(imageMap.Status.ImageFromCluster)
   518  			if err != nil {
   519  				return nil, fmt.Errorf("parsing image map status: %v", err)
   520  			}
   521  
   522  			var replaced bool
   523  			e, replaced, err = k8s.InjectImageDigest(e, selector, ref, locators, matchInEnvVars, policy)
   524  			if err != nil {
   525  				return nil, err
   526  			}
   527  			if replaced {
   528  				injectedImageMaps[imageMapName] = true
   529  				injectResults = append(injectResults, injectResult{
   530  					meta:     e,
   531  					imageMap: imageMap,
   532  				})
   533  
   534  				if imageMapSpec.OverrideCommand != nil || imageMapSpec.OverrideArgs != nil {
   535  					e, err = k8s.InjectCommandAndArgs(e, ref, imageMapSpec.OverrideCommand, imageMapSpec.OverrideArgs)
   536  					if err != nil {
   537  						return nil, err
   538  					}
   539  				}
   540  			}
   541  		}
   542  
   543  		// This needs to be after all the other injections, to ensure the hash includes the Tilt-generated
   544  		// image tag, etc.
   545  		e, err := k8s.InjectPodTemplateSpecHashes(e)
   546  		if err != nil {
   547  			return nil, errors.Wrap(err, "injecting pod template hash")
   548  		}
   549  
   550  		newK8sEntities = append(newK8sEntities, e)
   551  	}
   552  
   553  	for _, name := range imageMapNames {
   554  		if !injectedImageMaps[name] {
   555  			return nil, fmt.Errorf("Docker image missing from yaml: %s", name)
   556  		}
   557  	}
   558  
   559  	l := logger.Get(ctx)
   560  	if l.Level().ShouldDisplay(logger.DebugLvl) {
   561  		if len(injectResults) != 0 {
   562  			l.Debugf("Injecting images into Kubernetes YAML:")
   563  			meta := make([]k8s.EntityMeta, len(injectResults))
   564  			for i := range injectResults {
   565  				meta[i] = injectResults[i].meta
   566  			}
   567  			names := k8s.UniqueNamesMeta(meta, 2)
   568  			for i := range injectResults {
   569  				l.Debugf(
   570  					"  → %s: %s ⇒ %s",
   571  					names[i],
   572  					injectResults[i].imageMap.Spec.Selector,
   573  					injectResults[i].imageMap.Status.Image,
   574  				)
   575  			}
   576  		} else {
   577  			l.Debugf("No images injected into Kubernetes YAML")
   578  		}
   579  	}
   580  
   581  	return newK8sEntities, nil
   582  }
   583  
   584  type applyResult struct {
   585  	ResultYAML         string
   586  	Error              string
   587  	LastApplyTime      metav1.MicroTime
   588  	LastApplyStartTime metav1.MicroTime
   589  	AppliedInputHash   string
   590  	Objects            []k8s.K8sEntity
   591  }
   592  
   593  // conditionsFromApply extracts any conditions based on the result.
   594  //
   595  // Currently, this is only used as part of special handling for Jobs, which
   596  // might have already completed successfully in the past.
   597  func conditionsFromApply(result applyResult) []metav1.Condition {
   598  	if result.Error != "" || len(result.Objects) == 0 {
   599  		return nil
   600  	}
   601  
   602  	for _, e := range result.Objects {
   603  		job, ok := e.Obj.(*batchv1.Job)
   604  		if !ok {
   605  			continue
   606  		}
   607  		for _, cond := range job.Status.Conditions {
   608  			if cond.Type == batchv1.JobComplete && cond.Status == v1.ConditionTrue {
   609  				return []metav1.Condition{
   610  					{
   611  						Type:   v1alpha1.ApplyConditionJobComplete,
   612  						Status: metav1.ConditionTrue,
   613  					},
   614  				}
   615  			}
   616  		}
   617  	}
   618  	return nil
   619  }
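
// A sketch of the Job special case above, with a hypothetical Job: an applied
// Job that already completed surfaces as an ApplyConditionJobComplete condition.
func exampleJobCompleteCondition() []metav1.Condition {
	job := &batchv1.Job{
		Status: batchv1.JobStatus{
			Conditions: []batchv1.JobCondition{
				{Type: batchv1.JobComplete, Status: v1.ConditionTrue},
			},
		},
	}
	result := applyResult{Objects: []k8s.K8sEntity{{Obj: job}}}
	// Returns a single condition of type v1alpha1.ApplyConditionJobComplete.
	return conditionsFromApply(result)
}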
   620  
   621  // Create a result object if necessary. Caller must hold the mutex.
   622  func (r *Reconciler) ensureResultExists(nn types.NamespacedName) *Result {
   623  	existing, hasExisting := r.results[nn]
   624  	if hasExisting {
   625  		return existing
   626  	}
   627  
   628  	result := &Result{
   629  		DanglingObjects: objectRefSet{},
   630  	}
   631  	r.results[nn] = result
   632  	return result
   633  }
   634  
   635  // Record the results of a deploy to the local Result map.
   636  func (r *Reconciler) recordApplyResult(
   637  	nn types.NamespacedName,
   638  	spec v1alpha1.KubernetesApplySpec,
   639  	cluster *v1alpha1.Cluster,
   640  	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
   641  	applyResult applyResult) v1alpha1.KubernetesApplyStatus {
   642  
   643  	r.mu.Lock()
   644  	defer r.mu.Unlock()
   645  
   646  	result := r.ensureResultExists(nn)
   647  
   648  	// Copy over status information from `forceApplyHelper`
   649  	// so other existing status information isn't overwritten
   650  	updatedStatus := result.Status.DeepCopy()
   651  	updatedStatus.ResultYAML = applyResult.ResultYAML
   652  	updatedStatus.Error = applyResult.Error
   653  	updatedStatus.LastApplyStartTime = applyResult.LastApplyStartTime
   654  	updatedStatus.LastApplyTime = applyResult.LastApplyTime
   655  	updatedStatus.AppliedInputHash = applyResult.AppliedInputHash
   656  	updatedStatus.Conditions = conditionsFromApply(applyResult)
   657  
   658  	result.Cluster = cluster
   659  	result.Spec = spec
   660  	result.Status = *updatedStatus
   661  	if spec.ApplyCmd != nil {
   662  		result.CmdApplied = true
   663  	}
   664  	result.SetAppliedObjects(newObjectRefSet(applyResult.Objects))
   665  
   666  	result.ImageMapSpecs = nil
   667  	result.ImageMapStatuses = nil
   668  	for _, imageMapName := range spec.ImageMaps {
   669  		im, ok := imageMaps[types.NamespacedName{Name: imageMapName}]
   670  		if !ok {
   671  			// this should never happen, but if it does, just continue quietly.
   672  			continue
   673  		}
   674  
   675  		result.ImageMapSpecs = append(result.ImageMapSpecs, im.Spec)
   676  		result.ImageMapStatuses = append(result.ImageMapStatuses, im.Status)
   677  	}
   678  
   679  	return result.Status
   680  }
   681  
   682  // Record that the apply has been disabled.
   683  func (r *Reconciler) recordDisableStatus(
   684  	nn types.NamespacedName,
   685  	spec v1alpha1.KubernetesApplySpec,
   686  	disableStatus v1alpha1.DisableStatus) {
   687  
   688  	r.mu.Lock()
   689  	defer r.mu.Unlock()
   690  
   691  	result := r.ensureResultExists(nn)
   692  	if apicmp.DeepEqual(result.Status.DisableStatus, &disableStatus) {
   693  		return
   694  	}
   695  
   696  	isDisabled := disableStatus.State == v1alpha1.DisableStateDisabled
   697  
   698  	update := result.Status.DeepCopy()
   699  	update.DisableStatus = &disableStatus
   700  	result.Status = *update
   701  
   702  	if isDisabled {
   703  		result.SetAppliedObjects(nil)
   704  	}
   705  }
   706  
   707  // Queue the KubernetesApply's applied objects for deletion.
   708  func (r *Reconciler) recordDelete(nn types.NamespacedName) {
   709  	r.mu.Lock()
   710  	defer r.mu.Unlock()
   711  
   712  	result := r.ensureResultExists(nn)
   713  	result.Status = v1alpha1.KubernetesApplyStatus{}
   714  	result.SetAppliedObjects(nil)
   715  }
   716  
   717  // Record that the delete command was run.
   718  func (r *Reconciler) recordDeleteCmdRun(nn types.NamespacedName) {
   719  	r.mu.Lock()
   720  	defer r.mu.Unlock()
   721  
   722  	result, isExisting := r.results[nn]
   723  	if isExisting {
   724  		result.CmdApplied = false
   725  	}
   726  }
   727  
   728  // Delete all state for a KubernetesApply once everything has been cleaned up.
   729  func (r *Reconciler) clearRecord(nn types.NamespacedName) {
   730  	r.mu.Lock()
   731  	defer r.mu.Unlock()
   732  
   733  	delete(r.results, nn)
   734  }
   735  
   736  // Perform garbage collection for a particular KubernetesApply object.
   737  //
   738  // isDeleting: indicates whether this is a full delete or just
   739  // a cleanup of dangling objects.
   740  //
   741  // For custom deploy commands, we run the delete cmd.
   742  //
   743  // For YAML deploys, this is more complex:
   744  //
   745  // There are typically 4 ways objects get marked "dangling".
   746  // 1) Their owner A has been deleted.
   747  // 2) Their owner A has been disabled.
   748  // 3) They've been moved from owner A to owner B.
   749  // 4) Owner A has been re-applied with different arguments.
   750  //
   751  // Because the reconciler handles one owner at a time,
   752  // cases (1) and (3) are basically indistinguishable, and can
   753  // lead to race conditions if we're not careful (e.g., owner A's GC
   754  // deletes objects deployed by B).
   755  //
   756  // TODO(milas): in the case that the KA object was deleted, should we respect `tilt.dev/down-policy`?
   757  func (r *Reconciler) garbageCollect(nn types.NamespacedName, isDeleting bool) deleteSpec {
   758  	r.mu.Lock()
   759  	defer r.mu.Unlock()
   760  
   761  	result, isExisting := r.results[nn]
   762  	if !isExisting {
   763  		return deleteSpec{}
   764  	}
   765  
   766  	if !isDeleting && result.Status.Error != "" {
   767  		// Do not attempt to delete any objects if the apply failed.
   768  		// N.B. on a full delete, isDeleting is true and this guard is skipped, so objects WILL be deleted.
   769  		return deleteSpec{}
   770  	}
   771  
   772  	if result.Spec.DeleteCmd != nil {
   773  		if !isDeleting || !result.CmdApplied {
   774  			// If there's a custom apply + delete command, GC only happens if
   775  			// the KubernetesApply object is being deleted (or disabled) and
   776  			// the apply command was actually executed (by Tilt).
   777  			return deleteSpec{}
   778  		}
   779  
   780  		// The object is being deleted or disabled and there's a custom delete cmd, so run it
   781  		// and skip diffing managed entities entirely.
   782  		//
   783  		// We assume that the delete cmd deletes all dangling objects.
   784  		for k := range result.DanglingObjects {
   785  			delete(result.DanglingObjects, k)
   786  		}
   787  		result.clearApplyStatus()
   788  		return deleteSpec{
   789  			deleteCmd: result.Spec.DeleteCmd,
   790  			cluster:   result.Cluster,
   791  		}
   792  	}
   793  
   794  	// Reconcile the dangling objects against applied objects, ensuring that we're
   795  	// not deleting an object that was moved to another resource.
   796  	for _, result := range r.results {
   797  		for objRef := range result.AppliedObjects {
   798  			delete(result.DanglingObjects, objRef)
   799  		}
   800  	}
   801  
   802  	toDelete := make([]k8s.K8sEntity, 0, len(result.DanglingObjects))
   803  	for k, v := range result.DanglingObjects {
   804  		delete(result.DanglingObjects, k)
   805  		toDelete = append(toDelete, v)
   806  	}
   807  	if isDeleting {
   808  		result.clearApplyStatus()
   809  	}
   810  	return deleteSpec{
   811  		entities: toDelete,
   812  		cluster:  result.Cluster,
   813  	}
   814  }
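
// A sketch of the YAML GC path above, with hypothetical state: dangling
// entities are drained from the Result and handed back for best-effort deletion.
func exampleGarbageCollect() deleteSpec {
	r := &Reconciler{results: map[types.NamespacedName]*Result{}}
	nn := types.NamespacedName{Name: "example"}
	r.results[nn] = &Result{
		AppliedObjects:  objectRefSet{},
		DanglingObjects: objectRefSet{objectRef{Name: "old-cm", Kind: "ConfigMap"}: k8s.K8sEntity{}},
	}
	// Not a full delete: the dangling object is returned and the set is drained.
	return r.garbageCollect(nn, false)
}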
   815  
   816  // A helper that deletes all Kubernetes objects, even if they haven't been applied yet.
   817  //
   818  // Namespaces are not deleted by default. Similar to `tilt down`, deleting namespaces
   819  // is likely to be more destructive than most users want from this operation.
   820  func (r *Reconciler) ForceDelete(ctx context.Context, nn types.NamespacedName,
   821  	spec v1alpha1.KubernetesApplySpec,
   822  	cluster *v1alpha1.Cluster,
   823  	reason string) error {
   824  
   825  	toDelete := deleteSpec{cluster: cluster}
   826  	if spec.YAML != "" {
   827  		entities, err := k8s.ParseYAMLFromString(spec.YAML)
   828  		if err != nil {
   829  			return fmt.Errorf("force delete: %v", err)
   830  		}
   831  
   832  		entities, _, err = k8s.Filter(entities, func(e k8s.K8sEntity) (b bool, err error) {
   833  			return e.GVK() != schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"}, nil
   834  		})
   835  		if err != nil {
   836  			return err
   837  		}
   838  
   839  		toDelete.entities = k8s.ReverseSortedEntities(entities)
   840  	} else if spec.DeleteCmd != nil {
   841  		toDelete.deleteCmd = spec.DeleteCmd
   842  	}
   843  
   844  	r.recordDelete(nn)
   845  	r.bestEffortDelete(ctx, nn, toDelete, reason)
   846  	r.requeuer.Add(nn)
   847  	return nil
   848  }
   849  
   850  // Update the status if necessary.
   851  func (r *Reconciler) maybeUpdateStatus(ctx context.Context, nn types.NamespacedName, obj *v1alpha1.KubernetesApply) (*v1alpha1.KubernetesApply, error) {
   852  	newStatus := v1alpha1.KubernetesApplyStatus{}
   853  	r.mu.Lock()
   854  	if existing, ok := r.results[nn]; ok {
   855  		newStatus = existing.Status
   856  	}
   857  	r.mu.Unlock()
   858  	if apicmp.DeepEqual(obj.Status, newStatus) {
   859  		return obj, nil
   860  	}
   861  
   862  	oldError := obj.Status.Error
   863  	newError := newStatus.Error
   864  	update := obj.DeepCopy()
   865  	update.Status = *(newStatus.DeepCopy())
   866  
   867  	err := r.ctrlClient.Status().Update(ctx, update)
   868  	if err != nil {
   869  		return nil, err
   870  	}
   871  
   872  	// Print new errors on objects that aren't managed by the buildcontroller.
   873  	if newError != "" && oldError != newError && update.Annotations[v1alpha1.AnnotationManagedBy] == "" {
   874  		logger.Get(ctx).Errorf("kubernetesapply %s: %s", obj.Name, newError)
   875  	}
   876  	return update, nil
   877  }
   878  
   879  func (r *Reconciler) bestEffortDelete(ctx context.Context, nn types.NamespacedName, toDelete deleteSpec, reason string) {
   880  	if len(toDelete.entities) == 0 && toDelete.deleteCmd == nil {
   881  		return
   882  	}
   883  
   884  	l := logger.Get(ctx)
   885  	l.Infof("Beginning %s", reason)
   886  
   887  	if len(toDelete.entities) != 0 {
   888  		err := r.k8sClient.Delete(ctx, toDelete.entities, 0)
   889  		if err != nil {
   890  			l.Errorf("Error %s: %v", reason, err)
   891  		}
   892  	}
   893  
   894  	if toDelete.deleteCmd != nil {
   895  		deleteCmd := toModelCmd(*toDelete.deleteCmd)
   896  		err := r.maybeInjectKubeconfig(&deleteCmd, toDelete.cluster)
   897  		if err != nil {
   898  			l.Errorf("Error %s: %v", reason, err)
   899  		}
   900  		if err := localexec.OneShotToLogger(ctx, r.execer, deleteCmd); err != nil {
   901  			l.Errorf("Error %s: %v", reason, err)
   902  		}
   903  		r.recordDeleteCmdRun(nn)
   904  	}
   905  }
   906  
   907  var imGVK = v1alpha1.SchemeGroupVersion.WithKind("ImageMap")
   908  var clusterGVK = v1alpha1.SchemeGroupVersion.WithKind("Cluster")
   909  
   910  // indexKubernetesApply returns keys for all the objects we need to watch based on the spec.
   911  func indexKubernetesApply(obj client.Object) []indexer.Key {
   912  	ka := obj.(*v1alpha1.KubernetesApply)
   913  	result := []indexer.Key{}
   914  	for _, name := range ka.Spec.ImageMaps {
   915  		result = append(result, indexer.Key{
   916  			Name: types.NamespacedName{Name: name},
   917  			GVK:  imGVK,
   918  		})
   919  	}
   920  	if ka.Spec.Cluster != "" {
   921  		result = append(result, indexer.Key{
   922  			Name: types.NamespacedName{Name: ka.Spec.Cluster},
   923  			GVK:  clusterGVK,
   924  		})
   925  	}
   926  
   927  	if ka.Spec.DisableSource != nil {
   928  		cm := ka.Spec.DisableSource.ConfigMap
   929  		if cm != nil {
   930  			cmGVK := v1alpha1.SchemeGroupVersion.WithKind("ConfigMap")
   931  			result = append(result, indexer.Key{
   932  				Name: types.NamespacedName{Name: cm.Name},
   933  				GVK:  cmGVK,
   934  			})
   935  		}
   936  	}
   937  	return result
   938  }
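
// A sketch of the keys produced above for a spec with hypothetical names:
// one key per ImageMap, plus one for the Cluster when it's set.
func exampleIndexKeys() []indexer.Key {
	ka := &v1alpha1.KubernetesApply{
		Spec: v1alpha1.KubernetesApplySpec{
			ImageMaps: []string{"frontend"},
			Cluster:   "default",
		},
	}
	// Returns an ImageMap key for "frontend" and a Cluster key for "default".
	return indexKubernetesApply(ka)
}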
   939  
   940  // Keeps track of the state we currently know about.
   941  type Result struct {
   942  	Spec             v1alpha1.KubernetesApplySpec
   943  	Cluster          *v1alpha1.Cluster
   944  	ImageMapSpecs    []v1alpha1.ImageMapSpec
   945  	ImageMapStatuses []v1alpha1.ImageMapStatus
   946  
   947  	CmdApplied      bool
   948  	AppliedObjects  objectRefSet
   949  	DanglingObjects objectRefSet
   950  	Status          v1alpha1.KubernetesApplyStatus
   951  }
   952  
   953  // Clear the apply-related status fields,
   954  // as if this had never been applied.
   955  func (r *Result) clearApplyStatus() {
   956  	if r.Status.LastApplyTime.IsZero() && r.Status.Error == "" {
   957  		return
   958  	}
   959  
   960  	update := r.Status.DeepCopy()
   961  	update.LastApplyTime = metav1.MicroTime{}
   962  	update.LastApplyStartTime = metav1.MicroTime{}
   963  	update.Error = ""
   964  	update.ResultYAML = ""
   965  	r.Status = *update
   966  }
   967  
   968  // Set a new collection of applied objects.
   969  //
   970  // Move all the currently applied objects to the dangling
   971  // collection for garbage collection.
   972  func (r *Result) SetAppliedObjects(set objectRefSet) {
   973  	for k, v := range r.AppliedObjects {
   974  		r.DanglingObjects[k] = v
   975  	}
   976  	r.AppliedObjects = set
   977  }
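
// A small sketch of the hand-off above, with hypothetical refs: the previous
// applied set becomes dangling (eligible for GC) and the new set replaces it.
func exampleSetAppliedObjects() {
	res := &Result{
		AppliedObjects:  objectRefSet{objectRef{Name: "a"}: k8s.K8sEntity{}},
		DanglingObjects: objectRefSet{},
	}
	res.SetAppliedObjects(objectRefSet{objectRef{Name: "b"}: k8s.K8sEntity{}})
	// "a" is now in res.DanglingObjects; "b" is the sole applied object.
}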
   978  
   979  type objectRef struct {
   980  	Name       string
   981  	Namespace  string
   982  	APIVersion string
   983  	Kind       string
   984  }
   985  
   986  type objectRefSet map[objectRef]k8s.K8sEntity
   987  
   988  func newObjectRefSet(entities []k8s.K8sEntity) objectRefSet {
   989  	r := make(objectRefSet, len(entities))
   990  	for _, e := range entities {
   991  		ref := e.ToObjectReference()
   992  		oRef := objectRef{
   993  			Name:       ref.Name,
   994  			Namespace:  ref.Namespace,
   995  			APIVersion: ref.APIVersion,
   996  			Kind:       ref.Kind,
   997  		}
   998  		r[oRef] = e
   999  	}
  1000  	return r
  1001  }
  1002  
  1003  func toModelCmd(cmd v1alpha1.KubernetesApplyCmd) model.Cmd {
  1004  	return model.Cmd{
  1005  		Argv: cmd.Args,
  1006  		Dir:  cmd.Dir,
  1007  		Env:  cmd.Env,
  1008  	}
  1009  }
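
// A trivial sketch of the conversion above, with a hypothetical command:
// Argv becomes ["kubectl", "apply", "-f", "-"] and Dir becomes "/app".
func exampleToModelCmd() model.Cmd {
	return toModelCmd(v1alpha1.KubernetesApplyCmd{
		Args: []string{"kubectl", "apply", "-f", "-"},
		Dir:  "/app",
	})
}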