github.com/tilt-dev/tilt@v0.33.15-0.20240515162809-0a22ed45d8a0/internal/controllers/core/kubernetesapply/reconciler.go

     1  package kubernetesapply
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"fmt"
     7  	"sync"
     8  
     9  	"github.com/pkg/errors"
    10  	batchv1 "k8s.io/api/batch/v1"
    11  	v1 "k8s.io/api/core/v1"
    12  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    13  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    14  	"k8s.io/apimachinery/pkg/runtime"
    15  	"k8s.io/apimachinery/pkg/runtime/schema"
    16  	"k8s.io/apimachinery/pkg/types"
    17  	ctrl "sigs.k8s.io/controller-runtime"
    18  	"sigs.k8s.io/controller-runtime/pkg/builder"
    19  	"sigs.k8s.io/controller-runtime/pkg/client"
    20  	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
    21  	"sigs.k8s.io/controller-runtime/pkg/handler"
    22  	"sigs.k8s.io/controller-runtime/pkg/reconcile"
    23  
    24  	"github.com/tilt-dev/tilt/internal/container"
    25  	"github.com/tilt-dev/tilt/internal/controllers/apicmp"
    26  	"github.com/tilt-dev/tilt/internal/controllers/apis/configmap"
    27  	"github.com/tilt-dev/tilt/internal/controllers/apis/imagemap"
    28  	"github.com/tilt-dev/tilt/internal/controllers/apis/trigger"
    29  	"github.com/tilt-dev/tilt/internal/controllers/indexer"
    30  	"github.com/tilt-dev/tilt/internal/k8s"
    31  	"github.com/tilt-dev/tilt/internal/localexec"
    32  	"github.com/tilt-dev/tilt/internal/store"
    33  	"github.com/tilt-dev/tilt/internal/store/kubernetesapplys"
    34  	"github.com/tilt-dev/tilt/internal/timecmp"
    35  	"github.com/tilt-dev/tilt/pkg/apis"
    36  	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
    37  	"github.com/tilt-dev/tilt/pkg/logger"
    38  	"github.com/tilt-dev/tilt/pkg/model"
    39  )
    40  
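        // deleteSpec describes a set of objects to delete: either concrete entities
        // parsed from YAML, or a custom delete command to run against the given cluster.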
    41  type deleteSpec struct {
    42  	entities  []k8s.K8sEntity
    43  	deleteCmd *v1alpha1.KubernetesApplyCmd
    44  	cluster   *v1alpha1.Cluster
    45  }
    46  
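        // Reconciler applies KubernetesApply specs to the cluster, tracks the
        // objects each apply created, and garbage-collects objects that no longer
        // appear in any apply.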
    47  type Reconciler struct {
    48  	st         store.RStore
    49  	k8sClient  k8s.Client
    50  	ctrlClient ctrlclient.Client
    51  	indexer    *indexer.Indexer
    52  	execer     localexec.Execer
    53  	requeuer   *indexer.Requeuer
    54  
    55  	mu sync.Mutex
    56  
    57  	// Protected by the mutex.
    58  	results map[types.NamespacedName]*Result
    59  }
    60  
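        // CreateBuilder registers the watches that feed this reconciler: the
        // KubernetesApply objects themselves, the KubernetesDiscovery objects they
        // own, and the ImageMaps, ConfigMaps, Clusters, and restart-on sources
        // referenced by their specs.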
    61  func (r *Reconciler) CreateBuilder(mgr ctrl.Manager) (*builder.Builder, error) {
    62  	b := ctrl.NewControllerManagedBy(mgr).
    63  		For(&v1alpha1.KubernetesApply{}).
    64  		Owns(&v1alpha1.KubernetesDiscovery{}).
    65  		WatchesRawSource(r.requeuer).
    66  		Watches(&v1alpha1.ImageMap{},
    67  			handler.EnqueueRequestsFromMapFunc(r.indexer.Enqueue)).
    68  		Watches(&v1alpha1.ConfigMap{},
    69  			handler.EnqueueRequestsFromMapFunc(r.indexer.Enqueue)).
    70  		Watches(&v1alpha1.Cluster{},
    71  			handler.EnqueueRequestsFromMapFunc(r.indexer.Enqueue))
    72  
    73  	trigger.SetupControllerRestartOn(b, r.indexer, func(obj ctrlclient.Object) *v1alpha1.RestartOnSpec {
    74  		return obj.(*v1alpha1.KubernetesApply).Spec.RestartOn
    75  	})
    76  
    77  	return b, nil
    78  }
    79  
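        // NewReconciler builds a Reconciler; wire it into a controller manager via
        // CreateBuilder so that it receives reconcile requests.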
    80  func NewReconciler(ctrlClient ctrlclient.Client, k8sClient k8s.Client, scheme *runtime.Scheme, st store.RStore, execer localexec.Execer) *Reconciler {
    81  	return &Reconciler{
    82  		ctrlClient: ctrlClient,
    83  		k8sClient:  k8sClient,
    84  		indexer:    indexer.NewIndexer(scheme, indexKubernetesApply),
    85  		execer:     execer,
    86  		st:         st,
    87  		results:    make(map[types.NamespacedName]*Result),
    88  		requeuer:   indexer.NewRequeuer(),
    89  	}
    90  }
    91  
    92  // Reconcile applies the modified KubernetesApply object's YAML to the cluster, garbage-collects objects that are no longer part of the apply, and keeps its status and owned KubernetesDiscovery up to date.
    93  func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
    94  	nn := request.NamespacedName
    95  
    96  	var ka v1alpha1.KubernetesApply
    97  	err := r.ctrlClient.Get(ctx, nn, &ka)
    98  	r.indexer.OnReconcile(nn, &ka)
    99  	if err != nil && !apierrors.IsNotFound(err) {
   100  		return ctrl.Result{}, err
   101  	}
   102  
   103  	if apierrors.IsNotFound(err) || !ka.ObjectMeta.DeletionTimestamp.IsZero() {
   104  		result, err := r.manageOwnedKubernetesDiscovery(ctx, nn, nil)
   105  		if err != nil {
   106  			return ctrl.Result{}, err
   107  		}
   108  
   109  		r.recordDelete(nn)
   110  		toDelete := r.garbageCollect(nn, true)
   111  		r.bestEffortDelete(ctx, nn, toDelete, "garbage collecting Kubernetes objects")
   112  		r.clearRecord(nn)
   113  
   114  		r.st.Dispatch(kubernetesapplys.NewKubernetesApplyDeleteAction(request.NamespacedName.Name))
   115  		return result, nil
   116  	}
   117  
   118  	// The apiserver is the source of truth, and will ensure the engine state is up to date.
   119  	r.st.Dispatch(kubernetesapplys.NewKubernetesApplyUpsertAction(&ka))
   120  
    121  	// Get the disable status from the DisableSource ConfigMap.
   122  	ctx = store.MustObjectLogHandler(ctx, r.st, &ka)
   123  	disableStatus, err := configmap.MaybeNewDisableStatus(ctx, r.ctrlClient, ka.Spec.DisableSource, ka.Status.DisableStatus)
   124  	if err != nil {
   125  		return ctrl.Result{}, err
   126  	}
   127  
   128  	r.recordDisableStatus(nn, ka.Spec, *disableStatus)
   129  
    130  	// If the KubernetesApply is disabled, delete the objects it applied.
   131  	isDisabling := false
   132  	gcReason := "garbage collecting Kubernetes objects"
   133  	if disableStatus.State == v1alpha1.DisableStateDisabled {
   134  		gcReason = "deleting disabled Kubernetes objects"
   135  		isDisabling = true
   136  	} else {
   137  		// Fetch all the objects needed to apply this YAML.
   138  		var cluster v1alpha1.Cluster
   139  		if ka.Spec.Cluster != "" {
   140  			err := r.ctrlClient.Get(ctx, types.NamespacedName{Name: ka.Spec.Cluster}, &cluster)
   141  			if client.IgnoreNotFound(err) != nil {
   142  				return ctrl.Result{}, err
   143  			}
   144  		}
   145  
   146  		imageMaps, err := imagemap.NamesToObjects(ctx, r.ctrlClient, ka.Spec.ImageMaps)
   147  		if err != nil {
   148  			return ctrl.Result{}, err
   149  		}
   150  
   151  		lastRestartEvent, _, _, err := trigger.LastRestartEvent(ctx, r.ctrlClient, ka.Spec.RestartOn)
   152  		if err != nil {
   153  			return ctrl.Result{}, err
   154  		}
   155  
   156  		// Apply to the cluster if necessary.
   157  		//
   158  		// TODO(nick): Like with other reconcilers, there should always
   159  		// be a reason why we're not deploying, and we should update the
   160  		// Status field of KubernetesApply with that reason.
   161  		if r.shouldDeployOnReconcile(request.NamespacedName, &ka, &cluster, imageMaps, lastRestartEvent) {
   162  			_ = r.forceApplyHelper(ctx, nn, ka.Spec, &cluster, imageMaps)
   163  			gcReason = "garbage collecting removed Kubernetes objects"
   164  		}
   165  	}
   166  
   167  	toDelete := r.garbageCollect(nn, isDisabling)
   168  	r.bestEffortDelete(ctx, nn, toDelete, gcReason)
   169  
   170  	newKA, err := r.maybeUpdateStatus(ctx, nn, &ka)
   171  	if err != nil {
   172  		return ctrl.Result{}, err
   173  	}
   174  
   175  	return r.manageOwnedKubernetesDiscovery(ctx, nn, newKA)
   176  }
   177  
   178  // Determine if we should deploy the current YAML.
   179  //
   180  // Ensures:
   181  //  1. We have enough info to deploy, and
   182  //  2. Either we haven't deployed before,
   183  //     or one of the inputs has changed since the last deploy.
   184  func (r *Reconciler) shouldDeployOnReconcile(
   185  	nn types.NamespacedName,
   186  	ka *v1alpha1.KubernetesApply,
   187  	cluster *v1alpha1.Cluster,
   188  	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
   189  	lastRestartEvent metav1.MicroTime,
   190  ) bool {
   191  	if ka.Annotations[v1alpha1.AnnotationManagedBy] != "" {
   192  		// Until resource dependencies are expressed in the API,
   193  		// we can't use reconciliation to deploy KubernetesApply objects
   194  		// managed by the buildcontrol engine.
   195  		return false
   196  	}
   197  
   198  	if ka.Spec.Cluster != "" {
   199  		isClusterOK := cluster != nil && cluster.Name != "" &&
   200  			cluster.Status.Error == "" && cluster.Status.Connection != nil
   201  		if !isClusterOK {
   202  			// Wait for the cluster to start.
   203  			return false
   204  		}
   205  	}
   206  
   207  	for _, imageMapName := range ka.Spec.ImageMaps {
   208  		_, ok := imageMaps[types.NamespacedName{Name: imageMapName}]
   209  		if !ok {
    210  			// We haven't built the images we need to deploy yet.
   211  			return false
   212  		}
   213  	}
   214  
   215  	r.mu.Lock()
   216  	result, ok := r.results[nn]
   217  	r.mu.Unlock()
   218  
   219  	if !ok || result.Status.LastApplyTime.IsZero() {
   220  		// We've never successfully deployed before, so deploy now.
   221  		return true
   222  	}
   223  
   224  	if !apicmp.DeepEqual(ka.Spec, result.Spec) {
   225  		// The YAML to deploy changed.
   226  		return true
   227  	}
   228  
   229  	imageMapNames := ka.Spec.ImageMaps
   230  	if len(imageMapNames) != len(result.ImageMapSpecs) ||
   231  		len(imageMapNames) != len(result.ImageMapStatuses) {
   232  		return true
   233  	}
   234  
   235  	for i, name := range ka.Spec.ImageMaps {
   236  		im := imageMaps[types.NamespacedName{Name: name}]
   237  		if !apicmp.DeepEqual(im.Spec, result.ImageMapSpecs[i]) {
   238  			return true
   239  		}
   240  		if !apicmp.DeepEqual(im.Status, result.ImageMapStatuses[i]) {
   241  			return true
   242  		}
   243  	}
   244  
   245  	if timecmp.After(lastRestartEvent, result.Status.LastApplyTime) {
   246  		return true
   247  	}
   248  
   249  	return false
   250  }
   251  
   252  // Inject the images into the YAML and apply it to the cluster, unconditionally.
   253  //
   254  // Does not update the API server, but does trigger a re-reconcile
   255  // so that the reconciliation loop will handle it.
   256  //
   257  // We expose this as a public method as a hack! Currently, in Tilt, BuildController
   258  // handles dependencies between resources. The API server doesn't know about build
   259  // dependencies yet. So Tiltfile-owned resources are applied manually, rather than
   260  // going through the normal reconcile system.
   261  func (r *Reconciler) ForceApply(
   262  	ctx context.Context,
   263  	nn types.NamespacedName,
   264  	spec v1alpha1.KubernetesApplySpec,
   265  	cluster *v1alpha1.Cluster,
   266  	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap) v1alpha1.KubernetesApplyStatus {
   267  	status := r.forceApplyHelper(ctx, nn, spec, cluster, imageMaps)
   268  	r.requeuer.Add(nn)
   269  	return status
   270  }
   271  
   272  // A helper that applies the given specs to the cluster,
   273  // tracking the state of the deploy in the results map.
   274  func (r *Reconciler) forceApplyHelper(
   275  	ctx context.Context,
   276  	nn types.NamespacedName,
   277  	spec v1alpha1.KubernetesApplySpec,
   278  	cluster *v1alpha1.Cluster,
   279  	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
   280  ) v1alpha1.KubernetesApplyStatus {
   281  
   282  	startTime := apis.NowMicro()
   283  	status := applyResult{
   284  		LastApplyStartTime: startTime,
   285  	}
   286  
   287  	recordErrorStatus := func(err error) v1alpha1.KubernetesApplyStatus {
   288  		status.LastApplyTime = apis.NowMicro()
   289  		status.Error = err.Error()
   290  		return r.recordApplyResult(nn, spec, cluster, imageMaps, status)
   291  	}
   292  
   293  	inputHash, err := ComputeInputHash(spec, imageMaps)
   294  	if err != nil {
   295  		return recordErrorStatus(err)
   296  	}
   297  
   298  	var deployed []k8s.K8sEntity
   299  	deployCtx := r.indentLogger(ctx)
   300  	if spec.YAML != "" {
   301  		deployed, err = r.runYAMLDeploy(deployCtx, spec, imageMaps)
   302  		if err != nil {
   303  			return recordErrorStatus(err)
   304  		}
   305  	} else {
   306  		deployed, err = r.runCmdDeploy(deployCtx, spec, cluster, imageMaps)
   307  		if err != nil {
   308  			return recordErrorStatus(err)
   309  		}
   310  	}
   311  
   312  	status.LastApplyTime = apis.NowMicro()
   313  	status.AppliedInputHash = inputHash
   314  	for _, d := range deployed {
   315  		d.Clean()
   316  	}
   317  
   318  	resultYAML, err := k8s.SerializeSpecYAML(deployed)
   319  	if err != nil {
   320  		return recordErrorStatus(err)
   321  	}
   322  
   323  	status.ResultYAML = resultYAML
   324  	status.Objects = deployed
   325  	return r.recordApplyResult(nn, spec, cluster, imageMaps, status)
   326  }
   327  
   328  func (r *Reconciler) printAppliedReport(ctx context.Context, msg string, deployed []k8s.K8sEntity) {
   329  	l := logger.Get(ctx)
   330  	l.Infof("%s", msg)
   331  
    332  	// Use a min component count of 2 when computing names,
    333  	// so that the resource type appears in the display name.
   334  	displayNames := k8s.UniqueNames(deployed, 2)
   335  	for _, displayName := range displayNames {
   336  		l.Infof("  → %s", displayName)
   337  	}
   338  }
   339  
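        // runYAMLDeploy injects images into the spec YAML and upserts the resulting
        // objects into the cluster, returning the objects as reported back by the cluster.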
   340  func (r *Reconciler) runYAMLDeploy(ctx context.Context, spec v1alpha1.KubernetesApplySpec, imageMaps map[types.NamespacedName]*v1alpha1.ImageMap) ([]k8s.K8sEntity, error) {
   341  	// Create API objects.
   342  	newK8sEntities, err := r.createEntitiesToDeploy(ctx, imageMaps, spec)
   343  	if err != nil {
   344  		return newK8sEntities, err
   345  	}
   346  
   347  	logger.Get(ctx).Infof("Applying YAML to cluster")
   348  
   349  	timeout := spec.Timeout.Duration
   350  	if timeout == 0 {
   351  		timeout = v1alpha1.KubernetesApplyTimeoutDefault
   352  	}
   353  
   354  	deployed, err := r.k8sClient.Upsert(ctx, newK8sEntities, timeout)
   355  	if err != nil {
   356  		r.printAppliedReport(ctx, "Tried to apply objects to cluster:", newK8sEntities)
   357  		return nil, err
   358  	}
   359  	r.printAppliedReport(ctx, "Objects applied to cluster:", deployed)
   360  
   361  	return deployed, nil
   362  }
   363  
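        // maybeInjectKubeconfig points a custom apply/delete command at the
        // cluster's kubeconfig (if the Cluster connection provides one) by
        // appending a KUBECONFIG entry to the command's environment.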
   364  func (r *Reconciler) maybeInjectKubeconfig(cmd *model.Cmd, cluster *v1alpha1.Cluster) {
   365  	if cluster == nil ||
   366  		cluster.Status.Connection == nil ||
   367  		cluster.Status.Connection.Kubernetes == nil {
   368  		return
   369  	}
   370  	kubeconfig := cluster.Status.Connection.Kubernetes.ConfigPath
   371  	if kubeconfig == "" {
   372  		return
   373  	}
   374  	cmd.Env = append(cmd.Env, fmt.Sprintf("KUBECONFIG=%s", kubeconfig))
   375  }
   376  
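        // runCmdDeploy runs the custom apply command with image and kubeconfig
        // environment variables injected, then parses the YAML the command prints
        // to stdout as the set of objects it applied.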
   377  func (r *Reconciler) runCmdDeploy(ctx context.Context, spec v1alpha1.KubernetesApplySpec,
   378  	cluster *v1alpha1.Cluster,
   379  	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap) ([]k8s.K8sEntity, error) {
   380  	timeout := spec.Timeout.Duration
   381  	if timeout == 0 {
   382  		timeout = v1alpha1.KubernetesApplyTimeoutDefault
   383  	}
   384  	ctx, cancel := context.WithTimeout(ctx, timeout)
   385  	defer cancel()
   386  
   387  	var stdoutBuf bytes.Buffer
   388  	runIO := localexec.RunIO{
   389  		Stdout: &stdoutBuf,
   390  		Stderr: logger.Get(ctx).Writer(logger.InfoLvl),
   391  	}
   392  
   393  	cmd := toModelCmd(*spec.ApplyCmd)
   394  	err := imagemap.InjectIntoDeployEnv(&cmd, spec.ImageMaps, imageMaps)
   395  	if err != nil {
   396  		return nil, err
   397  	}
   398  	r.maybeInjectKubeconfig(&cmd, cluster)
   399  
   400  	logger.Get(ctx).Infof("Running cmd: %s", cmd.String())
   401  	exitCode, err := r.execer.Run(ctx, cmd, runIO)
   402  	if err != nil {
   403  		return nil, fmt.Errorf("apply command failed: %v", err)
   404  	}
   405  
   406  	if exitCode != 0 {
   407  		var stdoutLog string
   408  		if stdoutBuf.Len() != 0 {
   409  			stdoutLog = fmt.Sprintf("\nstdout:\n%s\n", overflowEllipsis(stdoutBuf.String()))
   410  		}
   411  		if ctx.Err() != nil {
   412  			// process returned a non-zero exit code (generally 137) because it was killed by us
   413  			return nil, fmt.Errorf("apply command timed out after %s - see https://docs.tilt.dev/api.html#api.update_settings for how to increase%s", timeout.String(), stdoutLog)
   414  		}
   415  		return nil, fmt.Errorf("apply command exited with status %d%s", exitCode, stdoutLog)
   416  	}
   417  
    418  	// Don't pass the bytes.Buffer directly to the YAML parser: parsing would consume it, and then we couldn't print the output on failure.
   419  	stdout := stdoutBuf.Bytes()
   420  	entities, err := k8s.ParseYAML(bytes.NewReader(stdout))
   421  	if err != nil {
   422  		return nil, fmt.Errorf("apply command returned malformed YAML: %v\nstdout:\n%s\n", err, overflowEllipsis(string(stdout)))
   423  	}
   424  
   425  	r.printAppliedReport(ctx, "Objects applied to cluster:", entities)
   426  
   427  	return entities, nil
   428  }
   429  
   430  const maxOverflow = 500
   431  
    432  // The stdout of a well-behaved apply command can be 100K+ (especially for CRDs).
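        //
        // For example (illustrative only), a 100KB stdout is reduced to roughly:
        //
        //	<first 250 bytes>
        //	... [truncated by Tilt] ...
        //	<last 250 bytes>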
   433  func overflowEllipsis(str string) string {
   434  	if len(str) > maxOverflow {
   435  		return fmt.Sprintf("%s\n... [truncated by Tilt] ...\n%s", str[0:maxOverflow/2], str[len(str)-maxOverflow/2:])
   436  	}
   437  	return str
   438  }
   439  
   440  func (r *Reconciler) indentLogger(ctx context.Context) context.Context {
   441  	l := logger.Get(ctx)
   442  	newL := logger.NewPrefixedLogger(logger.Blue(l).Sprint("     "), l)
   443  	return logger.WithLogger(ctx, newL)
   444  }
   445  
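        // injectResult records a single image injection (which entity, which
        // ImageMap) so that it can be reported in the debug log.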
   446  type injectResult struct {
   447  	meta     k8s.EntityMeta
   448  	imageMap *v1alpha1.ImageMap
   449  }
   450  
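        // createEntitiesToDeploy parses the spec YAML and rewrites each entity for
        // deployment: it adds Tilt's managed-by label, injects the images recorded
        // in the ImageMaps (adjusting pull policy and StatefulSet pod management
        // when images are injected), and stamps pod template spec hashes.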
   451  func (r *Reconciler) createEntitiesToDeploy(ctx context.Context,
   452  	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
   453  	spec v1alpha1.KubernetesApplySpec) ([]k8s.K8sEntity, error) {
   454  	newK8sEntities := []k8s.K8sEntity{}
   455  
   456  	entities, err := k8s.ParseYAMLFromString(spec.YAML)
   457  	if err != nil {
   458  		return nil, err
   459  	}
   460  
   461  	locators, err := k8s.ParseImageLocators(spec.ImageLocators)
   462  	if err != nil {
   463  		return nil, err
   464  	}
   465  
   466  	var injectResults []injectResult
   467  	imageMapNames := spec.ImageMaps
   468  	injectedImageMaps := map[string]bool{}
   469  	for _, e := range entities {
   470  		e, err = k8s.InjectLabels(e, []model.LabelPair{
   471  			k8s.TiltManagedByLabel(),
   472  		})
   473  		if err != nil {
   474  			return nil, errors.Wrap(err, "deploy")
   475  		}
   476  
   477  		// If we're redeploying these workloads in response to image
   478  		// changes, we make sure image pull policy isn't set to "Always".
   479  		// Frequent applies don't work well with this setting, and makes things
   480  		// slower. See discussion:
   481  		// https://github.com/tilt-dev/tilt/issues/3209
   482  		if len(imageMaps) > 0 {
   483  			e, err = k8s.InjectImagePullPolicy(e, v1.PullIfNotPresent)
   484  			if err != nil {
   485  				return nil, err
   486  			}
   487  		}
   488  
   489  		if len(imageMaps) > 0 {
   490  			// StatefulSet pods should be managed in parallel when we're doing iterative
   491  			// development. See discussion:
   492  			// https://github.com/tilt-dev/tilt/issues/1962
   493  			// https://github.com/tilt-dev/tilt/issues/3906
   494  			e = k8s.InjectParallelPodManagementPolicy(e)
   495  		}
   496  
   497  		// Set the pull policy to IfNotPresent, to ensure that
   498  		// we get a locally built image instead of the remote one.
   499  		policy := v1.PullIfNotPresent
   500  		for _, imageMapName := range imageMapNames {
   501  			imageMap := imageMaps[types.NamespacedName{Name: imageMapName}]
   502  			imageMapSpec := imageMap.Spec
   503  			selector, err := container.SelectorFromImageMap(imageMapSpec)
   504  			if err != nil {
   505  				return nil, err
   506  			}
   507  			matchInEnvVars := imageMapSpec.MatchInEnvVars
   508  
   509  			if imageMap.Status.Image == "" {
   510  				return nil, fmt.Errorf("internal error: missing image status")
   511  			}
   512  
   513  			ref, err := container.ParseNamed(imageMap.Status.ImageFromCluster)
   514  			if err != nil {
   515  				return nil, fmt.Errorf("parsing image map status: %v", err)
   516  			}
   517  
   518  			var replaced bool
   519  			e, replaced, err = k8s.InjectImageDigest(e, selector, ref, locators, matchInEnvVars, policy)
   520  			if err != nil {
   521  				return nil, err
   522  			}
   523  			if replaced {
   524  				injectedImageMaps[imageMapName] = true
   525  				injectResults = append(injectResults, injectResult{
   526  					meta:     e,
   527  					imageMap: imageMap,
   528  				})
   529  
   530  				if imageMapSpec.OverrideCommand != nil || imageMapSpec.OverrideArgs != nil {
   531  					e, err = k8s.InjectCommandAndArgs(e, ref, imageMapSpec.OverrideCommand, imageMapSpec.OverrideArgs)
   532  					if err != nil {
   533  						return nil, err
   534  					}
   535  				}
   536  			}
   537  		}
   538  
    539  		// This needs to happen after all the other injections, to ensure the hash
    540  		// includes the Tilt-generated image tag, etc.
   541  		e, err := k8s.InjectPodTemplateSpecHashes(e)
   542  		if err != nil {
   543  			return nil, errors.Wrap(err, "injecting pod template hash")
   544  		}
   545  
   546  		newK8sEntities = append(newK8sEntities, e)
   547  	}
   548  
   549  	for _, name := range imageMapNames {
   550  		if !injectedImageMaps[name] {
   551  			return nil, fmt.Errorf("Docker image missing from yaml: %s", name)
   552  		}
   553  	}
   554  
   555  	l := logger.Get(ctx)
   556  	if l.Level().ShouldDisplay(logger.DebugLvl) {
   557  		if len(injectResults) != 0 {
   558  			l.Debugf("Injecting images into Kubernetes YAML:")
   559  			meta := make([]k8s.EntityMeta, len(injectResults))
   560  			for i := range injectResults {
   561  				meta[i] = injectResults[i].meta
   562  			}
   563  			names := k8s.UniqueNamesMeta(meta, 2)
   564  			for i := range injectResults {
   565  				l.Debugf(
   566  					"  → %s: %s ⇒ %s",
   567  					names[i],
   568  					injectResults[i].imageMap.Spec.Selector,
   569  					injectResults[i].imageMap.Status.Image,
   570  				)
   571  			}
   572  		} else {
   573  			l.Debugf("No images injected into Kubernetes YAML")
   574  		}
   575  	}
   576  
   577  	return newK8sEntities, nil
   578  }
   579  
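        // applyResult holds the outcome of a single apply attempt; recordApplyResult
        // folds it into the in-memory Result and the object's status.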
   580  type applyResult struct {
   581  	ResultYAML         string
   582  	Error              string
   583  	LastApplyTime      metav1.MicroTime
   584  	LastApplyStartTime metav1.MicroTime
   585  	AppliedInputHash   string
   586  	Objects            []k8s.K8sEntity
   587  }
   588  
   589  // conditionsFromApply extracts any conditions based on the result.
   590  //
   591  // Currently, this is only used as part of special handling for Jobs, which
   592  // might have already completed successfully in the past.
   593  func conditionsFromApply(result applyResult) []metav1.Condition {
   594  	if result.Error != "" || len(result.Objects) == 0 {
   595  		return nil
   596  	}
   597  
   598  	for _, e := range result.Objects {
   599  		job, ok := e.Obj.(*batchv1.Job)
   600  		if !ok {
   601  			continue
   602  		}
   603  		for _, cond := range job.Status.Conditions {
   604  			if cond.Type == batchv1.JobComplete && cond.Status == v1.ConditionTrue {
   605  				return []metav1.Condition{
   606  					{
   607  						Type:   v1alpha1.ApplyConditionJobComplete,
   608  						Status: metav1.ConditionTrue,
   609  					},
   610  				}
   611  			}
   612  		}
   613  	}
   614  	return nil
   615  }
   616  
   617  // Create a result object if necessary. Caller must hold the mutex.
   618  func (r *Reconciler) ensureResultExists(nn types.NamespacedName) *Result {
   619  	existing, hasExisting := r.results[nn]
   620  	if hasExisting {
   621  		return existing
   622  	}
   623  
   624  	result := &Result{
   625  		DanglingObjects: objectRefSet{},
   626  	}
   627  	r.results[nn] = result
   628  	return result
   629  }
   630  
   631  // Record the results of a deploy to the local Result map.
   632  func (r *Reconciler) recordApplyResult(
   633  	nn types.NamespacedName,
   634  	spec v1alpha1.KubernetesApplySpec,
   635  	cluster *v1alpha1.Cluster,
   636  	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
   637  	applyResult applyResult) v1alpha1.KubernetesApplyStatus {
   638  
   639  	r.mu.Lock()
   640  	defer r.mu.Unlock()
   641  
   642  	result := r.ensureResultExists(nn)
   643  
   644  	// Copy over status information from `forceApplyHelper`
   645  	// so other existing status information isn't overwritten
   646  	updatedStatus := result.Status.DeepCopy()
   647  	updatedStatus.ResultYAML = applyResult.ResultYAML
   648  	updatedStatus.Error = applyResult.Error
   649  	updatedStatus.LastApplyStartTime = applyResult.LastApplyStartTime
   650  	updatedStatus.LastApplyTime = applyResult.LastApplyTime
   651  	updatedStatus.AppliedInputHash = applyResult.AppliedInputHash
   652  	updatedStatus.Conditions = conditionsFromApply(applyResult)
   653  
   654  	result.Cluster = cluster
   655  	result.Spec = spec
   656  	result.Status = *updatedStatus
   657  	if spec.ApplyCmd != nil {
   658  		result.CmdApplied = true
   659  	}
   660  	result.SetAppliedObjects(newObjectRefSet(applyResult.Objects))
   661  
   662  	result.ImageMapSpecs = nil
   663  	result.ImageMapStatuses = nil
   664  	for _, imageMapName := range spec.ImageMaps {
   665  		im, ok := imageMaps[types.NamespacedName{Name: imageMapName}]
   666  		if !ok {
   667  			// this should never happen, but if it does, just continue quietly.
   668  			continue
   669  		}
   670  
   671  		result.ImageMapSpecs = append(result.ImageMapSpecs, im.Spec)
   672  		result.ImageMapStatuses = append(result.ImageMapStatuses, im.Status)
   673  	}
   674  
   675  	return result.Status
   676  }
   677  
   678  // Record that the apply has been disabled.
   679  func (r *Reconciler) recordDisableStatus(
   680  	nn types.NamespacedName,
   681  	spec v1alpha1.KubernetesApplySpec,
   682  	disableStatus v1alpha1.DisableStatus) {
   683  
   684  	r.mu.Lock()
   685  	defer r.mu.Unlock()
   686  
   687  	result := r.ensureResultExists(nn)
   688  	if apicmp.DeepEqual(result.Status.DisableStatus, &disableStatus) {
   689  		return
   690  	}
   691  
   692  	isDisabled := disableStatus.State == v1alpha1.DisableStateDisabled
   693  
   694  	update := result.Status.DeepCopy()
   695  	update.DisableStatus = &disableStatus
   696  	result.Status = *update
   697  
   698  	if isDisabled {
   699  		result.SetAppliedObjects(nil)
   700  	}
   701  }
   702  
    703  // Record that the KubernetesApply is being deleted, queueing its applied objects for garbage collection.
   704  func (r *Reconciler) recordDelete(nn types.NamespacedName) {
   705  	r.mu.Lock()
   706  	defer r.mu.Unlock()
   707  
   708  	result := r.ensureResultExists(nn)
   709  	result.Status = v1alpha1.KubernetesApplyStatus{}
   710  	result.SetAppliedObjects(nil)
   711  }
   712  
   713  // Record that the delete command was run.
   714  func (r *Reconciler) recordDeleteCmdRun(nn types.NamespacedName) {
   715  	r.mu.Lock()
   716  	defer r.mu.Unlock()
   717  
   718  	result, isExisting := r.results[nn]
   719  	if isExisting {
   720  		result.CmdApplied = false
   721  	}
   722  }
   723  
    724  // Delete all state for a KubernetesApply once its objects have been cleaned up.
   725  func (r *Reconciler) clearRecord(nn types.NamespacedName) {
   726  	r.mu.Lock()
   727  	defer r.mu.Unlock()
   728  
   729  	delete(r.results, nn)
   730  }
   731  
   732  // Perform garbage collection for a particular KubernetesApply object.
   733  //
   734  // isDeleting: indicates whether this is a full delete or just
   735  // a cleanup of dangling objects.
   736  //
   737  // For custom deploy commands, we run the delete cmd.
   738  //
   739  // For YAML deploys, this is more complex:
   740  //
   741  // There are typically 4 ways objects get marked "dangling".
   742  // 1) Their owner A has been deleted.
   743  // 2) Their owner A has been disabled.
   744  // 3) They've been moved from owner A to owner B.
   745  // 4) Owner A has been re-applied with different arguments.
   746  //
   747  // Because the reconciler handles one owner at a time,
   748  // cases (1) and (3) are basically indistinguishable, and can
   749  // lead to race conditions if we're not careful (e.g., owner A's GC
   750  // deletes objects deployed by B).
   751  //
   752  // TODO(milas): in the case that the KA object was deleted, should we respect `tilt.dev/down-policy`?
   753  func (r *Reconciler) garbageCollect(nn types.NamespacedName, isDeleting bool) deleteSpec {
   754  	r.mu.Lock()
   755  	defer r.mu.Unlock()
   756  
   757  	result, isExisting := r.results[nn]
   758  	if !isExisting {
   759  		return deleteSpec{}
   760  	}
   761  
   762  	if !isDeleting && result.Status.Error != "" {
    763  		// Do not attempt to delete any objects if the apply failed.
    764  		// N.B. if isDeleting is true, the KubernetesApply itself was deleted or disabled, so objects WILL be deleted.
   765  		return deleteSpec{}
   766  	}
   767  
   768  	if result.Spec.DeleteCmd != nil {
   769  		if !isDeleting || !result.CmdApplied {
   770  			// If there's a custom apply + delete command, GC only happens if
   771  			// the KubernetesApply object is being deleted (or disabled) and
   772  			// the apply command was actually executed (by Tilt).
   773  			return deleteSpec{}
   774  		}
   775  
    776  		// The KubernetesApply was deleted or disabled and we have a custom delete cmd, so use that
    777  		// and skip diffing managed entities entirely.
   778  		//
   779  		// We assume that the delete cmd deletes all dangling objects.
   780  		for k := range result.DanglingObjects {
   781  			delete(result.DanglingObjects, k)
   782  		}
   783  		result.clearApplyStatus()
   784  		return deleteSpec{
   785  			deleteCmd: result.Spec.DeleteCmd,
   786  			cluster:   result.Cluster,
   787  		}
   788  	}
   789  
   790  	// Reconcile the dangling objects against applied objects, ensuring that we're
   791  	// not deleting an object that was moved to another resource.
   792  	for _, result := range r.results {
   793  		for objRef := range result.AppliedObjects {
   794  			delete(result.DanglingObjects, objRef)
   795  		}
   796  	}
   797  
   798  	toDelete := make([]k8s.K8sEntity, 0, len(result.DanglingObjects))
   799  	for k, v := range result.DanglingObjects {
   800  		delete(result.DanglingObjects, k)
   801  		toDelete = append(toDelete, v)
   802  	}
   803  	if isDeleting {
   804  		result.clearApplyStatus()
   805  	}
   806  	return deleteSpec{
   807  		entities: toDelete,
   808  		cluster:  result.Cluster,
   809  	}
   810  }
   811  
   812  // A helper that deletes all Kubernetes objects, even if they haven't been applied yet.
   813  //
   814  // Namespaces are not deleted by default. Similar to `tilt down`, deleting namespaces
   815  // is likely to be more destructive than most users want from this operation.
   816  func (r *Reconciler) ForceDelete(ctx context.Context, nn types.NamespacedName,
   817  	spec v1alpha1.KubernetesApplySpec,
   818  	cluster *v1alpha1.Cluster,
   819  	reason string) error {
   820  
   821  	toDelete := deleteSpec{cluster: cluster}
   822  	if spec.YAML != "" {
   823  		entities, err := k8s.ParseYAMLFromString(spec.YAML)
   824  		if err != nil {
   825  			return fmt.Errorf("force delete: %v", err)
   826  		}
   827  
   828  		entities, _, err = k8s.Filter(entities, func(e k8s.K8sEntity) (b bool, err error) {
   829  			return e.GVK() != schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"}, nil
   830  		})
   831  		if err != nil {
   832  			return err
   833  		}
   834  
   835  		toDelete.entities = k8s.ReverseSortedEntities(entities)
   836  	} else if spec.DeleteCmd != nil {
   837  		toDelete.deleteCmd = spec.DeleteCmd
   838  	}
   839  
   840  	r.recordDelete(nn)
   841  	r.bestEffortDelete(ctx, nn, toDelete, reason)
   842  	r.requeuer.Add(nn)
   843  	return nil
   844  }
   845  
   846  // Update the status if necessary.
   847  func (r *Reconciler) maybeUpdateStatus(ctx context.Context, nn types.NamespacedName, obj *v1alpha1.KubernetesApply) (*v1alpha1.KubernetesApply, error) {
   848  	newStatus := v1alpha1.KubernetesApplyStatus{}
        	// The results map is shared with ForceApply, so guard the read with the mutex.
        	r.mu.Lock()
    849  	existing, ok := r.results[nn]
    850  	if ok {
    851  		newStatus = existing.Status
    852  	}
        	r.mu.Unlock()
   853  
   854  	if apicmp.DeepEqual(obj.Status, newStatus) {
   855  		return obj, nil
   856  	}
   857  
   858  	oldError := obj.Status.Error
   859  	newError := newStatus.Error
   860  	update := obj.DeepCopy()
   861  	update.Status = *(newStatus.DeepCopy())
   862  
   863  	err := r.ctrlClient.Status().Update(ctx, update)
   864  	if err != nil {
   865  		return nil, err
   866  	}
   867  
   868  	// Print new errors on objects that aren't managed by the buildcontroller.
   869  	if newError != "" && oldError != newError && update.Annotations[v1alpha1.AnnotationManagedBy] == "" {
   870  		logger.Get(ctx).Errorf("kubernetesapply %s: %s", obj.Name, newError)
   871  	}
   872  	return update, nil
   873  }
   874  
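        // bestEffortDelete deletes the given objects and/or runs the custom delete
        // command, logging failures instead of returning them so that a failed
        // cleanup never blocks reconciliation.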
   875  func (r *Reconciler) bestEffortDelete(ctx context.Context, nn types.NamespacedName, toDelete deleteSpec, reason string) {
   876  	if len(toDelete.entities) == 0 && toDelete.deleteCmd == nil {
   877  		return
   878  	}
   879  
   880  	l := logger.Get(ctx)
   881  	l.Infof("Beginning %s", reason)
   882  
   883  	if len(toDelete.entities) != 0 {
   884  		err := r.k8sClient.Delete(ctx, toDelete.entities, 0)
   885  		if err != nil {
   886  			l.Errorf("Error %s: %v", reason, err)
   887  		}
   888  	}
   889  
   890  	if toDelete.deleteCmd != nil {
   891  		deleteCmd := toModelCmd(*toDelete.deleteCmd)
   892  		r.maybeInjectKubeconfig(&deleteCmd, toDelete.cluster)
   893  		if err := localexec.OneShotToLogger(ctx, r.execer, deleteCmd); err != nil {
   894  			l.Errorf("Error %s: %v", reason, err)
   895  		}
   896  		r.recordDeleteCmdRun(nn)
   897  	}
   898  }
   899  
   900  var imGVK = v1alpha1.SchemeGroupVersion.WithKind("ImageMap")
   901  var clusterGVK = v1alpha1.SchemeGroupVersion.WithKind("Cluster")
   902  
   903  // indexKubernetesApply returns keys for all the objects we need to watch based on the spec.
   904  func indexKubernetesApply(obj client.Object) []indexer.Key {
   905  	ka := obj.(*v1alpha1.KubernetesApply)
   906  	result := []indexer.Key{}
   907  	for _, name := range ka.Spec.ImageMaps {
   908  		result = append(result, indexer.Key{
   909  			Name: types.NamespacedName{Name: name},
   910  			GVK:  imGVK,
   911  		})
   912  	}
   913  	if ka.Spec.Cluster != "" {
   914  		result = append(result, indexer.Key{
   915  			Name: types.NamespacedName{Name: ka.Spec.Cluster},
   916  			GVK:  clusterGVK,
   917  		})
   918  	}
   919  
   920  	if ka.Spec.DisableSource != nil {
   921  		cm := ka.Spec.DisableSource.ConfigMap
   922  		if cm != nil {
   923  			cmGVK := v1alpha1.SchemeGroupVersion.WithKind("ConfigMap")
   924  			result = append(result, indexer.Key{
   925  				Name: types.NamespacedName{Name: cm.Name},
   926  				GVK:  cmGVK,
   927  			})
   928  		}
   929  	}
   930  	return result
   931  }
   932  
   933  // Keeps track of the state we currently know about.
   934  type Result struct {
   935  	Spec             v1alpha1.KubernetesApplySpec
   936  	Cluster          *v1alpha1.Cluster
   937  	ImageMapSpecs    []v1alpha1.ImageMapSpec
   938  	ImageMapStatuses []v1alpha1.ImageMapStatus
   939  
   940  	CmdApplied      bool
   941  	AppliedObjects  objectRefSet
   942  	DanglingObjects objectRefSet
   943  	Status          v1alpha1.KubernetesApplyStatus
   944  }
   945  
   946  // Set the status of applied objects to empty,
   947  // as if this had never been applied.
   948  func (r *Result) clearApplyStatus() {
   949  	if r.Status.LastApplyTime.IsZero() && r.Status.Error == "" {
   950  		return
   951  	}
   952  
   953  	update := r.Status.DeepCopy()
   954  	update.LastApplyTime = metav1.MicroTime{}
   955  	update.LastApplyStartTime = metav1.MicroTime{}
   956  	update.Error = ""
   957  	update.ResultYAML = ""
   958  	r.Status = *update
   959  }
   960  
   961  // Set a new collection of applied objects.
   962  //
   963  // Move all the currently applied objects to the dangling
   964  // collection for garbage collection.
   965  func (r *Result) SetAppliedObjects(set objectRefSet) {
   966  	for k, v := range r.AppliedObjects {
   967  		r.DanglingObjects[k] = v
   968  	}
   969  	r.AppliedObjects = set
   970  }
   971  
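        // objectRef identifies an applied object by name, namespace, and type, e.g.
        // {Name: "my-app", Namespace: "default", APIVersion: "apps/v1", Kind: "Deployment"},
        // so that applies and garbage collection can be compared across resources.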
   972  type objectRef struct {
   973  	Name       string
   974  	Namespace  string
   975  	APIVersion string
   976  	Kind       string
   977  }
   978  
   979  type objectRefSet map[objectRef]k8s.K8sEntity
   980  
   981  func newObjectRefSet(entities []k8s.K8sEntity) objectRefSet {
   982  	r := make(objectRefSet, len(entities))
   983  	for _, e := range entities {
   984  		ref := e.ToObjectReference()
   985  		oRef := objectRef{
   986  			Name:       ref.Name,
   987  			Namespace:  ref.Namespace,
   988  			APIVersion: ref.APIVersion,
   989  			Kind:       ref.Kind,
   990  		}
   991  		r[oRef] = e
   992  	}
   993  	return r
   994  }
   995  
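        // toModelCmd converts the API-level KubernetesApplyCmd into the internal
        // model.Cmd consumed by the execer.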
   996  func toModelCmd(cmd v1alpha1.KubernetesApplyCmd) model.Cmd {
   997  	return model.Cmd{
   998  		Argv: cmd.Args,
   999  		Dir:  cmd.Dir,
  1000  		Env:  cmd.Env,
  1001  	}
  1002  }