github.com/tilt-dev/tilt@v0.36.0/internal/controllers/core/dockercomposeservice/reconciler.go (about)

     1  package dockercomposeservice
     2  
     3  import (
     4  	"context"
     5  	"strings"
     6  	"sync"
     7  
     8  	dtypes "github.com/docker/docker/api/types"
     9  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    10  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    11  	"k8s.io/apimachinery/pkg/runtime"
    12  	"k8s.io/apimachinery/pkg/types"
    13  	ctrl "sigs.k8s.io/controller-runtime"
    14  	"sigs.k8s.io/controller-runtime/pkg/builder"
    15  	"sigs.k8s.io/controller-runtime/pkg/client"
    16  	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
    17  	"sigs.k8s.io/controller-runtime/pkg/handler"
    18  	"sigs.k8s.io/controller-runtime/pkg/reconcile"
    19  
    20  	"github.com/docker/go-connections/nat"
    21  
    22  	"github.com/tilt-dev/tilt/internal/controllers/apicmp"
    23  	"github.com/tilt-dev/tilt/internal/controllers/apis/configmap"
    24  	"github.com/tilt-dev/tilt/internal/controllers/apis/imagemap"
    25  	"github.com/tilt-dev/tilt/internal/controllers/indexer"
    26  	"github.com/tilt-dev/tilt/internal/docker"
    27  	"github.com/tilt-dev/tilt/internal/dockercompose"
    28  	"github.com/tilt-dev/tilt/internal/filteredwriter"
    29  	"github.com/tilt-dev/tilt/internal/store"
    30  	"github.com/tilt-dev/tilt/internal/store/dockercomposeservices"
    31  	"github.com/tilt-dev/tilt/pkg/apis"
    32  	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
    33  	"github.com/tilt-dev/tilt/pkg/logger"
    34  	"github.com/tilt-dev/tilt/pkg/model"
    35  )
    36  
// Reconciler manages DockerComposeService objects: it brings services up
// with `docker compose up`, records their container state, and tears them
// down when they are disabled or deleted.
//
// It is driven both by the controller-runtime reconcile loop and by direct
// calls from buildcontrol (ForceApply / ForceDelete), so shared state below
// is guarded by the mutex.
type Reconciler struct {
	dcc          dockercompose.DockerComposeClient // runs `docker compose` commands
	dc           docker.Client                     // raw docker API, scoped to the DC orchestrator
	st           store.RStore
	ctrlClient   ctrlclient.Client
	indexer      *indexer.Indexer   // maps ImageMap/ConfigMap changes back to services
	requeuer     *indexer.Requeuer  // lets non-reconcile code trigger a reconcile pass
	disableQueue *DisableSubscriber // serializes teardown of disabled services
	mu           sync.Mutex

	// Protected by the mutex.
	results                        map[types.NamespacedName]*Result
	resultsByServiceName           map[string]*Result
	healthcheckOutputByServiceName map[string]string
	projectWatches                 map[string]*ProjectWatch
}
    53  
    54  func (r *Reconciler) CreateBuilder(mgr ctrl.Manager) (*builder.Builder, error) {
    55  	b := ctrl.NewControllerManagedBy(mgr).
    56  		For(&v1alpha1.DockerComposeService{}).
    57  		WatchesRawSource(r.requeuer).
    58  		Watches(&v1alpha1.ImageMap{},
    59  			handler.EnqueueRequestsFromMapFunc(r.indexer.Enqueue)).
    60  		Watches(&v1alpha1.ConfigMap{},
    61  			handler.EnqueueRequestsFromMapFunc(r.indexer.Enqueue))
    62  
    63  	return b, nil
    64  }
    65  
    66  func NewReconciler(
    67  	ctrlClient ctrlclient.Client,
    68  	dcc dockercompose.DockerComposeClient,
    69  	dc docker.Client,
    70  	st store.RStore,
    71  	scheme *runtime.Scheme,
    72  	disableQueue *DisableSubscriber,
    73  ) *Reconciler {
    74  	return &Reconciler{
    75  		ctrlClient:                     ctrlClient,
    76  		dcc:                            dcc,
    77  		dc:                             dc.ForOrchestrator(model.OrchestratorDC),
    78  		indexer:                        indexer.NewIndexer(scheme, indexDockerComposeService),
    79  		st:                             st,
    80  		requeuer:                       indexer.NewRequeuer(),
    81  		disableQueue:                   disableQueue,
    82  		results:                        make(map[types.NamespacedName]*Result),
    83  		resultsByServiceName:           make(map[string]*Result),
    84  		healthcheckOutputByServiceName: make(map[string]string),
    85  		projectWatches:                 make(map[string]*ProjectWatch),
    86  	}
    87  }
    88  
// Redeploy the docker compose service when its spec
// changes or any of its dependencies change.
func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
	nn := request.NamespacedName

	var obj v1alpha1.DockerComposeService
	err := r.ctrlClient.Get(ctx, nn, &obj)
	// Tell the indexer what we saw, even if the object is gone, so stale
	// watch keys are dropped.
	r.indexer.OnReconcile(nn, &obj)
	if err != nil && !apierrors.IsNotFound(err) {
		return ctrl.Result{}, err
	}

	// Object deleted (or being finalized): queue a teardown of any running
	// container, clear our bookkeeping, and notify the legacy store.
	if apierrors.IsNotFound(err) || !obj.ObjectMeta.DeletionTimestamp.IsZero() {
		rs, ok := r.updateForDisableQueue(nn, true /* deleting */)
		if ok {
			r.disableQueue.UpdateQueue(rs)
		}
		r.clearResult(nn)

		r.st.Dispatch(dockercomposeservices.NewDockerComposeServiceDeleteAction(nn.Name))
		r.manageOwnedProjectWatches(ctx)
		return r.manageOwnedLogStream(ctx, nn, nil)
	}

	r.st.Dispatch(dockercomposeservices.NewDockerComposeServiceUpsertAction(&obj))

	// Get configmap's disable status
	ctx = store.MustObjectLogHandler(ctx, r.st, &obj)
	disableStatus, err := configmap.MaybeNewDisableStatus(ctx, r.ctrlClient, obj.Spec.DisableSource, obj.Status.DisableStatus)
	if err != nil {
		return ctrl.Result{}, err
	}

	r.recordSpecAndDisableStatus(nn, obj.Spec, *disableStatus)

	// Keep the disable queue in sync. If the service just became disabled,
	// record the container as removed so the status reflects the pending
	// teardown.
	rs, ok := r.updateForDisableQueue(nn, disableStatus.State == v1alpha1.DisableStateDisabled)
	if ok {
		r.disableQueue.UpdateQueue(rs)
		if disableStatus.State == v1alpha1.DisableStateDisabled {
			r.recordRmOnDisable(nn)
		}
	}

	if disableStatus.State != v1alpha1.DisableStateDisabled {
		// Fetch all the images needed to apply this YAML.
		imageMaps, err := imagemap.NamesToObjects(ctx, r.ctrlClient, obj.Spec.ImageMaps)
		if err != nil {
			return ctrl.Result{}, err
		}

		// Apply to the cluster if necessary.
		if r.shouldDeployOnReconcile(request.NamespacedName, &obj, imageMaps) {
			// If we have no image dependencies in tilt, tell docker compose
			// to handle any necessary image builds.
			dcManagedBuild := len(imageMaps) == 0
			// Apply errors are recorded in the Result status (not returned),
			// so they surface through maybeUpdateStatus below.
			_ = r.forceApplyHelper(ctx, nn, obj.Spec, imageMaps, dcManagedBuild)
		}
	}

	// TODO(nick): Deploy dockercompose services that aren't managed via buildcontrol

	err = r.maybeUpdateStatus(ctx, nn, &obj)
	if err != nil {
		return ctrl.Result{}, err
	}
	r.manageOwnedProjectWatches(ctx)
	return r.manageOwnedLogStream(ctx, nn, &obj)
}
   157  
   158  // Determine if we should deploy the current YAML.
   159  //
   160  // Ensures:
   161  //  1. We have enough info to deploy, and
   162  //  2. Either we haven't deployed before,
   163  //     or one of the inputs has changed since the last deploy.
   164  func (r *Reconciler) shouldDeployOnReconcile(
   165  	nn types.NamespacedName,
   166  	obj *v1alpha1.DockerComposeService,
   167  	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
   168  ) bool {
   169  	if obj.Annotations[v1alpha1.AnnotationManagedBy] != "" {
   170  		// Until resource dependencies are expressed in the API,
   171  		// we can't use reconciliation to deploy KubernetesApply objects
   172  		// managed by the buildcontrol engine.
   173  		return false
   174  	}
   175  
   176  	for _, imageMapName := range obj.Spec.ImageMaps {
   177  		_, ok := imageMaps[types.NamespacedName{Name: imageMapName}]
   178  		if !ok {
   179  			// We haven't built the images yet to deploy.
   180  			return false
   181  		}
   182  	}
   183  
   184  	r.mu.Lock()
   185  	result, ok := r.results[nn]
   186  	r.mu.Unlock()
   187  
   188  	if !ok || result.Status.LastApplyStartTime.IsZero() {
   189  		// We've never successfully deployed before, so deploy now.
   190  		return true
   191  	}
   192  
   193  	if !apicmp.DeepEqual(obj.Spec, result.Spec) {
   194  		// The YAML to deploy changed.
   195  		return true
   196  	}
   197  
   198  	imageMapNames := obj.Spec.ImageMaps
   199  	if len(imageMapNames) != len(result.ImageMapSpecs) ||
   200  		len(imageMapNames) != len(result.ImageMapStatuses) {
   201  		return true
   202  	}
   203  
   204  	for i, name := range obj.Spec.ImageMaps {
   205  		im := imageMaps[types.NamespacedName{Name: name}]
   206  		if !apicmp.DeepEqual(im.Spec, result.ImageMapSpecs[i]) {
   207  
   208  			return true
   209  		}
   210  		if !apicmp.DeepEqual(im.Status, result.ImageMapStatuses[i]) {
   211  			return true
   212  		}
   213  	}
   214  
   215  	return false
   216  }
   217  
   218  // We need to update the disable queue in two cases:
   219  // 1) If the resource is enabled (to clear any pending deletes), or
   220  // 2) If the resource is deleted but still running (to kickoff a delete).
   221  func (r *Reconciler) updateForDisableQueue(nn types.NamespacedName, isDisabled bool) (resourceState, bool) {
   222  	r.mu.Lock()
   223  	defer r.mu.Unlock()
   224  
   225  	result, isExisting := r.results[nn]
   226  	if !isExisting {
   227  		return resourceState{}, false
   228  	}
   229  
   230  	if !isDisabled {
   231  		return resourceState{Name: nn.Name, Spec: result.Spec}, true
   232  	}
   233  
   234  	// We only need to do cleanup if there's a container available.
   235  	if result.Status.ContainerState != nil {
   236  		return resourceState{
   237  			Name:         nn.Name,
   238  			Spec:         result.Spec,
   239  			NeedsCleanup: true,
   240  			StartTime:    result.Status.ContainerState.StartedAt.Time,
   241  		}, true
   242  	}
   243  
   244  	return resourceState{}, false
   245  }
   246  
   247  // Records that a delete was performed.
   248  func (r *Reconciler) recordRmOnDisable(nn types.NamespacedName) {
   249  	r.mu.Lock()
   250  	defer r.mu.Unlock()
   251  
   252  	result, isExisting := r.results[nn]
   253  	if !isExisting {
   254  		return
   255  	}
   256  
   257  	result.Status.ContainerID = ""
   258  	result.Status.ContainerName = ""
   259  	result.Status.ContainerState = nil
   260  	result.Status.PortBindings = nil
   261  }
   262  
   263  // Removes all state for an object.
   264  func (r *Reconciler) clearResult(nn types.NamespacedName) {
   265  	r.mu.Lock()
   266  	defer r.mu.Unlock()
   267  	result, ok := r.results[nn]
   268  	if ok {
   269  		delete(r.resultsByServiceName, result.Spec.Service)
   270  		delete(r.results, nn)
   271  	}
   272  }
   273  
   274  // Create a result object if necessary. Caller must hold the mutex.
   275  func (r *Reconciler) ensureResultExists(nn types.NamespacedName) *Result {
   276  	existing, hasExisting := r.results[nn]
   277  	if hasExisting {
   278  		return existing
   279  	}
   280  
   281  	result := &Result{Name: nn}
   282  	r.results[nn] = result
   283  	return result
   284  }
   285  
   286  // Record disable state of the service.
   287  func (r *Reconciler) recordSpecAndDisableStatus(
   288  	nn types.NamespacedName,
   289  	spec v1alpha1.DockerComposeServiceSpec,
   290  	disableStatus v1alpha1.DisableStatus) {
   291  	r.mu.Lock()
   292  	defer r.mu.Unlock()
   293  
   294  	result := r.ensureResultExists(nn)
   295  	if !apicmp.DeepEqual(result.Spec, spec) {
   296  		delete(r.resultsByServiceName, result.Spec.Service)
   297  		result.Spec = spec
   298  		result.ProjectHash = dockercomposeservices.MustHashProject(spec.Project)
   299  		r.resultsByServiceName[result.Spec.Service] = result
   300  	}
   301  
   302  	if apicmp.DeepEqual(result.Status.DisableStatus, &disableStatus) {
   303  		return
   304  	}
   305  
   306  	update := result.Status.DeepCopy()
   307  	update.DisableStatus = &disableStatus
   308  	result.Status = *update
   309  }
   310  
   311  // A helper that deletes the Docker Compose service, even if they haven't been applied yet.
   312  //
   313  // Primarily intended so that the build controller can do force restarts.
   314  func (r *Reconciler) ForceDelete(
   315  	ctx context.Context,
   316  	nn types.NamespacedName,
   317  	spec v1alpha1.DockerComposeServiceSpec,
   318  	reason string) error {
   319  	out := logger.Get(ctx).Writer(logger.InfoLvl)
   320  	out = filteredwriter.New(out, func(s string) bool {
   321  		// https://app.shortcut.com/windmill/story/13147/docker-compose-down-messages-for-disabled-resources-may-be-confusing
   322  		return strings.HasPrefix(s, "Going to remove")
   323  	})
   324  	err := r.dcc.Rm(ctx, []v1alpha1.DockerComposeServiceSpec{spec}, out, out)
   325  	if err != nil {
   326  		logger.Get(ctx).Errorf("Error %s: %v", reason, err)
   327  	}
   328  	r.clearResult(nn)
   329  	r.requeuer.Add(nn)
   330  	return nil
   331  }
   332  
   333  // Apply the DockerCompose service spec, unconditionally,
   334  // and requeue the reconciler so that it updates the apiserver.
   335  //
   336  // We expose this as a public method as a hack! Currently, in Tilt, BuildController
   337  // handles dependencies between resources. The API server doesn't know about build
   338  // dependencies yet. So Tiltfile-owned resources are applied manually, rather than
   339  // going through the normal reconcile system.
   340  func (r *Reconciler) ForceApply(
   341  	ctx context.Context,
   342  	nn types.NamespacedName,
   343  	spec v1alpha1.DockerComposeServiceSpec,
   344  	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
   345  	dcManagedBuild bool) v1alpha1.DockerComposeServiceStatus {
   346  	status := r.forceApplyHelper(ctx, nn, spec, imageMaps, dcManagedBuild)
   347  	r.requeuer.Add(nn)
   348  	return status
   349  }
   350  
   351  // Records status when an apply fail.
   352  // This might mean the image build failed, if we're using dc-managed image builds.
   353  // Does not necessarily clear the current running container.
   354  func (r *Reconciler) recordApplyError(
   355  	nn types.NamespacedName,
   356  	spec v1alpha1.DockerComposeServiceSpec,
   357  	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
   358  	err error,
   359  	startTime metav1.MicroTime,
   360  ) v1alpha1.DockerComposeServiceStatus {
   361  	r.mu.Lock()
   362  	defer r.mu.Unlock()
   363  
   364  	result := r.ensureResultExists(nn)
   365  	status := result.Status.DeepCopy()
   366  	status.LastApplyStartTime = startTime
   367  	status.LastApplyFinishTime = apis.NowMicro()
   368  	status.ApplyError = err.Error()
   369  	result.Status = *status
   370  	result.SetImageMapInputs(spec, imageMaps)
   371  	return *status
   372  }
   373  
   374  // Records status when an apply succeeds.
   375  func (r *Reconciler) recordApplyStatus(
   376  	nn types.NamespacedName,
   377  	spec v1alpha1.DockerComposeServiceSpec,
   378  	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
   379  	newStatus v1alpha1.DockerComposeServiceStatus,
   380  ) v1alpha1.DockerComposeServiceStatus {
   381  	r.mu.Lock()
   382  	defer r.mu.Unlock()
   383  
   384  	result := r.ensureResultExists(nn)
   385  	disableStatus := result.Status.DisableStatus
   386  	newStatus.DisableStatus = disableStatus
   387  	result.Status = newStatus
   388  	result.SetImageMapInputs(spec, imageMaps)
   389  
   390  	return newStatus
   391  }
   392  
// A helper that applies the given specs to the cluster,
// tracking the state of the deploy in the results map.
//
// Returns the resulting service status; errors from `up` or the container
// lookup are recorded in the status (ApplyError) rather than returned.
func (r *Reconciler) forceApplyHelper(
	ctx context.Context,
	nn types.NamespacedName,
	spec v1alpha1.DockerComposeServiceSpec,
	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
	// TODO(nick): Figure out a better way to infer the dcManagedBuild setting.
	dcManagedBuild bool,
) v1alpha1.DockerComposeServiceStatus {
	startTime := apis.NowMicro()
	// Stream `docker compose up` output into the object's log.
	stdout := logger.Get(ctx).Writer(logger.InfoLvl)
	stderr := logger.Get(ctx).Writer(logger.InfoLvl)
	err := r.dcc.Up(ctx, spec, dcManagedBuild, stdout, stderr)
	if err != nil {
		return r.recordApplyError(nn, spec, imageMaps, err, startTime)
	}

	// grab the initial container state
	cid, err := r.dcc.ContainerID(ctx, spec)
	if err != nil {
		return r.recordApplyError(nn, spec, imageMaps, err, startTime)
	}

	// Inspect failures are non-fatal: the apply already succeeded, so we
	// continue with whatever container metadata we managed to get.
	containerJSON, err := r.dc.ContainerInspect(ctx, string(cid))
	if err != nil {
		logger.Get(ctx).Debugf("Error inspecting container %s: %v", cid, err)
	}

	name := ""
	var containerState *dtypes.ContainerState
	if containerJSON.ContainerJSONBase != nil && containerJSON.ContainerJSONBase.State != nil {
		containerState = containerJSON.ContainerJSONBase.State

		// NOTE(nick): For some reason, docker container names start with "/"
		// but are printed to the user without it.
		name = strings.TrimPrefix(containerJSON.ContainerJSONBase.Name, "/")
	}

	var ports nat.PortMap
	if containerJSON.NetworkSettings != nil {
		ports = containerJSON.NetworkSettings.NetworkSettingsBase.Ports
	}

	status := dockercompose.ToServiceStatus(cid, name, containerState, ports)
	status.LastApplyStartTime = startTime
	status.LastApplyFinishTime = apis.NowMicro()
	return r.recordApplyStatus(nn, spec, imageMaps, status)
}
   442  
   443  // Update the status on the apiserver if necessary.
   444  func (r *Reconciler) maybeUpdateStatus(ctx context.Context, nn types.NamespacedName, obj *v1alpha1.DockerComposeService) error {
   445  	newStatus := v1alpha1.DockerComposeServiceStatus{}
   446  	existing, ok := r.results[nn]
   447  	if ok {
   448  		newStatus = existing.Status
   449  	}
   450  
   451  	if apicmp.DeepEqual(obj.Status, newStatus) {
   452  		return nil
   453  	}
   454  
   455  	oldError := obj.Status.ApplyError
   456  	newError := newStatus.ApplyError
   457  	update := obj.DeepCopy()
   458  	update.Status = *(newStatus.DeepCopy())
   459  
   460  	err := r.ctrlClient.Status().Update(ctx, update)
   461  	if err != nil {
   462  		return err
   463  	}
   464  
   465  	// Print new errors on objects that aren't managed by the buildcontroller.
   466  	if newError != "" && oldError != newError && update.Annotations[v1alpha1.AnnotationManagedBy] == "" {
   467  		logger.Get(ctx).Errorf("dockercomposeservice %s: %s", obj.Name, newError)
   468  	}
   469  	return nil
   470  }
   471  
   472  var imGVK = v1alpha1.SchemeGroupVersion.WithKind("ImageMap")
   473  
   474  // indexDockerComposeService returns keys for all the objects we need to watch based on the spec.
   475  func indexDockerComposeService(obj client.Object) []indexer.Key {
   476  	dcs := obj.(*v1alpha1.DockerComposeService)
   477  	result := []indexer.Key{}
   478  	for _, name := range dcs.Spec.ImageMaps {
   479  		result = append(result, indexer.Key{
   480  			Name: types.NamespacedName{Name: name},
   481  			GVK:  imGVK,
   482  		})
   483  	}
   484  
   485  	if dcs.Spec.DisableSource != nil {
   486  		cm := dcs.Spec.DisableSource.ConfigMap
   487  		if cm != nil {
   488  			cmGVK := v1alpha1.SchemeGroupVersion.WithKind("ConfigMap")
   489  			result = append(result, indexer.Key{
   490  				Name: types.NamespacedName{Name: cm.Name},
   491  				GVK:  cmGVK,
   492  			})
   493  		}
   494  	}
   495  
   496  	return result
   497  }
   498  
// Keeps track of the state we currently know about for one
// DockerComposeService: the last-applied spec, the image inputs that
// apply used, and the most recent status. Instances live in
// Reconciler.results and are read/written under the Reconciler mutex.
type Result struct {
	Name types.NamespacedName
	Spec v1alpha1.DockerComposeServiceSpec
	// Image specs/statuses as of the last apply, used to decide whether
	// a redeploy is needed (see shouldDeployOnReconcile).
	ImageMapSpecs    []v1alpha1.ImageMapSpec
	ImageMapStatuses []v1alpha1.ImageMapStatus
	// Hash of Spec.Project, used to key project watches.
	ProjectHash string

	Status v1alpha1.DockerComposeServiceStatus
}
   509  
   510  func (r *Result) SetImageMapInputs(spec v1alpha1.DockerComposeServiceSpec, imageMaps map[types.NamespacedName]*v1alpha1.ImageMap) {
   511  	r.ImageMapSpecs = nil
   512  	r.ImageMapStatuses = nil
   513  	for _, imageMapName := range spec.ImageMaps {
   514  		im, ok := imageMaps[types.NamespacedName{Name: imageMapName}]
   515  		if !ok {
   516  			// this should never happen, but if it does, just continue quietly.
   517  			continue
   518  		}
   519  
   520  		r.ImageMapSpecs = append(r.ImageMapSpecs, im.Spec)
   521  		r.ImageMapStatuses = append(r.ImageMapStatuses, im.Status)
   522  	}
   523  }
   524  
// Keeps track of the projects we're currently watching.
type ProjectWatch struct {
	ctx     context.Context
	cancel  func() // stops the watch
	project v1alpha1.DockerComposeProject
	hash    string // hash of the project, matches Result.ProjectHash
}