github.com/tilt-dev/tilt@v0.33.15-0.20240515162809-0a22ed45d8a0/internal/controllers/core/dockercomposelogstream/reconciler.go

package dockercomposelogstream

import (
	"context"
	"io"
	"sync"
	"time"

	dtypes "github.com/docker/docker/api/types"
	"github.com/docker/docker/pkg/stdcopy"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	"github.com/tilt-dev/tilt/internal/controllers/apicmp"
	"github.com/tilt-dev/tilt/internal/controllers/indexer"
	"github.com/tilt-dev/tilt/internal/docker"
	"github.com/tilt-dev/tilt/internal/dockercompose"
	"github.com/tilt-dev/tilt/internal/engine/runtimelog"
	"github.com/tilt-dev/tilt/internal/store"
	"github.com/tilt-dev/tilt/internal/store/dockercomposeservices"
	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
	"github.com/tilt-dev/tilt/pkg/logger"
	"github.com/tilt-dev/tilt/pkg/model"
)

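// ContainerInfo captures the identity, runtime state, and TTY setting of the
// container currently backing a Docker Compose service.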
type ContainerInfo struct {
	ID    string
	State *v1alpha1.DockerContainerState
	TTY   bool
}

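// Reconciler streams logs for Docker Compose services: it maintains one log
// watch per DockerComposeLogStream object, plus per-project event watches
// that keep the known container state up to date.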
type Reconciler struct {
	client   ctrlclient.Client
	store    store.RStore
	dcc      dockercompose.DockerComposeClient
	dc       docker.Client
	requeuer *indexer.Requeuer

	mu sync.Mutex

	// Protected by the mutex.
	results        map[types.NamespacedName]*Result
	containers     map[serviceKey]*ContainerInfo
	projectWatches map[string]*ProjectWatch
}

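// Verify at compile time that Reconciler implements the controller-runtime
// reconcile.Reconciler interface.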
var _ reconcile.Reconciler = &Reconciler{}

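// NewReconciler creates a Reconciler for DockerComposeLogStream objects,
// scoping the Docker client to the Docker Compose orchestrator.
//
// A minimal wiring sketch (st, dcc, and dc stand in for an already-constructed
// store, Docker Compose client, and Docker client; mgr is the
// controller-runtime Manager):
//
//	r := NewReconciler(mgr.GetClient(), st, dcc, dc)
//	b, err := r.CreateBuilder(mgr)
//	if err != nil {
//		return err
//	}
//	return b.Complete(r)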
func NewReconciler(client ctrlclient.Client, store store.RStore,
	dcc dockercompose.DockerComposeClient, dc docker.Client) *Reconciler {
	return &Reconciler{
		client:         client,
		store:          store,
		dcc:            dcc,
		dc:             dc.ForOrchestrator(model.OrchestratorDC),
		projectWatches: make(map[string]*ProjectWatch),
		results:        make(map[types.NamespacedName]*Result),
		containers:     make(map[serviceKey]*ContainerInfo),
		requeuer:       indexer.NewRequeuer(),
	}
}

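// Reconcile brings the log stream for a single DockerComposeLogStream object
// in line with its spec: deleted objects have their watches torn down, while
// live objects get a log watcher and the project-level event watches they
// depend on.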
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	nn := req.NamespacedName
	obj := &v1alpha1.DockerComposeLogStream{}
	err := r.client.Get(ctx, nn, obj)
	if err != nil && !apierrors.IsNotFound(err) {
		return ctrl.Result{}, err
	}

	if apierrors.IsNotFound(err) || obj.ObjectMeta.DeletionTimestamp != nil {
		r.clearResult(nn)
		r.manageOwnedProjectWatches()
		return ctrl.Result{}, nil
	}

	ctx = store.MustObjectLogHandler(ctx, r.store, obj)
	r.manageLogWatch(ctx, nn, obj)

	// The project event streamer depends on the project we read in manageLogWatch().
	r.manageOwnedProjectWatches()

	return ctrl.Result{}, nil
}

// clearResult removes all state for an object, canceling any running log
// watch.
func (r *Reconciler) clearResult(nn types.NamespacedName) {
	result, ok := r.results[nn]
	if ok {
		if result.watch != nil {
			result.watch.cancel()
		}
		delete(r.results, nn)
	}
}

// reconcileContainerState looks up the container state for the current
// object, if possible.
func (r *Reconciler) reconcileContainerState(ctx context.Context, obj *v1alpha1.DockerComposeLogStream, serviceKey serviceKey) {
	id, err := r.dcc.ContainerID(ctx, v1alpha1.DockerComposeServiceSpec{Project: obj.Spec.Project, Service: obj.Spec.Service})
	if err != nil {
		return
	}

	state, err := r.getContainerInfo(ctx, string(id))
	if err != nil {
		return
	}
	r.recordContainerInfo(serviceKey, state)
}

// manageLogWatch starts or restarts the log watcher for the object if
// necessary.
func (r *Reconciler) manageLogWatch(ctx context.Context, nn types.NamespacedName, obj *v1alpha1.DockerComposeLogStream) {
	// Make sure the result is up to date.
	result, ok := r.results[nn]
	changed := ok && !apicmp.DeepEqual(result.spec, obj.Spec)
	if changed && result.watch != nil {
		result.watch.cancel()
		result.watch = nil
	}

	if !ok {
		result = &Result{
			name:      nn,
			loggerCtx: store.MustObjectLogHandler(ctx, r.store, obj),
		}
		r.results[nn] = result
	}

	if changed || !ok {
		result.spec = obj.Spec
		result.projectHash = dockercomposeservices.MustHashProject(obj.Spec.Project)
	}

	serviceKey := result.serviceKey()
	r.reconcileContainerState(ctx, obj, serviceKey)

	container := r.containers[serviceKey]
	if container == nil {
		return
	}
	containerState := container.State
	containerID := container.ID

	// Docker evidently records the container start time asynchronously, so it
	// can actually be AFTER the first log timestamps (also reported by Docker).
	// Pad it by a second to reduce the number of potentially duplicated logs.
	startWatchTime := containerState.StartedAt.Time.Add(-time.Second)
	if result.watch != nil {
		if !result.watch.Done() {
			// watcher is already running
			return
		}

		if result.watch.containerID == containerID && !result.watch.startWatchTime.Before(startWatchTime) {
			// watcher finished but the container hasn't started up again
			// (N.B. we cannot rely on the container ID alone, because containers
			// 	can restart and be re-used after being stopped, e.g. for jobs that
			// 	run to completion but are re-triggered)
			return
		}
	}

	if ctx.Err() != nil {
		return
	}

	ctx, cancel := context.WithCancel(ctx)
	manifestName := model.ManifestName(obj.Annotations[v1alpha1.AnnotationManifest])
	w := &watch{
		ctx:            ctx,
		cancel:         cancel,
		manifestName:   manifestName,
		nn:             nn,
		spec:           obj.Spec,
		startWatchTime: startWatchTime,
		containerID:    containerID,
		tty:            container.TTY,
	}
	result.watch = w
	go r.consumeLogs(w)
}

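// consumeLogs tails the container's logs until the stream ends or the watch
// context is canceled, then requeues the object so Reconcile can decide
// whether to re-attach.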
func (r *Reconciler) consumeLogs(watch *watch) {
	defer func() {
		watch.cancel()
		r.requeuer.Add(watch.nn)
	}()

	ctx := watch.ctx
	if ctx.Err() != nil {
		return
	}
	startTime := watch.startWatchTime

	for {
		readCloser, err := r.dc.ContainerLogs(ctx, watch.containerID, dtypes.ContainerLogsOptions{
			ShowStdout: true,
			ShowStderr: true,
			Follow:     true,
			Since:      startTime.Format(time.RFC3339Nano),
		})
		if err != nil || ctx.Err() != nil {
			// container may not exist anymore, bail and let the reconciler retry.
			return
		}

		actionWriter := &LogActionWriter{
			store:        r.store,
			manifestName: watch.manifestName,
		}

		reader := runtimelog.NewHardCancelReader(ctx, readCloser)

		if watch.tty {
			_, err = io.Copy(actionWriter, reader)
		} else {
			_, err = stdcopy.StdCopy(actionWriter, actionWriter, reader)
		}
		_ = readCloser.Close()
		if err == nil || ctx.Err() != nil {
			// stop tailing because either:
			// 	* the log stream exited naturally -> the container exited, so a
			// 	  new watcher will be created once a new container is seen
			// 	* context was canceled -> manifest is no longer in the engine and
			// 	  is being torn down
			return
		}

		// something went wrong with the log stream; log the error and re-attach,
		// starting from the current time (logs emitted in between may be skipped)
		logger.Get(watch.ctx).Debugf("Error streaming %s logs: %v", watch.nn.Name, err)
		startTime = time.Now()
	}
}

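// CreateBuilder registers the reconciler with the controller manager. The
// requeuer is wired in as a raw watch source so that log-tailing goroutines
// can trigger a fresh reconcile when a stream ends.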
func (r *Reconciler) CreateBuilder(mgr ctrl.Manager) (*builder.Builder, error) {
	b := ctrl.NewControllerManagedBy(mgr).
		For(&v1alpha1.DockerComposeLogStream{}).
		WatchesRawSource(r.requeuer)

	return b, nil
}

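// watch tracks a single log-tailing goroutine. cancel stops it; containerID
// and startWatchTime let manageLogWatch decide whether a finished watch
// needs to be restarted for a new or restarted container.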
type watch struct {
	ctx            context.Context
	cancel         func()
	manifestName   model.ManifestName
	nn             types.NamespacedName
	spec           v1alpha1.DockerComposeLogStreamSpec
	startWatchTime time.Time
	containerID    string
	tty            bool
}

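// Done reports whether the watch has stopped, i.e. its context has been
// canceled.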
func (w *watch) Done() bool {
	return w.ctx.Err() != nil
}

// Result keeps track of the state we currently know about for a single
// DockerComposeLogStream object.
type Result struct {
	loggerCtx   context.Context
	name        types.NamespacedName
	projectHash string
	spec        v1alpha1.DockerComposeLogStreamSpec
	watch       *watch
}

func (r *Result) serviceKey() serviceKey {
	return serviceKey{service: r.spec.Service, projectHash: r.projectHash}
}

// serviceKey indexes the containers of each Docker Compose service, keyed by
// service name and project hash.
type serviceKey struct {
	service     string
	projectHash string
}