github.com/tilt-dev/tilt@v0.33.15-0.20240515162809-0a22ed45d8a0/internal/controllers/core/dockercomposeservice/disable_watcher.go

package dockercomposeservice

import (
	"context"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/jonboulle/clockwork"

	"github.com/tilt-dev/tilt/internal/dockercompose"
	"github.com/tilt-dev/tilt/internal/filteredwriter"
	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
	"github.com/tilt-dev/tilt/pkg/logger"
)

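// disableDebounceDelay is how long we wait after a disable request before
// running cleanup, so that resources disabled in bulk get batched into a
// single `docker-compose rm` call.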
const disableDebounceDelay = 200 * time.Millisecond

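// DisableSubscriber watches for resources that have been disabled and removes
// their docker-compose containers.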
type DisableSubscriber struct {
	ctx            context.Context
	dcc            dockercompose.DockerComposeClient
	clock          clockwork.Clock
	mu             sync.Mutex
	resourceStates map[string]resourceState

	// Tracks the start time of each container we've already tried to rm, so we
	// don't re-process state we've already handled.
	// (The subscriber keeps reporting that the resource needs cleanup until we
	// successfully kill the container and the DC event watcher notices and
	// updates EngineState.)
	lastDisableStartTimes map[string]time.Time

	// Counts goroutines spawned by UpdateQueue. If we do something bad like
	// spawn a goroutine on every OnChange, the extra goroutines are
	// unobservable no-ops in production, so tests need this instrumentation
	// to catch it.
	goroutinesSpawnedForTesting int
}

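// resourceState is a snapshot of a single DockerComposeService resource, as
// last reported to UpdateQueue.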
type resourceState struct {
	Name                string
	Spec                v1alpha1.DockerComposeServiceSpec
	NeedsCleanup        bool
	CurrentlyCleaningUp bool
	StartTime           time.Time
}

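// NewDisableSubscriber creates a DisableSubscriber.
//
// A minimal wiring sketch (illustrative only; ctx and dcc stand in for
// whatever context and DockerComposeClient the caller already has):
//
//	sub := NewDisableSubscriber(ctx, dcc, clockwork.NewRealClock())
//	sub.UpdateQueue(resourceState{
//		Name:         "frontend",
//		Spec:         spec, // a v1alpha1.DockerComposeServiceSpec
//		NeedsCleanup: true,
//		StartTime:    time.Now(),
//	})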
func NewDisableSubscriber(ctx context.Context, dcc dockercompose.DockerComposeClient, clock clockwork.Clock) *DisableSubscriber {
	return &DisableSubscriber{
		ctx:                   ctx,
		dcc:                   dcc,
		clock:                 clock,
		resourceStates:        make(map[string]resourceState),
		lastDisableStartTimes: make(map[string]time.Time),
	}
}

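// UpdateQueue records the latest observed state of a resource. If the
// resource needs cleanup and no cleanup goroutine is already in flight for
// it, UpdateQueue spawns one that waits out the debounce delay and then
// calls Reconcile.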
func (w *DisableSubscriber) UpdateQueue(rs resourceState) {
	w.mu.Lock()
	defer w.mu.Unlock()

	kickoffCleanup := false
	name := rs.Name
	rs.CurrentlyCleaningUp = w.resourceStates[name].CurrentlyCleaningUp
	if rs.NeedsCleanup && !rs.CurrentlyCleaningUp {
		rs.CurrentlyCleaningUp = true
		kickoffCleanup = true
	}
	w.resourceStates[name] = rs

	if kickoffCleanup {
		w.goroutinesSpawnedForTesting++
		go func() {
			// `docker-compose rm` can take 5-10 seconds, so we sleep a bit
			// here: if a bunch of resources are disabled in bulk, we want to
			// remove them all at once rather than starting on the first one we
			// see and picking up the rest in a second `docker-compose rm` call.
			select {
			case <-w.ctx.Done():
				return
			case <-w.clock.After(disableDebounceDelay):
			}

			w.Reconcile(w.ctx)

			w.mu.Lock()
			rs := w.resourceStates[name]
			rs.CurrentlyCleaningUp = false
			w.resourceStates[name] = rs
			w.mu.Unlock()
		}()
	}
}

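// Reconcile gathers every resource that needs cleanup and hasn't already been
// processed at its current start time, then removes all of their containers
// with a single `docker-compose rm` call.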
func (w *DisableSubscriber) Reconcile(ctx context.Context) {
	var toDisable []v1alpha1.DockerComposeServiceSpec

	w.mu.Lock()

	for _, entry := range w.resourceStates {
		lastDisableStartTime := w.lastDisableStartTimes[entry.Spec.Service]
		if entry.NeedsCleanup && entry.StartTime.After(lastDisableStartTime) {
			toDisable = append(toDisable, entry.Spec)
			w.lastDisableStartTimes[entry.Spec.Service] = entry.StartTime
		}
	}

	// Alphabetical order
	sort.Slice(toDisable, func(i, j int) bool {
		return toDisable[i].Service < toDisable[j].Service
	})

	w.mu.Unlock()

	if len(toDisable) == 0 {
		return
	}

	// Upon disabling, the DC event watcher will notice the container has stopped and update
	// the resource's RuntimeStatus, preventing it from being re-added to toDisable.

	// NB: For now, DC output only goes to the global log.
	// 1. `docker-compose rm` is slow, so we don't want to call it serially, once per resource
	// 2. we've had bad luck with concurrent `docker-compose` processes, so we don't want to do it in parallel
	// 3. we can't break the DC output up by resource
	// 4. our logger doesn't support writing the same span to multiple manifests
	//    (https://app.shortcut.com/windmill/story/13140/support-logging-to-multiple-manifests)

	// `docker-compose rm` output is a bit of a pickle. On one hand, the command can take several seconds,
	// so it's nice to let it write to the log in real time (rather than only on error), to give the user
	// feedback that something is happening. On the other hand, `docker-compose rm` does tty tricks that
	// don't work in the Tilt log, which makes it ugly.
	out := logger.Get(ctx).Writer(logger.InfoLvl)

	out = filteredwriter.New(out, func(s string) bool {
		// https://app.shortcut.com/windmill/story/13147/docker-compose-down-messages-for-disabled-resources-may-be-confusing
		return strings.HasPrefix(s, "Going to remove")
	})

	err := w.dcc.Rm(ctx, toDisable, out, out)
	if err != nil {
		var namesToDisable []string
		for _, e := range toDisable {
			namesToDisable = append(namesToDisable, e.Service)
		}
		logger.Get(ctx).Errorf("error stopping disabled docker compose services %v, error: %v", namesToDisable, err)
	}
}