github.com/tilt-dev/tilt@v0.33.15-0.20240515162809-0a22ed45d8a0/internal/engine/k8swatch/event_watch_manager.go

package k8swatch

import (
	"context"
	"sync"
	"time"

	"k8s.io/apimachinery/pkg/types"

	"github.com/tilt-dev/tilt/internal/controllers/apis/cluster"
	"github.com/tilt-dev/tilt/internal/timecmp"
	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"

	"github.com/pkg/errors"
	v1 "k8s.io/api/core/v1"

	"github.com/tilt-dev/tilt/internal/k8s"
	"github.com/tilt-dev/tilt/internal/store"
	"github.com/tilt-dev/tilt/pkg/model"
)

// TODO(nick): Right now, the EventWatchManager, PodWatcher, and ServiceWatcher
// all look very similar, with a few subtle differences (particularly in how
// we decide whether two objects are related, and how we index those relationships).
//
// We're probably missing some abstractions here.
//
// TODO(nick): We should also add garbage collection and/or handle Delete events
// from the kubernetes informer properly.
type EventWatchManager struct {
	mu sync.RWMutex

	clients   *cluster.ClientManager
	clientKey watcherClientKey

	watcherKnownState watcherKnownState

	// An index that maps the UID of a Kubernetes resource to the UIDs of
	// all events that it owns (transitively).
	//
	// For example, a Deployment UID might contain a set of N event UIDs.
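	//
	// A hypothetical sketch of the index shape (UIDs and event names invented
	// for illustration), for a Deployment that transitively owns a ReplicaSet
	// and its Pod:
	//
	//	deployment-uid -> {event-about-pod, event-about-replicaset, event-about-deployment}
	//	replicaset-uid -> {event-about-pod, event-about-replicaset}
	//	pod-uid        -> {event-about-pod}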
	knownDescendentEventUIDs map[clusterUID]k8s.UIDSet

	// An index of all the known events, by UID
	knownEvents map[clusterUID]*v1.Event
}

func NewEventWatchManager(clients cluster.ClientProvider, cfgNS k8s.Namespace) *EventWatchManager {
	return &EventWatchManager{
		clients:                  cluster.NewClientManager(clients),
		clientKey:                watcherClientKey{name: "events"},
		watcherKnownState:        newWatcherKnownState(cfgNS),
		knownDescendentEventUIDs: make(map[clusterUID]k8s.UIDSet),
		knownEvents:              make(map[clusterUID]*v1.Event),
	}
}
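
// A rough sketch of how this manager gets wired up, assuming the usual
// subscriber pattern; clientProvider, upperStore, and the exact AddSubscriber
// signature are approximations rather than code from this file:
//
//	ewm := NewEventWatchManager(clientProvider, k8s.Namespace("default"))
//	upperStore.AddSubscriber(ctx, ewm)
//
// Once registered, OnChange runs on every non-log store change and reconciles
// the namespace watches below.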

type eventWatchTaskList struct {
	watcherTaskList
	tiltStartTime time.Time
}

func (m *EventWatchManager) diff(st store.RStore) eventWatchTaskList {
	state := st.RLockState()
	defer st.RUnlockState()

	watcherTaskList := m.watcherKnownState.createTaskList(state)
	return eventWatchTaskList{
		watcherTaskList: watcherTaskList,
		tiltStartTime:   state.TiltStartTime,
	}
}

func (m *EventWatchManager) OnChange(ctx context.Context, st store.RStore, summary store.ChangeSummary) error {
	if summary.IsLogOnly() {
		return nil
	}

	m.mu.Lock()
	defer m.mu.Unlock()

	clusters := m.handleClusterChanges(st, summary)

	taskList := m.diff(st)

	for _, teardown := range taskList.teardownNamespaces {
		watcher, ok := m.watcherKnownState.namespaceWatches[teardown]
		if ok {
			watcher.cancel()
		}
		delete(m.watcherKnownState.namespaceWatches, teardown)
	}

	for _, setup := range taskList.setupNamespaces {
		m.setupWatch(ctx, st, clusters, setup, taskList.tiltStartTime)
	}

	if len(taskList.newUIDs) > 0 {
		m.setupNewUIDs(ctx, st, taskList.newUIDs)
	}
	return nil
}

func (m *EventWatchManager) handleClusterChanges(st store.RStore, summary store.ChangeSummary) map[types.NamespacedName]*v1alpha1.Cluster {
	clusters := make(map[types.NamespacedName]*v1alpha1.Cluster)
	state := st.RLockState()
	for k, v := range state.Clusters {
		clusters[types.NamespacedName{Name: k}] = v.DeepCopy()
	}
	st.RUnlockState()

	for clusterNN := range summary.Clusters.Changes {
		c := clusters[clusterNN]
		if c != nil && !m.clients.Refresh(m.clientKey, c) {
			// cluster config didn't change
			// N.B. if the cluster is nil (does not exist in state), we'll
			// clear everything for it as well
			continue
		}

		// cluster config changed, remove all state so it can be re-built
		for key := range m.knownEvents {
			if key.cluster == clusterNN {
				delete(m.knownEvents, key)
			}
		}

		for key := range m.knownDescendentEventUIDs {
			if key.cluster == clusterNN {
				delete(m.knownDescendentEventUIDs, key)
			}
		}

		m.watcherKnownState.resetStateForCluster(clusterNN)
	}

	return clusters
}

func (m *EventWatchManager) setupWatch(ctx context.Context, st store.RStore, clusters map[types.NamespacedName]*v1alpha1.Cluster, key clusterNamespace, tiltStartTime time.Time) {
	kCli, err := m.clients.GetK8sClient(m.clientKey, clusters[key.cluster])
	if err != nil {
		// Ignore errors; if the cluster status changes, the subscriber
		// will be re-run and the namespaces will be picked up again as new,
		// since watcherKnownState isn't updated.
		return
	}

	ch, err := kCli.WatchEvents(ctx, key.namespace)
	if err != nil {
		err = errors.Wrapf(err, "Error watching events. Are you connected to kubernetes?\nTry running `kubectl get events -n %q`", key.namespace)
		st.Dispatch(store.NewErrorAction(err))
		return
	}

	ctx, cancel := context.WithCancel(ctx)
	m.watcherKnownState.namespaceWatches[key] = namespaceWatch{cancel: cancel}

	go m.dispatchEventsLoop(ctx, kCli.OwnerFetcher(), key.cluster, ch, st, tiltStartTime)
}

// When new UIDs are deployed, go through all our known events and dispatch
// new actions. This handles the case where we get the event
// before the deploy ID shows up in the manifest, which is way more common than
// you would think.
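//
// A hypothetical sequence, for illustration: an Event for a Pod arrives before
// its owning Deployment's UID is registered, so triageEventUpdate only records
// it in knownEvents and knownDescendentEventUIDs; once that Deployment UID
// later shows up in newUIDs, setupNewUIDs finds the stored event through the
// descendent index and dispatches the K8sEventAction that was skipped earlier.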
func (m *EventWatchManager) setupNewUIDs(ctx context.Context, st store.RStore, newUIDs map[clusterUID]model.ManifestName) {
	for newUID, mn := range newUIDs {
		m.watcherKnownState.knownDeployedUIDs[newUID] = mn

		descendants := m.knownDescendentEventUIDs[newUID]
		for uid := range descendants {
			event, ok := m.knownEvents[clusterUID{cluster: newUID.cluster, uid: uid}]
			if ok {
				st.Dispatch(store.NewK8sEventAction(event, mn))
			}
		}
	}
}

// Check to see if this event corresponds to any of our manifests.
//
// We do this by comparing the event's InvolvedObject UID and its owner UIDs
// against what we've deployed to the cluster. Returns the ManifestName that it
// matched against.
//
// If the event doesn't match an existing deployed resource, keep it in local
// state, so we can match it later if the owner UID shows up.
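//
// For example (hypothetical objects): an Event whose InvolvedObject is a Pod
// owned by a ReplicaSet, which is in turn owned by a Deployment, matches a
// manifest if any of the Pod, ReplicaSet, or Deployment UIDs appears in
// knownDeployedUIDs; the first matching UID in the owner tree wins.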
func (m *EventWatchManager) triageEventUpdate(clusterNN types.NamespacedName, event *v1.Event,
	objTree k8s.ObjectRefTree) model.ManifestName {
	m.mu.Lock()
	defer m.mu.Unlock()

	uid := clusterUID{cluster: clusterNN, uid: event.UID}
	m.knownEvents[uid] = event

	// Set up the descendent index of the involved object
	for _, ownerUID := range objTree.UIDs() {
		ownerKey := clusterUID{cluster: clusterNN, uid: ownerUID}
		set, ok := m.knownDescendentEventUIDs[ownerKey]
		if !ok {
			set = k8s.NewUIDSet()
			m.knownDescendentEventUIDs[ownerKey] = set
		}
		set.Add(uid.uid)
	}

	// Find the manifest name
	for _, ownerUID := range objTree.UIDs() {
		mn, ok := m.watcherKnownState.knownDeployedUIDs[clusterUID{cluster: clusterNN, uid: ownerUID}]
		if ok {
			return mn
		}
	}

	return ""
}

func (m *EventWatchManager) dispatchEventChange(ctx context.Context, of k8s.OwnerFetcher, clusterNN types.NamespacedName, event *v1.Event, st store.RStore) {
	objTree, err := of.OwnerTreeOfRef(ctx, event.InvolvedObject)
	if err != nil {
		// In locked-down clusters, the user may not have access to certain types of resources
		// so it's normal for there to be errors. Ignore them.
		return
	}

	mn := m.triageEventUpdate(clusterNN, event, objTree)
	if mn == "" {
		return
	}

	st.Dispatch(store.NewK8sEventAction(event, mn))
}

func (m *EventWatchManager) dispatchEventsLoop(ctx context.Context, of k8s.OwnerFetcher, clusterNN types.NamespacedName, ch <-chan *v1.Event, st store.RStore, tiltStartTime time.Time) {
	for {
		select {
		case event, ok := <-ch:
			if !ok {
				return
			}

			// On startup, k8s will give us a bunch of event objects that happened
			// before Tilt started, which leads to flooding the k8s API with lookups
			// on those events' involvedObjects. We don't care about those events,
			// so ignore them.
			//
			// TODO(nick): We might need to remove this check and optimize
			// it in a different way. We want Tilt to be able to attach to existing
			// resources, and these resources might have pre-existing events.
			if timecmp.Before(event.ObjectMeta.CreationTimestamp, tiltStartTime) {
				continue
			}

			if !ShouldLogEvent(event) {
				continue
			}

			go m.dispatchEventChange(ctx, of, clusterNN, event, st)

		case <-ctx.Done():
			return
		}
	}
}

const ImagePullingReason = "Pulling"
const ImagePulledReason = "Pulled"

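// ShouldLogEvent reports whether an event is interesting enough to surface:
// warnings (and any other non-Normal type) always are, while Normal events are
// only surfaced for image pull progress.
//
// A minimal sketch of the behavior, with hypothetical events:
//
//	ShouldLogEvent(&v1.Event{Type: v1.EventTypeWarning, Reason: "BackOff"})  // true
//	ShouldLogEvent(&v1.Event{Type: v1.EventTypeNormal, Reason: "Pulled"})    // true
//	ShouldLogEvent(&v1.Event{Type: v1.EventTypeNormal, Reason: "Scheduled"}) // false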
func ShouldLogEvent(e *v1.Event) bool {
	if e.Type != v1.EventTypeNormal {
		return true
	}

	return e.Reason == ImagePullingReason || e.Reason == ImagePulledReason
}