github.com/tilt-dev/tilt@v0.33.15-0.20240515162809-0a22ed45d8a0/internal/engine/k8swatch/watcher.go

package k8swatch

import (
	"context"

	"k8s.io/apimachinery/pkg/types"

	"github.com/tilt-dev/tilt/internal/k8s"
	"github.com/tilt-dev/tilt/internal/store"
	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
	"github.com/tilt-dev/tilt/pkg/model"
)

type clusterNamespace struct {
	cluster   types.NamespacedName
	namespace k8s.Namespace
}

type clusterUID struct {
	cluster types.NamespacedName
	uid     types.UID
}

// Common utility methods for watching kubernetes resources
type watcherTaskList struct {
	watchableNamespaces []clusterNamespace
	setupNamespaces     []clusterNamespace
	teardownNamespaces  []clusterNamespace
	newUIDs             map[clusterUID]model.ManifestName
}

type namespaceWatch struct {
	cancel context.CancelFunc
}

type watcherKnownState struct {
	cfgNS             k8s.Namespace
	namespaceWatches  map[clusterNamespace]namespaceWatch
	knownDeployedUIDs map[clusterUID]model.ManifestName
}

func newWatcherKnownState(cfgNS k8s.Namespace) watcherKnownState {
	return watcherKnownState{
		cfgNS:             cfgNS,
		namespaceWatches:  make(map[clusterNamespace]namespaceWatch),
		knownDeployedUIDs: make(map[clusterUID]model.ManifestName),
	}
}

// Diff the contents of the engine state against the deployed UIDs that the
// watcher already knows about, and create a task list of things to do.
//
// Assumes we're holding an RLock on both states.
func (ks *watcherKnownState) createTaskList(state store.EngineState) watcherTaskList {
	newUIDs := make(map[clusterUID]model.ManifestName)
	seenUIDs := make(map[clusterUID]bool)
	namespaces := make(map[clusterNamespace]bool)
	for _, mt := range state.Targets() {
		if !mt.Manifest.IsK8s() {
			continue
		}

		// TODO(milas): read the Cluster object name from the spec once available
		clusterNN := types.NamespacedName{Name: v1alpha1.ClusterNameDefault}

		name := mt.Manifest.Name

		// Collect all the new UIDs
		applyFilter := mt.State.K8sRuntimeState().ApplyFilter
		if applyFilter != nil {
			for _, ref := range applyFilter.DeployedRefs {
				namespace := k8s.Namespace(ref.Namespace)
				if namespace == "" {
					namespace = ks.cfgNS
				}
				if namespace == "" {
					namespace = k8s.DefaultNamespace
				}
				nsKey := clusterNamespace{cluster: clusterNN, namespace: namespace}
				namespaces[nsKey] = true

				// Our data model allows people to have the same resource defined in
				// multiple manifests, and so we can have the same deployed UID in
				// multiple manifests.
				//
				// This check protects us from infinite loops where the diff keeps flipping
				// between the two manifests.
				//
				// Ideally, our data model would prevent this from happening entirely.
				uidKey := clusterUID{cluster: clusterNN, uid: ref.UID}
				if seenUIDs[uidKey] {
					continue
				}
				seenUIDs[uidKey] = true

				oldName := ks.knownDeployedUIDs[uidKey]
				if name != oldName {
					newUIDs[uidKey] = name
				}
			}
		}
	}

	// If we're no longer deploying a manifest, delete it from the known deployed UIDs.
	// This ensures that if it shows up again, we process it correctly.
	for uid := range ks.knownDeployedUIDs {
		if !seenUIDs[uid] {
			delete(ks.knownDeployedUIDs, uid)
		}
	}

	var watchableNamespaces []clusterNamespace
	var setupNamespaces []clusterNamespace
	var teardownNamespaces []clusterNamespace

	for needed := range namespaces {
		watchableNamespaces = append(watchableNamespaces, needed)
		if _, ok := ks.namespaceWatches[needed]; !ok {
			setupNamespaces = append(setupNamespaces, needed)
		}
	}

	for existing := range ks.namespaceWatches {
		if _, ok := namespaces[existing]; !ok {
			teardownNamespaces = append(teardownNamespaces, existing)
		}
	}

	return watcherTaskList{
		watchableNamespaces: watchableNamespaces,
		setupNamespaces:     setupNamespaces,
		teardownNamespaces:  teardownNamespaces,
		newUIDs:             newUIDs,
	}
}

func (ks *watcherKnownState) resetStateForCluster(clusterKey types.NamespacedName) {
	for key, watch := range ks.namespaceWatches {
		if key.cluster == clusterKey {
			watch.cancel()
			delete(ks.namespaceWatches, key)
		}
	}

	for key := range ks.knownDeployedUIDs {
		if key.cluster == clusterKey {
			delete(ks.knownDeployedUIDs, key)
		}
	}
}

// watcherClientKey bridges apiserver and engine subscriber semantics.
//
// In apiserver reconcilers, each object is evaluated individually, so each
// one needs to be notified of client changes and reset its own state.
//
// The engine subscribers evaluate on global engine state, so this acts as a
// singleton key for them. The name is purely informative for debugging
// purposes (collisions aren't an issue - the subscribers do not share a
// ClientManager instance as there's no real advantage to doing so).
type watcherClientKey struct {
	name string
}

func (w watcherClientKey) GetName() string {
	return w.name
}

func (w watcherClientKey) GetNamespace() string {
	return "tilt-engine"
}
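
// exampleApplyTaskList is a minimal usage sketch, not part of the original
// file: it shows how a subscriber might consume the diff produced by
// createTaskList, under the assumption of a hypothetical watchNamespace
// callback standing in for the real per-namespace watch loop. The actual
// subscribers in this package do more work (dispatching store actions,
// error handling), so treat this only as an illustration of the task-list
// contract.
func exampleApplyTaskList(
	ctx context.Context,
	ks *watcherKnownState,
	state store.EngineState,
	watchNamespace func(ctx context.Context, ns clusterNamespace),
) {
	taskList := ks.createTaskList(state)

	// Start a watch for each namespace that isn't being watched yet,
	// remembering the cancel func so it can be torn down later.
	for _, ns := range taskList.setupNamespaces {
		watchCtx, cancel := context.WithCancel(ctx)
		ks.namespaceWatches[ns] = namespaceWatch{cancel: cancel}
		go watchNamespace(watchCtx, ns)
	}

	// Cancel watches for namespaces that no longer contain deployed resources.
	for _, ns := range taskList.teardownNamespaces {
		ks.namespaceWatches[ns].cancel()
		delete(ks.namespaceWatches, ns)
	}

	// Record the manifest that now owns each newly deployed UID.
	for uid, mn := range taskList.newUIDs {
		ks.knownDeployedUIDs[uid] = mn
	}
}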