github.com/grahambrereton-form3/tilt@v0.10.18/internal/engine/runtimelog/podlogmanager.go

package runtimelog

import (
	"context"
	"fmt"
	"io"
	"time"

	v1 "k8s.io/api/core/v1"

	"github.com/windmilleng/tilt/internal/container"
	"github.com/windmilleng/tilt/internal/k8s"
	"github.com/windmilleng/tilt/internal/store"
	"github.com/windmilleng/tilt/pkg/logger"
	"github.com/windmilleng/tilt/pkg/model"
)

// Collects logs from deployed containers.
type PodLogManager struct {
	kClient k8s.Client

	watches map[podLogKey]PodLogWatch
}

func NewPodLogManager(kClient k8s.Client) *PodLogManager {
	return &PodLogManager{
		kClient: kClient,
		watches: make(map[podLogKey]PodLogWatch),
	}
}

func cancelAll(watches []PodLogWatch) {
	for _, w := range watches {
		w.cancel()
	}
}

// Diff the current watches against the state store of what
// we're supposed to be watching, returning the changes
// we need to make.
func (m *PodLogManager) diff(ctx context.Context, st store.RStore) (setup []PodLogWatch, teardown []PodLogWatch) {
	state := st.RLockState()
	defer st.RUnlockState()

	// If we're not watching the FS for changes, then don't bother watching logs.
	if !state.WatchFiles {
		return nil, nil
	}

	stateWatches := make(map[podLogKey]bool)
	for _, ms := range state.ManifestStates() {
		runtime := ms.K8sRuntimeState()
		for _, pod := range runtime.PodList() {
			if !m.shouldWatchPodLogs(pod) {
				continue
			}

			// If the pod has more than one container, we should prefix logs with the container name.
			shouldPrefix := len(pod.Containers) > 1

			for _, c := range pod.Containers {
				// Key the log watcher by the container id, so we auto-restart the
				// watching if the container crashes.
				key := podLogKey{
					podID: pod.PodID,
					cID:   c.ID,
				}
				stateWatches[key] = true

				existing, isActive := m.watches[key]

				// Only stream logs that have happened since Tilt started.
				//
				// TODO(nick): We should really record when we started the `kubectl apply`,
				// and only stream logs since that happened.
				startWatchTime := state.TiltStartTime
				if isActive {
					if existing.ctx.Err() == nil {
						// The active pod watcher is still tailing the logs,
						// nothing to do.
						continue
					}

					// The active pod watcher got cancelled somehow,
					// so we need to create a new one that picks up
					// where it left off.
					startWatchTime = <-existing.terminationTime
				}

				ctx, cancel := context.WithCancel(ctx)
				w := PodLogWatch{
					ctx:             ctx,
					cancel:          cancel,
					name:            ms.Name,
					podID:           pod.PodID,
					cName:           c.Name,
					namespace:       pod.Namespace,
					startWatchTime:  startWatchTime,
					terminationTime: make(chan time.Time, 1),
					shouldPrefix:    shouldPrefix,
				}
				m.watches[key] = w
				setup = append(setup, w)
			}
		}
	}

	for key, value := range m.watches {
		_, inState := stateWatches[key]
		if !inState {
			delete(m.watches, key)
			teardown = append(teardown, value)
		}
	}

	return setup, teardown
}

func (m *PodLogManager) shouldWatchPodLogs(pod store.Pod) bool {
	if pod.PodID == "" || len(pod.Containers) == 0 {
		return false
	}

	// If an ID or name for the containers hasn't been created yet, weird things
	// will happen when we try to store them in the `m.watches` map. This should
	// only happen if the pod is still in a weird creating state. It shouldn't
	// happen when user code is running.
	for _, container := range pod.Containers {
		if container.Name == "" || container.ID == "" {
			return false
		}
	}

	// Only try to fetch logs if the pod is in a state that can handle it;
	// otherwise, it may reject our connection.
	if !(pod.Phase == v1.PodRunning || pod.Phase == v1.PodSucceeded ||
		pod.Phase == v1.PodFailed) {
		return false
	}

	return true
}

func (m *PodLogManager) OnChange(ctx context.Context, st store.RStore) {
	setup, teardown := m.diff(ctx, st)
	for _, watch := range teardown {
		watch.cancel()
	}

	for _, watch := range setup {
		go m.consumeLogs(watch, st)
	}
}

func (m *PodLogManager) consumeLogs(watch PodLogWatch, st store.RStore) {
	defer func() {
		watch.terminationTime <- time.Now()
		watch.cancel()
	}()

	name := watch.name
	pID := watch.podID
	containerName := watch.cName
	ns := watch.namespace
	startTime := watch.startWatchTime
	readCloser, err := m.kClient.ContainerLogs(watch.ctx, pID, containerName, ns, startTime)
	if err != nil {
		logger.Get(watch.ctx).Infof("Error streaming %s logs: %v", name, err)
		return
	}
	defer func() {
		_ = readCloser.Close()
	}()

	var actionWriter io.Writer = PodLogActionWriter{
		Store:        st,
		ManifestName: name,
		PodID:        pID,
	}
	if watch.shouldPrefix {
		prefix := fmt.Sprintf("[%s] ", watch.cName)
		actionWriter = logger.NewPrefixedWriter(prefix, actionWriter)
	}

	_, err = io.Copy(actionWriter, NewHardCancelReader(watch.ctx, readCloser))
	if err != nil && watch.ctx.Err() == nil {
		logger.Get(watch.ctx).Infof("Error streaming %s logs: %v", name, err)
		return
	}
}

type PodLogWatch struct {
	ctx    context.Context
	cancel func()

	name            model.ManifestName
	podID           k8s.PodID
	namespace       k8s.Namespace
	cName           container.Name
	startWatchTime  time.Time
	terminationTime chan time.Time

	shouldPrefix bool // if true, we'll prefix logs with the container name
}

type podLogKey struct {
	podID k8s.PodID
	cID   container.ID
}

type PodLogActionWriter struct {
	Store        store.RStore
	PodID        k8s.PodID
	ManifestName model.ManifestName
}

func (w PodLogActionWriter) Write(p []byte) (n int, err error) {
	w.Store.Dispatch(PodLogAction{
		PodID:    w.PodID,
		LogEvent: store.NewLogEvent(w.ManifestName, p),
	})
	return len(p), nil
}

var _ store.Subscriber = &PodLogManager{}
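The core pattern above — diff the set of log watches we want against the set we currently hold, cancel the ones that are no longer desired, and start goroutines for the missing ones — can be illustrated in isolation. The sketch below is a minimal, self-contained analogue of that reconcile loop; the names (watchKey, reconciler, stream) are hypothetical and are not part of Tilt's API, and the real code additionally keys on the container ID so a crashed container's watch restarts with a fresh key.

package main

import (
	"context"
	"fmt"
	"time"
)

// watchKey identifies one log stream (hypothetical analogue of podLogKey).
type watchKey struct{ pod, container string }

// watch holds the cancel func for one active stream.
type watch struct {
	cancel context.CancelFunc
}

// reconciler mirrors the diff/OnChange split: it owns the map of active
// watches and brings it in line with whatever the caller says is desired.
type reconciler struct {
	active map[watchKey]watch
}

// reconcile starts missing watches and cancels ones no longer desired.
func (r *reconciler) reconcile(ctx context.Context, desired []watchKey) {
	want := make(map[watchKey]bool, len(desired))
	for _, k := range desired {
		want[k] = true
		if _, ok := r.active[k]; ok {
			continue // already streaming, nothing to do
		}
		wctx, cancel := context.WithCancel(ctx)
		r.active[k] = watch{cancel: cancel}
		go stream(wctx, k) // analogue of `go m.consumeLogs(...)`
	}
	for k, w := range r.active {
		if !want[k] {
			w.cancel()
			delete(r.active, k)
		}
	}
}

// stream stands in for consumeLogs: it runs until its context is cancelled.
func stream(ctx context.Context, k watchKey) {
	fmt.Printf("streaming %s/%s\n", k.pod, k.container)
	<-ctx.Done()
	fmt.Printf("stopped %s/%s\n", k.pod, k.container)
}

func main() {
	r := &reconciler{active: make(map[watchKey]watch)}
	ctx := context.Background()

	// First pass: two containers of pod-a are desired.
	r.reconcile(ctx, []watchKey{{"pod-a", "main"}, {"pod-a", "sidecar"}})
	time.Sleep(100 * time.Millisecond)

	// Second pass: the sidecar went away, pod-b appeared.
	r.reconcile(ctx, []watchKey{{"pod-a", "main"}, {"pod-b", "main"}})
	time.Sleep(100 * time.Millisecond)
}

Keeping the diff under the store's read lock (as diff does) while pushing the blocking work into per-watch goroutines is what lets OnChange stay cheap: each call only mutates the watch map and hands the slow log streaming to consumeLogs.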