github.com/grahambrereton-form3/tilt@v0.10.18/internal/engine/runtimelog/docker_compose_log_manager.go

package runtimelog

import (
	"bytes"
	"context"
	"io"
	"time"

	"github.com/windmilleng/tilt/internal/dockercompose"
	"github.com/windmilleng/tilt/internal/store"
	"github.com/windmilleng/tilt/pkg/logger"
	"github.com/windmilleng/tilt/pkg/model"
)

// Collects logs from running docker-compose services.
type DockerComposeLogManager struct {
	watches map[model.ManifestName]dockerComposeLogWatch
	dcc     dockercompose.DockerComposeClient
}

func NewDockerComposeLogManager(dcc dockercompose.DockerComposeClient) *DockerComposeLogManager {
	return &DockerComposeLogManager{
		watches: make(map[model.ManifestName]dockerComposeLogWatch),
		dcc:     dcc,
	}
}

// Diff the current watches against the set of current docker-compose services, i.e.
// what we SHOULD be watching, returning the changes we need to make.
func (m *DockerComposeLogManager) diff(ctx context.Context, st store.RStore) (setup []dockerComposeLogWatch, teardown []dockerComposeLogWatch) {
	state := st.RLockState()
	defer st.RUnlockState()

	// If we're not watching the FS for changes, then don't bother watching logs.
	if !state.WatchFiles {
		return nil, nil
	}

	for _, mt := range state.ManifestTargets {
		manifest := mt.Manifest
		if !manifest.IsDC() {
			continue
		}

		// If the build hasn't started yet, don't start watching.
		//
		// TODO(nick): This points to a larger synchronization bug between DC
		// LogManager and BuildController. Starting a build will delete all the logs
		// that have been recorded so far. This creates race conditions: if the logs
		// come in before the StartBuild event is recorded, those logs will get
		// deleted. This affects tests and fast builds more than normal builds.
		// But we should have a better way to associate logs with a particular build.
		ms := mt.State
		if ms.CurrentBuild.StartTime.IsZero() && ms.LastBuild().StartTime.IsZero() {
			continue
		}

		existing, isActive := m.watches[manifest.Name]
		startWatchTime := time.Unix(0, 0)
		if isActive {
			select {
			case termTime := <-existing.terminationTime:
				// If we're receiving on this channel, it's because the previous watcher ended or
				// died somehow; we need to create a new one that picks up where it left off.
				startWatchTime = termTime
			default:
				// Watcher is still active, no action needed.
				continue
			}
		}

		ctx, cancel := context.WithCancel(ctx)
		w := dockerComposeLogWatch{
			ctx:             ctx,
			cancel:          cancel,
			name:            manifest.Name,
			dc:              manifest.DockerComposeTarget(),
			startWatchTime:  startWatchTime,
			terminationTime: make(chan time.Time, 1),
		}
		m.watches[manifest.Name] = w
		setup = append(setup, w)
	}

	for key, value := range m.watches {
		_, inState := state.ManifestTargets[key]
		if !inState {
			delete(m.watches, key)

			teardown = append(teardown, value)
		}
	}

	return setup, teardown
}

func (m *DockerComposeLogManager) OnChange(ctx context.Context, st store.RStore) {
	setup, teardown := m.diff(ctx, st)
	for _, watch := range teardown {
		watch.cancel()
	}

	for _, watch := range setup {
		go m.consumeLogs(watch, st)
	}
}

func (m *DockerComposeLogManager) consumeLogs(watch dockerComposeLogWatch, st store.RStore) {
	defer func() {
		watch.terminationTime <- time.Now()
	}()

	name := watch.name
	readCloser, err := m.dcc.StreamLogs(watch.ctx, watch.dc.ConfigPaths, watch.dc.Name)
	if err != nil {
		logger.Get(watch.ctx).Debugf("Error streaming %s logs: %v", name, err)
		return
	}
	defer func() {
		_ = readCloser.Close()
	}()

	actionWriter := DockerComposeLogActionWriter{
		store:        st,
		manifestName: name,
	}
	_, err = io.Copy(actionWriter, NewHardCancelReader(watch.ctx, readCloser))
	if err != nil && watch.ctx.Err() == nil {
		logger.Get(watch.ctx).Debugf("Error streaming %s logs: %v", name, err)
		return
	}
}

type dockerComposeLogWatch struct {
	ctx             context.Context
	cancel          func()
	name            model.ManifestName
	dc              model.DockerComposeTarget
	startWatchTime  time.Time
	terminationTime chan time.Time

	// TODO(maia): do we need to track these? (maybe if we implement with `docker logs <cID>`...)
	// cID container.ID
	// cName container.Name
}

type DockerComposeLogActionWriter struct {
	store        store.RStore
	manifestName model.ManifestName
}

func (w DockerComposeLogActionWriter) Write(p []byte) (n int, err error) {
	if shouldFilterDCLog(p) {
		return len(p), nil
	}
	w.store.Dispatch(DockerComposeLogAction{
		LogEvent: store.NewLogEvent(w.manifestName, p),
	})
	return len(p), nil
}

var _ store.Subscriber = &DockerComposeLogManager{}

func shouldFilterDCLog(p []byte) bool {
	return bytes.HasPrefix(p, []byte("Attaching to "))
}
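
The sketch below is not part of the upstream file. It is a minimal, self-contained illustration of the restart pattern that diff and consumeLogs use above: the log-consuming goroutine always reports when it stopped by sending on a buffered channel of capacity 1, and the diff loop does a non-blocking receive on that channel to decide whether the watcher is still alive or needs to be recreated from the point where it died. All names here (watch, run, needsRestart) are hypothetical and exist only for this example.

package main

import (
	"fmt"
	"time"
)

type watch struct {
	terminationTime chan time.Time // buffered, capacity 1, like dockerComposeLogWatch
}

// run stands in for consumeLogs: whatever happens, it records when it stopped.
func (w watch) run(d time.Duration) {
	defer func() { w.terminationTime <- time.Now() }()
	time.Sleep(d) // stand-in for streaming logs until the stream ends or fails
}

// needsRestart stands in for the select in diff: a non-blocking receive tells us
// whether the previous watcher terminated and, if so, when to resume from.
func needsRestart(w watch) (time.Time, bool) {
	select {
	case t := <-w.terminationTime:
		return t, true // previous watcher ended; restart a new one from t
	default:
		return time.Time{}, false // still active; nothing to do
	}
}

func main() {
	w := watch{terminationTime: make(chan time.Time, 1)}
	go w.run(10 * time.Millisecond)

	if _, restart := needsRestart(w); !restart {
		fmt.Println("watcher still active")
	}

	time.Sleep(20 * time.Millisecond)
	if t, restart := needsRestart(w); restart {
		fmt.Println("watcher terminated at", t.Format(time.RFC3339Nano), "- restart from there")
	}
}

The capacity-1 buffer matters: the deferred send in the consumer never blocks even if no diff pass ever reads it, so a dying watcher can always exit cleanly, and at most one termination timestamp is retained for the next diff to pick up.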