github.com/yogeshlonkar/moby@v1.13.2-0.20201203103638-c0b64beaea94/daemon/logger/jsonfilelog/read.go

package jsonfilelog

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"time"

	"github.com/fsnotify/fsnotify"
	"golang.org/x/net/context"

	"github.com/sirupsen/logrus"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/pkg/filenotify"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonlog"
	"github.com/docker/docker/pkg/tailfile"
	"github.com/pkg/errors"
)

const maxJSONDecodeRetry = 20000

func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) {
	l.Reset()
	if err := dec.Decode(l); err != nil {
		return nil, err
	}
	msg := &logger.Message{
		Source:    l.Stream,
		Timestamp: l.Created,
		Line:      []byte(l.Log),
		Attrs:     l.Attrs,
	}
	return msg, nil
}

// ReadLogs implements the logger's LogReader interface for the logs
// created by this driver.
func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
	logWatcher := logger.NewLogWatcher()

	go l.readLogs(logWatcher, config)
	return logWatcher
}

func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
	defer close(logWatcher.Msg)

	// lock so the read stream doesn't get corrupted due to rotations or other log data written while we open these files
	// This will block writes!!!
	l.mu.RLock()

	// TODO it would be nice to move a lot of this reader implementation to the rotate logger object
	pth := l.writer.LogPath()
	var files []io.ReadSeeker
	for i := l.writer.MaxFiles(); i > 1; i-- {
		f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1))
		if err != nil {
			if !os.IsNotExist(err) {
				logWatcher.Err <- err
				l.mu.RUnlock()
				return
			}
			continue
		}
		defer f.Close()
		files = append(files, f)
	}

	latestFile, err := os.Open(pth)
	if err != nil {
		logWatcher.Err <- errors.Wrap(err, "error opening latest log file")
		l.mu.RUnlock()
		return
	}
	defer latestFile.Close()

	latestChunk, err := newSectionReader(latestFile)

	// Now we have the reader sectioned, all fd's opened, we can unlock.
	// New writes/rotates will not affect seeking through these files
	l.mu.RUnlock()

	if err != nil {
		logWatcher.Err <- err
		return
	}

	if config.Tail != 0 {
		tailer := ioutils.MultiReadSeeker(append(files, latestChunk)...)
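		// The rotated files (oldest first) and a section reader over the live
		// file are stitched into one seekable stream, so the tail can be
		// counted back across file boundaries.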
		tailFile(tailer, logWatcher, config.Tail, config.Since)
	}

	// close all the rotated files
	for _, f := range files {
		if err := f.(io.Closer).Close(); err != nil {
			logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err)
		}
	}

	if !config.Follow || l.closed {
		return
	}

	notifyRotate := l.writer.NotifyRotate()
	defer l.writer.NotifyRotateEvict(notifyRotate)

	l.mu.Lock()
	l.readers[logWatcher] = struct{}{}
	l.mu.Unlock()

	followLogs(latestFile, logWatcher, notifyRotate, config.Since)

	l.mu.Lock()
	delete(l.readers, logWatcher)
	l.mu.Unlock()
}

func newSectionReader(f *os.File) (*io.SectionReader, error) {
	// seek to the end to get the size
	// we'll leave this at the end of the file since section reader does not advance the reader
	size, err := f.Seek(0, os.SEEK_END)
	if err != nil {
		return nil, errors.Wrap(err, "error getting current file size")
	}
	return io.NewSectionReader(f, 0, size), nil
}

func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) {
	var rdr io.Reader
	rdr = f
	if tail > 0 {
		ls, err := tailfile.TailFile(f, tail)
		if err != nil {
			logWatcher.Err <- err
			return
		}
		rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n")))
	}
	dec := json.NewDecoder(rdr)
	l := &jsonlog.JSONLog{}
	for {
		msg, err := decodeLogLine(dec, l)
		if err != nil {
			if err != io.EOF {
				logWatcher.Err <- err
			}
			return
		}
		if !since.IsZero() && msg.Timestamp.Before(since) {
			continue
		}
		select {
		case <-logWatcher.WatchClose():
			return
		case logWatcher.Msg <- msg:
		}
	}
}

func watchFile(name string) (filenotify.FileWatcher, error) {
	fileWatcher, err := filenotify.New()
	if err != nil {
		return nil, err
	}

	if err := fileWatcher.Add(name); err != nil {
		logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err)
		fileWatcher.Close()
		fileWatcher = filenotify.NewPollingWatcher()

		if err := fileWatcher.Add(name); err != nil {
			fileWatcher.Close()
			logrus.Debugf("error watching log file for modifications: %v", err)
			return nil, err
		}
	}
	return fileWatcher, nil
}

func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) {
	dec := json.NewDecoder(f)
	l := &jsonlog.JSONLog{}

	name := f.Name()
	fileWatcher, err := watchFile(name)
	if err != nil {
		logWatcher.Err <- err
		return
	}
	defer func() {
		f.Close()
		fileWatcher.Remove(name)
		fileWatcher.Close()
	}()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		select {
		case <-logWatcher.WatchClose():
			fileWatcher.Remove(name)
			cancel()
		case <-ctx.Done():
			return
		}
	}()

	var retries int
	handleRotate := func() error {
		f.Close()
		fileWatcher.Remove(name)

		// retry when the file doesn't exist
		for retries := 0; retries <= 5; retries++ {
			f, err = os.Open(name)
			if err == nil || !os.IsNotExist(err) {
				break
			}
		}
		if err != nil {
			return err
		}
		if err := fileWatcher.Add(name); err != nil {
			return err
		}
		dec = json.NewDecoder(f)
		return nil
	}

	errRetry := errors.New("retry")
	errDone := errors.New("done")
	waitRead := func() error {
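		// Block until the watcher reports activity on the log file: a write
		// means more data is ready to decode, a rename or remove means the
		// file was rotated and must be reopened, and watcher errors are
		// retried a few times before giving up.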
		select {
		case e := <-fileWatcher.Events():
			switch e.Op {
			case fsnotify.Write:
				dec = json.NewDecoder(f)
				return nil
			case fsnotify.Rename, fsnotify.Remove:
				select {
				case <-notifyRotate:
				case <-ctx.Done():
					return errDone
				}
				if err := handleRotate(); err != nil {
					return err
				}
				return nil
			}
			return errRetry
		case err := <-fileWatcher.Errors():
			logrus.Debugf("logger got error watching file: %v", err)
			// Something happened, let's try and stay alive and create a new watcher
			if retries <= 5 {
				fileWatcher.Close()
				fileWatcher, err = watchFile(name)
				if err != nil {
					return err
				}
				retries++
				return errRetry
			}
			return err
		case <-ctx.Done():
			return errDone
		}
	}

	handleDecodeErr := func(err error) error {
		if err == io.EOF {
			for {
				err := waitRead()
				if err == nil {
					break
				}
				if err == errRetry {
					continue
				}
				return err
			}
			return nil
		}
		// try again because this shouldn't happen
		if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry {
			dec = json.NewDecoder(f)
			retries++
			return nil
		}
		// io.ErrUnexpectedEOF is returned from json.Decoder when there is
		// remaining data in the parser's buffer while an io.EOF occurs.
		// If the json logger writes a partial json log entry to the disk
		// while at the same time the decoder tries to decode it, the race condition happens.
		if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry {
			reader := io.MultiReader(dec.Buffered(), f)
			dec = json.NewDecoder(reader)
			retries++
			return nil
		}
		return err
	}

	// main loop
	for {
		msg, err := decodeLogLine(dec, l)
		if err != nil {
			if err := handleDecodeErr(err); err != nil {
				if err == errDone {
					return
				}
				// we got an unrecoverable error, so return
				logWatcher.Err <- err
				return
			}
			// ready to try again
			continue
		}

		retries = 0 // reset retries since we've succeeded
		if !since.IsZero() && msg.Timestamp.Before(since) {
			continue
		}
		select {
		case logWatcher.Msg <- msg:
		case <-ctx.Done():
			logWatcher.Msg <- msg
			for {
				msg, err := decodeLogLine(dec, l)
				if err != nil {
					return
				}
				if !since.IsZero() && msg.Timestamp.Before(since) {
					continue
				}
				logWatcher.Msg <- msg
			}
		}
	}
}
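
// consumeLogs is a minimal illustrative sketch of how a caller might drain
// the watcher returned by ReadLogs. The function name, the ReadConfig values
// and the printing logic are assumptions for the example only; they are not
// part of the driver's API.
func consumeLogs(l *JSONFileLogger) {
	watcher := l.ReadLogs(logger.ReadConfig{Tail: 100, Follow: true})
	defer watcher.Close()

	for {
		select {
		case msg, ok := <-watcher.Msg:
			if !ok {
				// readLogs closed the channel: the reader has finished.
				return
			}
			fmt.Printf("%s %s: %s", msg.Timestamp.Format(time.RFC3339), msg.Source, msg.Line)
		case err := <-watcher.Err:
			logrus.Errorf("error reading logs: %v", err)
			return
		}
	}
}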