github.com/portworx/docker@v1.12.1/daemon/logger/jsonfilelog/read.go

package jsonfilelog

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/pkg/filenotify"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonlog"
	"github.com/docker/docker/pkg/tailfile"
)

const maxJSONDecodeRetry = 20000

// decodeLogLine decodes a single JSON log entry from dec into a logger.Message,
// reusing l as scratch space between calls.
func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) {
	l.Reset()
	if err := dec.Decode(l); err != nil {
		return nil, err
	}
	msg := &logger.Message{
		Source:    l.Stream,
		Timestamp: l.Created,
		Line:      []byte(l.Log),
		Attrs:     l.Attrs,
	}
	return msg, nil
}

// ReadLogs implements the logger's LogReader interface for the logs
// created by this driver.
func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
	logWatcher := logger.NewLogWatcher()

	go l.readLogs(logWatcher, config)
	return logWatcher
}

// readLogs streams entries from the rotated log files and the current log file
// to the watcher, honoring the Tail, Since and Follow options in config.
func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
	defer close(logWatcher.Msg)

	pth := l.writer.LogPath()
	var files []io.ReadSeeker
	for i := l.writer.MaxFiles(); i > 1; i-- {
		f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1))
		if err != nil {
			if !os.IsNotExist(err) {
				logWatcher.Err <- err
				break
			}
			continue
		}
		files = append(files, f)
	}

	latestFile, err := os.Open(pth)
	if err != nil {
		logWatcher.Err <- err
		return
	}

	if config.Tail != 0 {
		tailer := ioutils.MultiReadSeeker(append(files, latestFile)...)
		tailFile(tailer, logWatcher, config.Tail, config.Since)
	}

	// close all the rotated files
	for _, f := range files {
		if err := f.(io.Closer).Close(); err != nil {
			logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err)
		}
	}

	if !config.Follow {
		if err := latestFile.Close(); err != nil {
			logrus.Errorf("Error closing file: %v", err)
		}
		return
	}

	if config.Tail >= 0 {
		latestFile.Seek(0, os.SEEK_END)
	}

	l.mu.Lock()
	l.readers[logWatcher] = struct{}{}
	l.mu.Unlock()

	notifyRotate := l.writer.NotifyRotate()
	followLogs(latestFile, logWatcher, notifyRotate, config.Since)

	l.mu.Lock()
	delete(l.readers, logWatcher)
	l.mu.Unlock()

	l.writer.NotifyRotateEvict(notifyRotate)
}

// tailFile decodes JSON log entries from f, keeping only the last `tail` lines
// when tail is positive, and sends every entry at or after `since` to the watcher.
func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) {
	var rdr io.Reader = f
	if tail > 0 {
		ls, err := tailfile.TailFile(f, tail)
		if err != nil {
			logWatcher.Err <- err
			return
		}
		rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n")))
	}
	dec := json.NewDecoder(rdr)
	l := &jsonlog.JSONLog{}
	for {
		msg, err := decodeLogLine(dec, l)
		if err != nil {
			if err != io.EOF {
				logWatcher.Err <- err
			}
			return
		}
		if !since.IsZero() && msg.Timestamp.Before(since) {
			continue
		}
		logWatcher.Msg <- msg
	}
}

// followLogs keeps decoding entries appended to f and sends them to the watcher,
// re-opening the file when it is rotated and returning when the watcher is closed.
func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) {
	dec := json.NewDecoder(f)
	l := &jsonlog.JSONLog{}

	fileWatcher, err := filenotify.New()
	if err != nil {
		logWatcher.Err <- err
	}
	defer func() {
		f.Close()
		fileWatcher.Close()
	}()
	name := f.Name()

	if err := fileWatcher.Add(name); err != nil {
		logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err)
		fileWatcher.Close()
		fileWatcher = filenotify.NewPollingWatcher()

		if err := fileWatcher.Add(name); err != nil {
			logrus.Debugf("error watching log file for modifications: %v", err)
			logWatcher.Err <- err
			return
		}
	}

	var retries int
	for {
		msg, err := decodeLogLine(dec, l)
		if err != nil {
			if err != io.EOF {
				// retry, because a syntax error should not normally happen here
				if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry {
					dec = json.NewDecoder(f)
					retries++
					continue
				}

				// io.ErrUnexpectedEOF is returned from json.Decoder when there is
				// remaining data in the parser's buffer while an io.EOF occurs.
				// This race happens when the json logger writes a partial json log
				// entry to disk while the decoder is reading it.
				if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry {
					reader := io.MultiReader(dec.Buffered(), f)
					dec = json.NewDecoder(reader)
					retries++
					continue
				}

				return
			}

			select {
			case <-fileWatcher.Events():
				dec = json.NewDecoder(f)
				continue
			case <-fileWatcher.Errors():
				logWatcher.Err <- err
				return
			case <-logWatcher.WatchClose():
				fileWatcher.Remove(name)
				return
			case <-notifyRotate:
				f.Close()
				fileWatcher.Remove(name)

				// retry when the file doesn't exist
				for retries := 0; retries <= 5; retries++ {
					f, err = os.Open(name)
					if err == nil || !os.IsNotExist(err) {
						break
					}
				}

				if err = fileWatcher.Add(name); err != nil {
					logWatcher.Err <- err
					return
				}
				if err != nil {
					logWatcher.Err <- err
					return
				}

				dec = json.NewDecoder(f)
				continue
			}
		}

		retries = 0 // reset retries since we've succeeded
		if !since.IsZero() && msg.Timestamp.Before(since) {
			continue
		}
		select {
		case logWatcher.Msg <- msg:
		case <-logWatcher.WatchClose():
			logWatcher.Msg <- msg
			for {
				msg, err := decodeLogLine(dec, l)
				if err != nil {
					return
				}
				if !since.IsZero() && msg.Timestamp.Before(since) {
					continue
				}
				logWatcher.Msg <- msg
			}
		}
	}
}