github.com/kobeld/docker@v1.12.0-rc1/daemon/logger/jsonfilelog/read.go

package jsonfilelog

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/pkg/filenotify"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonlog"
	"github.com/docker/docker/pkg/tailfile"
)

const maxJSONDecodeRetry = 20000

func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) {
	l.Reset()
	if err := dec.Decode(l); err != nil {
		return nil, err
	}
	msg := &logger.Message{
		Source:    l.Stream,
		Timestamp: l.Created,
		Line:      []byte(l.Log),
		Attrs:     l.Attrs,
	}
	return msg, nil
}

// ReadLogs implements the logger's LogReader interface for the logs
// created by this driver.
func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
	logWatcher := logger.NewLogWatcher()

	go l.readLogs(logWatcher, config)
	return logWatcher
}

func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
	defer close(logWatcher.Msg)

	pth := l.writer.LogPath()
	var files []io.ReadSeeker
	for i := l.writer.MaxFiles(); i > 1; i-- {
		f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1))
		if err != nil {
			if !os.IsNotExist(err) {
				logWatcher.Err <- err
				break
			}
			continue
		}
		files = append(files, f)
	}

	latestFile, err := os.Open(pth)
	if err != nil {
		logWatcher.Err <- err
		return
	}

	if config.Tail != 0 {
		tailer := ioutils.MultiReadSeeker(append(files, latestFile)...)
		tailFile(tailer, logWatcher, config.Tail, config.Since)
	}

	// close all the rotated files
	for _, f := range files {
		if err := f.(io.Closer).Close(); err != nil {
			logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err)
		}
	}

	if !config.Follow {
		return
	}

	if config.Tail >= 0 {
		latestFile.Seek(0, os.SEEK_END)
	}

	l.mu.Lock()
	l.readers[logWatcher] = struct{}{}
	l.mu.Unlock()

	notifyRotate := l.writer.NotifyRotate()
	followLogs(latestFile, logWatcher, notifyRotate, config.Since)

	l.mu.Lock()
	delete(l.readers, logWatcher)
	l.mu.Unlock()

	l.writer.NotifyRotateEvict(notifyRotate)
}

func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) {
	var rdr io.Reader = f
	if tail > 0 {
		ls, err := tailfile.TailFile(f, tail)
		if err != nil {
			logWatcher.Err <- err
			return
		}
		rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n")))
	}
	dec := json.NewDecoder(rdr)
	l := &jsonlog.JSONLog{}
	for {
		msg, err := decodeLogLine(dec, l)
		if err != nil {
			if err != io.EOF {
				logWatcher.Err <- err
			}
			return
		}
		if !since.IsZero() && msg.Timestamp.Before(since) {
			continue
		}
		logWatcher.Msg <- msg
	}
}

func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) {
	dec := json.NewDecoder(f)
	l := &jsonlog.JSONLog{}

	fileWatcher, err := filenotify.New()
	if err != nil {
		logWatcher.Err <- err
	}
	defer func() {
		f.Close()
		fileWatcher.Close()
	}()
	name := f.Name()

	if err := fileWatcher.Add(name); err != nil {
		logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err)
		fileWatcher.Close()
		fileWatcher = filenotify.NewPollingWatcher()

		if err := fileWatcher.Add(name); err != nil {
			logrus.Debugf("error watching log file for modifications: %v", err)
			logWatcher.Err <- err
			return
		}
	}

	var retries int
	for {
		msg, err := decodeLogLine(dec, l)
		if err != nil {
			if err != io.EOF {
				// try again because this shouldn't happen
				if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry {
					dec = json.NewDecoder(f)
					retries++
					continue
				}

				// io.ErrUnexpectedEOF is returned from json.Decoder when there is
				// remaining data in the parser's buffer while an io.EOF occurs.
				// If the json logger writes a partial json log entry to the disk
				// while at the same time the decoder tries to decode it, the race condition happens.
				if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry {
					reader := io.MultiReader(dec.Buffered(), f)
					dec = json.NewDecoder(reader)
					retries++
					continue
				}

				return
			}

			select {
			case <-fileWatcher.Events():
				dec = json.NewDecoder(f)
				continue
			case <-fileWatcher.Errors():
				logWatcher.Err <- err
				return
			case <-logWatcher.WatchClose():
				fileWatcher.Remove(name)
				return
			case <-notifyRotate:
				f.Close()
				fileWatcher.Remove(name)

				// retry when the file doesn't exist
				for retries := 0; retries <= 5; retries++ {
					f, err = os.Open(name)
					if err == nil || !os.IsNotExist(err) {
						break
					}
				}

				if err = fileWatcher.Add(name); err != nil {
					logWatcher.Err <- err
					return
				}
				if err != nil {
					logWatcher.Err <- err
					return
				}

				dec = json.NewDecoder(f)
				continue
			}
		}

		retries = 0 // reset retries since we've succeeded
		if !since.IsZero() && msg.Timestamp.Before(since) {
			continue
		}
		select {
		case logWatcher.Msg <- msg:
		case <-logWatcher.WatchClose():
			logWatcher.Msg <- msg
			for {
				msg, err := decodeLogLine(dec, l)
				if err != nil {
					return
				}
				if !since.IsZero() && msg.Timestamp.Before(since) {
					continue
				}
				logWatcher.Msg <- msg
			}
		}
	}
}
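
// Usage sketch: one possible way a caller could consume the LogWatcher returned
// by ReadLogs, draining Msg until readLogs closes it and watching Err for read
// failures. The jsonLogger variable and the ReadConfig values below are
// illustrative assumptions, not something defined by this file.
//
//	watcher := jsonLogger.ReadLogs(logger.ReadConfig{Tail: 100, Follow: true})
//	defer watcher.Close() // signals followLogs via WatchClose to stop following
//	for {
//		select {
//		case msg, ok := <-watcher.Msg:
//			if !ok {
//				return // readLogs closed the channel; no more messages
//			}
//			fmt.Printf("%s [%s] %s", msg.Timestamp, msg.Source, msg.Line)
//		case err := <-watcher.Err:
//			logrus.Errorf("error reading logs: %v", err)
//			return
//		}
//	}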