github.com/projectatomic/docker@v1.8.2/daemon/logger/jsonfilelog/jsonfilelog.go

package jsonfilelog

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "os"
    "strconv"
    "sync"
    "time"

    "gopkg.in/fsnotify.v1"

    "github.com/Sirupsen/logrus"
    "github.com/docker/docker/daemon/logger"
    "github.com/docker/docker/pkg/ioutils"
    "github.com/docker/docker/pkg/jsonlog"
    "github.com/docker/docker/pkg/pubsub"
    "github.com/docker/docker/pkg/tailfile"
    "github.com/docker/docker/pkg/timeutils"
    "github.com/docker/docker/pkg/units"
)

const (
    // Name is the name of the json-file log driver.
    Name = "json-file"
    // maxJSONDecodeRetry bounds how many times followLogs rebuilds its
    // decoder after a JSON syntax error before giving up.
    maxJSONDecodeRetry = 10
)

// JSONFileLogger is the Logger implementation for the default docker
// logging driver: it serializes log messages as JSON objects to a file.
type JSONFileLogger struct {
    buf          *bytes.Buffer
    f            *os.File   // store for closing
    mu           sync.Mutex // protects buffer
    capacity     int64      // maximum size of each file
    n            int        // maximum number of files
    ctx          logger.Context
    readers      map[*logger.LogWatcher]struct{} // stores the active log followers
    notifyRotate *pubsub.Publisher
}

func init() {
    if err := logger.RegisterLogDriver(Name, New); err != nil {
        logrus.Fatal(err)
    }
    if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil {
        logrus.Fatal(err)
    }
}

// New creates a new JSONFileLogger which writes to the file named by
// ctx.LogPath, honoring the max-size and max-file options if set.
func New(ctx logger.Context) (logger.Logger, error) {
    log, err := os.OpenFile(ctx.LogPath, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
    if err != nil {
        return nil, err
    }
    var capval int64 = -1
    if capacity, ok := ctx.Config["max-size"]; ok {
        var err error
        capval, err = units.FromHumanSize(capacity)
        if err != nil {
            return nil, err
        }
    }
    maxFiles := 1
    if maxFileString, ok := ctx.Config["max-file"]; ok {
        maxFiles, err = strconv.Atoi(maxFileString)
        if err != nil {
            return nil, err
        }
        if maxFiles < 1 {
            return nil, fmt.Errorf("max-file cannot be less than 1")
        }
    }
    return &JSONFileLogger{
        f:            log,
        buf:          bytes.NewBuffer(nil),
        ctx:          ctx,
        capacity:     capval,
        n:            maxFiles,
        readers:      make(map[*logger.LogWatcher]struct{}),
        notifyRotate: pubsub.NewPublisher(0, 1),
    }, nil
}

// Log converts logger.Message to jsonlog.JSONLog and serializes it to the file.
func (l *JSONFileLogger) Log(msg *logger.Message) error {
    l.mu.Lock()
    defer l.mu.Unlock()

    timestamp, err := timeutils.FastMarshalJSON(msg.Timestamp)
    if err != nil {
        return err
    }
    err = (&jsonlog.JSONLogBytes{Log: append(msg.Line, '\n'), Stream: msg.Source, Created: timestamp}).MarshalJSONBuf(l.buf)
    if err != nil {
        return err
    }
    l.buf.WriteByte('\n')
    _, err = writeLog(l)
    return err
}

// writeLog flushes the buffered entry to the log file, rotating the file
// first when it has reached the configured capacity.
func writeLog(l *JSONFileLogger) (int64, error) {
    if l.capacity == -1 {
        return writeToBuf(l)
    }
    meta, err := l.f.Stat()
    if err != nil {
        return -1, err
    }
    if meta.Size() >= l.capacity {
        name := l.f.Name()
        if err := l.f.Close(); err != nil {
            return -1, err
        }
        if err := rotate(name, l.n); err != nil {
            return -1, err
        }
        file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666)
        if err != nil {
            return -1, err
        }
        l.f = file
        l.notifyRotate.Publish(struct{}{})
    }
    return writeToBuf(l)
}

// writeToBuf writes the buffered bytes to the file; on a write error the
// buffer is reset so the failed contents are not retried on the next call.
func writeToBuf(l *JSONFileLogger) (int64, error) {
    i, err := l.buf.WriteTo(l.f)
    if err != nil {
        l.buf = bytes.NewBuffer(nil)
    }
    return i, err
}
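
// rotate shifts the numbered backups of name up by one: name.1 becomes
// name.2 (overwriting the previous name.2), and name itself becomes name.1;
// the caller then recreates an empty name. For example, with max-file=3 and
// a log path of /path/to/ctr-json.log (the path here is illustrative only),
// the files cycle ctr-json.log -> ctr-json.log.1 -> ctr-json.log.2, and the
// oldest backup is dropped.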
func rotate(name string, n int) error {
    if n < 2 {
        return nil
    }
    for i := n - 1; i > 1; i-- {
        oldFile := name + "." + strconv.Itoa(i)
        replacingFile := name + "." + strconv.Itoa(i-1)
        if err := backup(oldFile, replacingFile); err != nil {
            return err
        }
    }
    if err := backup(name+".1", name); err != nil {
        return err
    }
    return nil
}

// backup renames curr to old, removing any existing old first and creating
// an empty curr if it does not exist yet.
func backup(old, curr string) error {
    if _, err := os.Stat(old); !os.IsNotExist(err) {
        err := os.Remove(old)
        if err != nil {
            return err
        }
    }
    if _, err := os.Stat(curr); os.IsNotExist(err) {
        f, err := os.Create(curr)
        if err != nil {
            return err
        }
        f.Close()
    }
    return os.Rename(curr, old)
}

// ValidateLogOpt checks that only the json-file specific options max-file
// and max-size are passed.
func ValidateLogOpt(cfg map[string]string) error {
    for key := range cfg {
        switch key {
        case "max-file":
        case "max-size":
        default:
            return fmt.Errorf("unknown log opt '%s' for json-file log driver", key)
        }
    }
    return nil
}

// LogPath returns the path of the file this logger writes to.
func (l *JSONFileLogger) LogPath() string {
    return l.ctx.LogPath
}

// Close closes the underlying file and signals all readers to stop.
func (l *JSONFileLogger) Close() error {
    l.mu.Lock()
    err := l.f.Close()
    for r := range l.readers {
        r.Close()
        delete(l.readers, r)
    }
    l.mu.Unlock()
    return err
}

// Name returns the name of this log driver.
func (l *JSONFileLogger) Name() string {
    return Name
}

// decodeLogLine decodes the next JSON log entry from dec into l and converts
// it to a logger.Message.
func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) {
    l.Reset()
    if err := dec.Decode(l); err != nil {
        return nil, err
    }
    msg := &logger.Message{
        Source:    l.Stream,
        Timestamp: l.Created,
        Line:      []byte(l.Log),
    }
    return msg, nil
}

// ReadLogs reads entries from the log files, optionally tailing and
// following the live file, and streams them through the returned LogWatcher.
func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
    logWatcher := logger.NewLogWatcher()

    go l.readLogs(logWatcher, config)
    return logWatcher
}

func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
    defer close(logWatcher.Msg)

    pth := l.ctx.LogPath
    var files []io.ReadSeeker
    for i := l.n; i > 1; i-- {
        f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1))
        if err != nil {
            if !os.IsNotExist(err) {
                logWatcher.Err <- err
                break
            }
            continue
        }
        defer f.Close()
        files = append(files, f)
    }

    latestFile, err := os.Open(pth)
    if err != nil {
        logWatcher.Err <- err
        return
    }
    defer latestFile.Close()

    // Present the rotated backups plus the live file as one seekable stream
    // so tailFile can count trailing lines across all of them.
    files = append(files, latestFile)
    tailer := ioutils.MultiReadSeeker(files...)
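
    // Tail semantics, as implemented below: 0 sends no historical lines, a
    // negative value replays every line, and a positive value sends only
    // that many trailing lines before (optionally) following the live file.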
    if config.Tail != 0 {
        tailFile(tailer, logWatcher, config.Tail, config.Since)
    }

    if !config.Follow {
        return
    }

    if config.Tail >= 0 {
        latestFile.Seek(0, os.SEEK_END)
    }

    l.mu.Lock()
    l.readers[logWatcher] = struct{}{}
    l.mu.Unlock()

    notifyRotate := l.notifyRotate.Subscribe()
    followLogs(latestFile, logWatcher, notifyRotate, config.Since)

    l.mu.Lock()
    delete(l.readers, logWatcher)
    l.mu.Unlock()

    l.notifyRotate.Evict(notifyRotate)
}

func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) {
    var rdr io.Reader = f
    if tail > 0 {
        ls, err := tailfile.TailFile(f, tail)
        if err != nil {
            logWatcher.Err <- err
            return
        }
        rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n")))
    }
    dec := json.NewDecoder(rdr)
    l := &jsonlog.JSONLog{}
    for {
        msg, err := decodeLogLine(dec, l)
        if err != nil {
            if err != io.EOF {
                logWatcher.Err <- err
            }
            return
        }
        if !since.IsZero() && msg.Timestamp.Before(since) {
            continue
        }
        logWatcher.Msg <- msg
    }
}

func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) {
    dec := json.NewDecoder(f)
    l := &jsonlog.JSONLog{}
    fileWatcher, err := fsnotify.NewWatcher()
    if err != nil {
        logWatcher.Err <- err
        return
    }
    defer fileWatcher.Close()
    if err := fileWatcher.Add(f.Name()); err != nil {
        logWatcher.Err <- err
        return
    }

    var retries int
    for {
        msg, err := decodeLogLine(dec, l)
        if err != nil {
            if err != io.EOF {
                // try again because this shouldn't happen
                if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry {
                    dec = json.NewDecoder(f)
                    retries++
                    continue
                }
                logWatcher.Err <- err
                return
            }

            select {
            case <-fileWatcher.Events:
                dec = json.NewDecoder(f)
                continue
            case <-fileWatcher.Errors:
                logWatcher.Err <- err
                return
            case <-logWatcher.WatchClose():
                return
            case <-notifyRotate:
                fileWatcher.Remove(f.Name())

                f, err = os.Open(f.Name())
                if err != nil {
                    logWatcher.Err <- err
                    return
                }
                if err := fileWatcher.Add(f.Name()); err != nil {
                    logWatcher.Err <- err
                }
                dec = json.NewDecoder(f)
                continue
            }
        }

        retries = 0 // reset retries since we've succeeded
        if !since.IsZero() && msg.Timestamp.Before(since) {
            continue
        }
        select {
        case logWatcher.Msg <- msg:
        case <-logWatcher.WatchClose():
            logWatcher.Msg <- msg
            for {
                msg, err := decodeLogLine(dec, l)
                if err != nil {
                    return
                }
                if !since.IsZero() && msg.Timestamp.Before(since) {
                    continue
                }
                logWatcher.Msg <- msg
            }
        }
    }
}
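
// The helper below is an illustrative sketch only and is not part of the
// upstream driver: it shows how a hypothetical caller could validate options
// and construct the logger directly. The function name, the logPath
// parameter, and the option values are assumptions; the Context literal sets
// only the fields this file already uses (LogPath and Config). Real callers
// go through the factory registered in init via logger.RegisterLogDriver.
func exampleNewJSONFileLogger(logPath string) (logger.Logger, error) {
    cfg := map[string]string{
        "max-size": "10m", // rotate once the file reaches roughly 10 MB
        "max-file": "3",   // keep the live file plus two numbered backups
    }
    if err := ValidateLogOpt(cfg); err != nil {
        return nil, err
    }
    return New(logger.Context{LogPath: logPath, Config: cfg})
}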