// Package jsonfilelog provides the default Logger implementation for
// Docker logging. This logger logs to files on the host server in the
// JSON format.
package jsonfilelog

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"strconv"
	"sync"
	"time"

	"gopkg.in/fsnotify.v1"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonlog"
	"github.com/docker/docker/pkg/pubsub"
	"github.com/docker/docker/pkg/tailfile"
	"github.com/docker/docker/pkg/timeutils"
	"github.com/docker/docker/pkg/units"
)

const (
	// Name is the name of the file that the jsonlogger logs to.
	Name = "json-file"
	// maxJSONDecodeRetry bounds how many times followLogs rebuilds its
	// JSON decoder after a syntax error before reporting the error.
	maxJSONDecodeRetry = 10
)

// JSONFileLogger is Logger implementation for default Docker logging.
type JSONFileLogger struct {
	buf          *bytes.Buffer
	f            *os.File   // store for closing
	mu           sync.Mutex // protects buffer
	capacity     int64      // maximum size of each file; -1 means unlimited
	n            int        // maximum number of files
	ctx          logger.Context
	readers      map[*logger.LogWatcher]struct{} // stores the active log followers
	notifyRotate *pubsub.Publisher               // publishes an event on each file rotation
}

// init registers the driver and its option validator with the logger
// registry; failure is fatal because the daemon needs the default driver.
func init() {
	if err := logger.RegisterLogDriver(Name, New); err != nil {
		logrus.Fatal(err)
	}
	if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil {
		logrus.Fatal(err)
	}
}

// New creates new JSONFileLogger which writes to filename passed in
// on given context.
57 func New(ctx logger.Context) (logger.Logger, error) { 58 log, err := os.OpenFile(ctx.LogPath, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600) 59 if err != nil { 60 return nil, err 61 } 62 var capval int64 = -1 63 if capacity, ok := ctx.Config["max-size"]; ok { 64 var err error 65 capval, err = units.FromHumanSize(capacity) 66 if err != nil { 67 return nil, err 68 } 69 } 70 var maxFiles = 1 71 if maxFileString, ok := ctx.Config["max-file"]; ok { 72 maxFiles, err = strconv.Atoi(maxFileString) 73 if err != nil { 74 return nil, err 75 } 76 if maxFiles < 1 { 77 return nil, fmt.Errorf("max-file cannot be less than 1") 78 } 79 } 80 return &JSONFileLogger{ 81 f: log, 82 buf: bytes.NewBuffer(nil), 83 ctx: ctx, 84 capacity: capval, 85 n: maxFiles, 86 readers: make(map[*logger.LogWatcher]struct{}), 87 notifyRotate: pubsub.NewPublisher(0, 1), 88 }, nil 89 } 90 91 // Log converts logger.Message to jsonlog.JSONLog and serializes it to file. 92 func (l *JSONFileLogger) Log(msg *logger.Message) error { 93 l.mu.Lock() 94 defer l.mu.Unlock() 95 96 timestamp, err := timeutils.FastMarshalJSON(msg.Timestamp) 97 if err != nil { 98 return err 99 } 100 err = (&jsonlog.JSONLogs{Log: append(msg.Line, '\n'), Stream: msg.Source, Created: timestamp}).MarshalJSONBuf(l.buf) 101 if err != nil { 102 return err 103 } 104 l.buf.WriteByte('\n') 105 _, err = writeLog(l) 106 return err 107 } 108 109 func writeLog(l *JSONFileLogger) (int64, error) { 110 if l.capacity == -1 { 111 return writeToBuf(l) 112 } 113 meta, err := l.f.Stat() 114 if err != nil { 115 return -1, err 116 } 117 if meta.Size() >= l.capacity { 118 name := l.f.Name() 119 if err := l.f.Close(); err != nil { 120 return -1, err 121 } 122 if err := rotate(name, l.n); err != nil { 123 return -1, err 124 } 125 file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666) 126 if err != nil { 127 return -1, err 128 } 129 l.f = file 130 l.notifyRotate.Publish(struct{}{}) 131 } 132 return writeToBuf(l) 133 } 134 135 func writeToBuf(l 
*JSONFileLogger) (int64, error) { 136 i, err := l.buf.WriteTo(l.f) 137 if err != nil { 138 l.buf = bytes.NewBuffer(nil) 139 } 140 return i, err 141 } 142 143 func rotate(name string, n int) error { 144 if n < 2 { 145 return nil 146 } 147 for i := n - 1; i > 1; i-- { 148 oldFile := name + "." + strconv.Itoa(i) 149 replacingFile := name + "." + strconv.Itoa(i-1) 150 if err := backup(oldFile, replacingFile); err != nil { 151 return err 152 } 153 } 154 if err := backup(name+".1", name); err != nil { 155 return err 156 } 157 return nil 158 } 159 160 // backup renames a file from curr to old, creating an empty file curr if it does not exist. 161 func backup(old, curr string) error { 162 if _, err := os.Stat(old); !os.IsNotExist(err) { 163 err := os.Remove(old) 164 if err != nil { 165 return err 166 } 167 } 168 if _, err := os.Stat(curr); os.IsNotExist(err) { 169 f, err := os.Create(curr) 170 if err != nil { 171 return err 172 } 173 f.Close() 174 } 175 return os.Rename(curr, old) 176 } 177 178 // ValidateLogOpt looks for json specific log options max-file & max-size. 179 func ValidateLogOpt(cfg map[string]string) error { 180 for key := range cfg { 181 switch key { 182 case "max-file": 183 case "max-size": 184 default: 185 return fmt.Errorf("unknown log opt '%s' for json-file log driver", key) 186 } 187 } 188 return nil 189 } 190 191 // LogPath returns the location the given json logger logs to. 192 func (l *JSONFileLogger) LogPath() string { 193 return l.ctx.LogPath 194 } 195 196 // Close closes underlying file and signals all readers to stop. 197 func (l *JSONFileLogger) Close() error { 198 l.mu.Lock() 199 err := l.f.Close() 200 for r := range l.readers { 201 r.Close() 202 delete(l.readers, r) 203 } 204 l.mu.Unlock() 205 return err 206 } 207 208 // Name returns name of this logger. 
209 func (l *JSONFileLogger) Name() string { 210 return Name 211 } 212 213 func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { 214 l.Reset() 215 if err := dec.Decode(l); err != nil { 216 return nil, err 217 } 218 msg := &logger.Message{ 219 Source: l.Stream, 220 Timestamp: l.Created, 221 Line: []byte(l.Log), 222 } 223 return msg, nil 224 } 225 226 // ReadLogs implements the logger's LogReader interface for the logs 227 // created by this driver. 228 func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { 229 logWatcher := logger.NewLogWatcher() 230 231 go l.readLogs(logWatcher, config) 232 return logWatcher 233 } 234 235 func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { 236 defer close(logWatcher.Msg) 237 238 pth := l.ctx.LogPath 239 var files []io.ReadSeeker 240 for i := l.n; i > 1; i-- { 241 f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1)) 242 if err != nil { 243 if !os.IsNotExist(err) { 244 logWatcher.Err <- err 245 break 246 } 247 continue 248 } 249 defer f.Close() 250 files = append(files, f) 251 } 252 253 latestFile, err := os.Open(pth) 254 if err != nil { 255 logWatcher.Err <- err 256 return 257 } 258 defer latestFile.Close() 259 260 files = append(files, latestFile) 261 tailer := ioutils.MultiReadSeeker(files...) 
262 263 if config.Tail != 0 { 264 tailFile(tailer, logWatcher, config.Tail, config.Since) 265 } 266 267 if !config.Follow { 268 return 269 } 270 271 if config.Tail >= 0 { 272 latestFile.Seek(0, os.SEEK_END) 273 } 274 275 l.mu.Lock() 276 l.readers[logWatcher] = struct{}{} 277 l.mu.Unlock() 278 279 notifyRotate := l.notifyRotate.Subscribe() 280 followLogs(latestFile, logWatcher, notifyRotate, config.Since) 281 282 l.mu.Lock() 283 delete(l.readers, logWatcher) 284 l.mu.Unlock() 285 286 l.notifyRotate.Evict(notifyRotate) 287 } 288 289 func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) { 290 var rdr io.Reader = f 291 if tail > 0 { 292 ls, err := tailfile.TailFile(f, tail) 293 if err != nil { 294 logWatcher.Err <- err 295 return 296 } 297 rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n"))) 298 } 299 dec := json.NewDecoder(rdr) 300 l := &jsonlog.JSONLog{} 301 for { 302 msg, err := decodeLogLine(dec, l) 303 if err != nil { 304 if err != io.EOF { 305 logWatcher.Err <- err 306 } 307 return 308 } 309 if !since.IsZero() && msg.Timestamp.Before(since) { 310 continue 311 } 312 logWatcher.Msg <- msg 313 } 314 } 315 316 func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) { 317 dec := json.NewDecoder(f) 318 l := &jsonlog.JSONLog{} 319 fileWatcher, err := fsnotify.NewWatcher() 320 if err != nil { 321 logWatcher.Err <- err 322 return 323 } 324 defer fileWatcher.Close() 325 if err := fileWatcher.Add(f.Name()); err != nil { 326 logWatcher.Err <- err 327 return 328 } 329 330 var retries int 331 for { 332 msg, err := decodeLogLine(dec, l) 333 if err != nil { 334 if err != io.EOF { 335 // try again because this shouldn't happen 336 if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry { 337 dec = json.NewDecoder(f) 338 retries++ 339 continue 340 } 341 logWatcher.Err <- err 342 return 343 } 344 345 select { 346 case <-fileWatcher.Events: 347 dec = json.NewDecoder(f) 348 continue 
349 case <-fileWatcher.Errors: 350 logWatcher.Err <- err 351 return 352 case <-logWatcher.WatchClose(): 353 return 354 case <-notifyRotate: 355 fileWatcher.Remove(f.Name()) 356 357 f, err = os.Open(f.Name()) 358 if err != nil { 359 logWatcher.Err <- err 360 return 361 } 362 if err := fileWatcher.Add(f.Name()); err != nil { 363 logWatcher.Err <- err 364 } 365 dec = json.NewDecoder(f) 366 continue 367 } 368 } 369 370 retries = 0 // reset retries since we've succeeded 371 if !since.IsZero() && msg.Timestamp.Before(since) { 372 continue 373 } 374 select { 375 case logWatcher.Msg <- msg: 376 case <-logWatcher.WatchClose(): 377 logWatcher.Msg <- msg 378 for { 379 msg, err := decodeLogLine(dec, l) 380 if err != nil { 381 return 382 } 383 if !since.IsZero() && msg.Timestamp.Before(since) { 384 continue 385 } 386 logWatcher.Msg <- msg 387 } 388 } 389 } 390 }