github.com/devdivbcp/moby@v17.12.0-ce-rc1.0.20200726071732-2d4bfdc789ad+incompatible/daemon/logger/loggerutils/logfile.go (about) 1 package loggerutils // import "github.com/docker/docker/daemon/logger/loggerutils" 2 3 import ( 4 "compress/gzip" 5 "context" 6 "encoding/json" 7 "fmt" 8 "io" 9 "os" 10 "runtime" 11 "strconv" 12 "strings" 13 "sync" 14 "time" 15 16 "github.com/docker/docker/daemon/logger" 17 "github.com/docker/docker/pkg/filenotify" 18 "github.com/docker/docker/pkg/pools" 19 "github.com/docker/docker/pkg/pubsub" 20 "github.com/fsnotify/fsnotify" 21 "github.com/pkg/errors" 22 "github.com/sirupsen/logrus" 23 ) 24 25 const tmpLogfileSuffix = ".tmp" 26 27 // rotateFileMetadata is a metadata of the gzip header of the compressed log file 28 type rotateFileMetadata struct { 29 LastTime time.Time `json:"lastTime,omitempty"` 30 } 31 32 // refCounter is a counter of logfile being referenced 33 type refCounter struct { 34 mu sync.Mutex 35 counter map[string]int 36 } 37 38 // Reference increase the reference counter for specified logfile 39 func (rc *refCounter) GetReference(fileName string, openRefFile func(fileName string, exists bool) (*os.File, error)) (*os.File, error) { 40 rc.mu.Lock() 41 defer rc.mu.Unlock() 42 43 var ( 44 file *os.File 45 err error 46 ) 47 _, ok := rc.counter[fileName] 48 file, err = openRefFile(fileName, ok) 49 if err != nil { 50 return nil, err 51 } 52 53 if ok { 54 rc.counter[fileName]++ 55 } else if file != nil { 56 rc.counter[file.Name()] = 1 57 } 58 59 return file, nil 60 } 61 62 // Dereference reduce the reference counter for specified logfile 63 func (rc *refCounter) Dereference(fileName string) error { 64 rc.mu.Lock() 65 defer rc.mu.Unlock() 66 67 rc.counter[fileName]-- 68 if rc.counter[fileName] <= 0 { 69 delete(rc.counter, fileName) 70 err := os.Remove(fileName) 71 if err != nil { 72 return err 73 } 74 } 75 return nil 76 } 77 78 // LogFile is Logger implementation for default Docker logging. 79 type LogFile struct { 80 mu sync.RWMutex // protects the logfile access 81 f *os.File // store for closing 82 closed bool 83 rotateMu sync.Mutex // blocks the next rotation until the current rotation is completed 84 capacity int64 // maximum size of each file 85 currentSize int64 // current size of the latest file 86 maxFiles int // maximum number of files 87 compress bool // whether old versions of log files are compressed 88 lastTimestamp time.Time // timestamp of the last log 89 filesRefCounter refCounter // keep reference-counted of decompressed files 90 notifyRotate *pubsub.Publisher 91 marshal logger.MarshalFunc 92 createDecoder makeDecoderFunc 93 getTailReader GetTailReaderFunc 94 perms os.FileMode 95 } 96 97 type makeDecoderFunc func(rdr io.Reader) func() (*logger.Message, error) 98 99 // SizeReaderAt defines a ReaderAt that also reports its size. 100 // This is used for tailing log files. 101 type SizeReaderAt interface { 102 io.ReaderAt 103 Size() int64 104 } 105 106 // GetTailReaderFunc is used to truncate a reader to only read as much as is required 107 // in order to get the passed in number of log lines. 108 // It returns the sectioned reader, the number of lines that the section reader 109 // contains, and any error that occurs. 110 type GetTailReaderFunc func(ctx context.Context, f SizeReaderAt, nLogLines int) (rdr io.Reader, nLines int, err error) 111 112 // NewLogFile creates new LogFile 113 func NewLogFile(logPath string, capacity int64, maxFiles int, compress bool, marshaller logger.MarshalFunc, decodeFunc makeDecoderFunc, perms os.FileMode, getTailReader GetTailReaderFunc) (*LogFile, error) { 114 log, err := openFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, perms) 115 if err != nil { 116 return nil, err 117 } 118 119 size, err := log.Seek(0, os.SEEK_END) 120 if err != nil { 121 return nil, err 122 } 123 124 return &LogFile{ 125 f: log, 126 capacity: capacity, 127 currentSize: size, 128 maxFiles: maxFiles, 129 compress: compress, 130 filesRefCounter: refCounter{counter: make(map[string]int)}, 131 notifyRotate: pubsub.NewPublisher(0, 1), 132 marshal: marshaller, 133 createDecoder: decodeFunc, 134 perms: perms, 135 getTailReader: getTailReader, 136 }, nil 137 } 138 139 // WriteLogEntry writes the provided log message to the current log file. 140 // This may trigger a rotation event if the max file/capacity limits are hit. 141 func (w *LogFile) WriteLogEntry(msg *logger.Message) error { 142 b, err := w.marshal(msg) 143 if err != nil { 144 return errors.Wrap(err, "error marshalling log message") 145 } 146 147 logger.PutMessage(msg) 148 149 w.mu.Lock() 150 if w.closed { 151 w.mu.Unlock() 152 return errors.New("cannot write because the output file was closed") 153 } 154 155 if err := w.checkCapacityAndRotate(); err != nil { 156 w.mu.Unlock() 157 return err 158 } 159 160 n, err := w.f.Write(b) 161 if err == nil { 162 w.currentSize += int64(n) 163 w.lastTimestamp = msg.Timestamp 164 } 165 w.mu.Unlock() 166 return err 167 } 168 169 func (w *LogFile) checkCapacityAndRotate() error { 170 if w.capacity == -1 { 171 return nil 172 } 173 174 if w.currentSize >= w.capacity { 175 w.rotateMu.Lock() 176 fname := w.f.Name() 177 if err := w.f.Close(); err != nil { 178 // if there was an error during a prior rotate, the file could already be closed 179 if !errors.Is(err, os.ErrClosed) { 180 w.rotateMu.Unlock() 181 return errors.Wrap(err, "error closing file") 182 } 183 } 184 if err := rotate(fname, w.maxFiles, w.compress); err != nil { 185 w.rotateMu.Unlock() 186 return err 187 } 188 file, err := openFile(fname, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, w.perms) 189 if err != nil { 190 w.rotateMu.Unlock() 191 return err 192 } 193 w.f = file 194 w.currentSize = 0 195 w.notifyRotate.Publish(struct{}{}) 196 197 if w.maxFiles <= 1 || !w.compress { 198 w.rotateMu.Unlock() 199 return nil 200 } 201 202 go func() { 203 compressFile(fname+".1", w.lastTimestamp) 204 w.rotateMu.Unlock() 205 }() 206 } 207 208 return nil 209 } 210 211 func rotate(name string, maxFiles int, compress bool) error { 212 if maxFiles < 2 { 213 return nil 214 } 215 216 var extension string 217 if compress { 218 extension = ".gz" 219 } 220 221 lastFile := fmt.Sprintf("%s.%d%s", name, maxFiles-1, extension) 222 err := os.Remove(lastFile) 223 if err != nil && !os.IsNotExist(err) { 224 return errors.Wrap(err, "error removing oldest log file") 225 } 226 227 for i := maxFiles - 1; i > 1; i-- { 228 toPath := name + "." + strconv.Itoa(i) + extension 229 fromPath := name + "." + strconv.Itoa(i-1) + extension 230 if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) { 231 return err 232 } 233 } 234 235 if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) { 236 return err 237 } 238 239 return nil 240 } 241 242 func compressFile(fileName string, lastTimestamp time.Time) { 243 file, err := os.Open(fileName) 244 if err != nil { 245 logrus.Errorf("Failed to open log file: %v", err) 246 return 247 } 248 defer func() { 249 file.Close() 250 err := os.Remove(fileName) 251 if err != nil { 252 logrus.Errorf("Failed to remove source log file: %v", err) 253 } 254 }() 255 256 outFile, err := openFile(fileName+".gz", os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0640) 257 if err != nil { 258 logrus.Errorf("Failed to open or create gzip log file: %v", err) 259 return 260 } 261 defer func() { 262 outFile.Close() 263 if err != nil { 264 os.Remove(fileName + ".gz") 265 } 266 }() 267 268 compressWriter := gzip.NewWriter(outFile) 269 defer compressWriter.Close() 270 271 // Add the last log entry timestamp to the gzip header 272 extra := rotateFileMetadata{} 273 extra.LastTime = lastTimestamp 274 compressWriter.Header.Extra, err = json.Marshal(&extra) 275 if err != nil { 276 // Here log the error only and don't return since this is just an optimization. 277 logrus.Warningf("Failed to marshal gzip header as JSON: %v", err) 278 } 279 280 _, err = pools.Copy(compressWriter, file) 281 if err != nil { 282 logrus.WithError(err).WithField("module", "container.logs").WithField("file", fileName).Error("Error compressing log file") 283 return 284 } 285 } 286 287 // MaxFiles return maximum number of files 288 func (w *LogFile) MaxFiles() int { 289 return w.maxFiles 290 } 291 292 // Close closes underlying file and signals all readers to stop. 293 func (w *LogFile) Close() error { 294 w.mu.Lock() 295 defer w.mu.Unlock() 296 if w.closed { 297 return nil 298 } 299 if err := w.f.Close(); err != nil { 300 return err 301 } 302 w.closed = true 303 return nil 304 } 305 306 // ReadLogs decodes entries from log files and sends them the passed in watcher 307 // 308 // Note: Using the follow option can become inconsistent in cases with very frequent rotations and max log files is 1. 309 // TODO: Consider a different implementation which can effectively follow logs under frequent rotations. 310 func (w *LogFile) ReadLogs(config logger.ReadConfig, watcher *logger.LogWatcher) { 311 w.mu.RLock() 312 currentFile, err := os.Open(w.f.Name()) 313 if err != nil { 314 w.mu.RUnlock() 315 watcher.Err <- err 316 return 317 } 318 defer currentFile.Close() 319 320 currentChunk, err := newSectionReader(currentFile) 321 if err != nil { 322 w.mu.RUnlock() 323 watcher.Err <- err 324 return 325 } 326 327 if config.Tail != 0 { 328 // TODO(@cpuguy83): Instead of opening every file, only get the files which 329 // are needed to tail. 330 // This is especially costly when compression is enabled. 331 files, err := w.openRotatedFiles(config) 332 w.mu.RUnlock() 333 if err != nil { 334 watcher.Err <- err 335 return 336 } 337 338 closeFiles := func() { 339 for _, f := range files { 340 f.Close() 341 fileName := f.Name() 342 if strings.HasSuffix(fileName, tmpLogfileSuffix) { 343 err := w.filesRefCounter.Dereference(fileName) 344 if err != nil { 345 logrus.Errorf("Failed to dereference the log file %q: %v", fileName, err) 346 } 347 } 348 } 349 } 350 351 readers := make([]SizeReaderAt, 0, len(files)+1) 352 for _, f := range files { 353 stat, err := f.Stat() 354 if err != nil { 355 watcher.Err <- errors.Wrap(err, "error reading size of rotated file") 356 closeFiles() 357 return 358 } 359 readers = append(readers, io.NewSectionReader(f, 0, stat.Size())) 360 } 361 if currentChunk.Size() > 0 { 362 readers = append(readers, currentChunk) 363 } 364 365 tailFiles(readers, watcher, w.createDecoder, w.getTailReader, config) 366 closeFiles() 367 368 w.mu.RLock() 369 } 370 371 if !config.Follow || w.closed { 372 w.mu.RUnlock() 373 return 374 } 375 w.mu.RUnlock() 376 377 notifyRotate := w.notifyRotate.Subscribe() 378 defer w.notifyRotate.Evict(notifyRotate) 379 followLogs(currentFile, watcher, notifyRotate, w.createDecoder, config.Since, config.Until) 380 } 381 382 func (w *LogFile) openRotatedFiles(config logger.ReadConfig) (files []*os.File, err error) { 383 w.rotateMu.Lock() 384 defer w.rotateMu.Unlock() 385 386 defer func() { 387 if err == nil { 388 return 389 } 390 for _, f := range files { 391 f.Close() 392 if strings.HasSuffix(f.Name(), tmpLogfileSuffix) { 393 err := os.Remove(f.Name()) 394 if err != nil && !os.IsNotExist(err) { 395 logrus.Warnf("Failed to remove logfile: %v", err) 396 } 397 } 398 } 399 }() 400 401 for i := w.maxFiles; i > 1; i-- { 402 f, err := os.Open(fmt.Sprintf("%s.%d", w.f.Name(), i-1)) 403 if err != nil { 404 if !os.IsNotExist(err) { 405 return nil, errors.Wrap(err, "error opening rotated log file") 406 } 407 408 fileName := fmt.Sprintf("%s.%d.gz", w.f.Name(), i-1) 409 decompressedFileName := fileName + tmpLogfileSuffix 410 tmpFile, err := w.filesRefCounter.GetReference(decompressedFileName, func(refFileName string, exists bool) (*os.File, error) { 411 if exists { 412 return os.Open(refFileName) 413 } 414 return decompressfile(fileName, refFileName, config.Since) 415 }) 416 417 if err != nil { 418 if !os.IsNotExist(errors.Cause(err)) { 419 return nil, errors.Wrap(err, "error getting reference to decompressed log file") 420 } 421 continue 422 } 423 if tmpFile == nil { 424 // The log before `config.Since` does not need to read 425 break 426 } 427 428 files = append(files, tmpFile) 429 continue 430 } 431 files = append(files, f) 432 } 433 434 return files, nil 435 } 436 437 func decompressfile(fileName, destFileName string, since time.Time) (*os.File, error) { 438 cf, err := os.Open(fileName) 439 if err != nil { 440 return nil, errors.Wrap(err, "error opening file for decompression") 441 } 442 defer cf.Close() 443 444 rc, err := gzip.NewReader(cf) 445 if err != nil { 446 return nil, errors.Wrap(err, "error making gzip reader for compressed log file") 447 } 448 defer rc.Close() 449 450 // Extract the last log entry timestramp from the gzip header 451 extra := &rotateFileMetadata{} 452 err = json.Unmarshal(rc.Header.Extra, extra) 453 if err == nil && extra.LastTime.Before(since) { 454 return nil, nil 455 } 456 457 rs, err := openFile(destFileName, os.O_CREATE|os.O_RDWR, 0640) 458 if err != nil { 459 return nil, errors.Wrap(err, "error creating file for copying decompressed log stream") 460 } 461 462 _, err = pools.Copy(rs, rc) 463 if err != nil { 464 rs.Close() 465 rErr := os.Remove(rs.Name()) 466 if rErr != nil && !os.IsNotExist(rErr) { 467 logrus.Errorf("Failed to remove logfile: %v", rErr) 468 } 469 return nil, errors.Wrap(err, "error while copying decompressed log stream to file") 470 } 471 472 return rs, nil 473 } 474 475 func newSectionReader(f *os.File) (*io.SectionReader, error) { 476 // seek to the end to get the size 477 // we'll leave this at the end of the file since section reader does not advance the reader 478 size, err := f.Seek(0, os.SEEK_END) 479 if err != nil { 480 return nil, errors.Wrap(err, "error getting current file size") 481 } 482 return io.NewSectionReader(f, 0, size), nil 483 } 484 485 func tailFiles(files []SizeReaderAt, watcher *logger.LogWatcher, createDecoder makeDecoderFunc, getTailReader GetTailReaderFunc, config logger.ReadConfig) { 486 nLines := config.Tail 487 488 ctx, cancel := context.WithCancel(context.Background()) 489 defer cancel() 490 // TODO(@cpuguy83): we should plumb a context through instead of dealing with `WatchClose()` here. 491 go func() { 492 select { 493 case <-ctx.Done(): 494 case <-watcher.WatchConsumerGone(): 495 cancel() 496 } 497 }() 498 499 readers := make([]io.Reader, 0, len(files)) 500 501 if config.Tail > 0 { 502 for i := len(files) - 1; i >= 0 && nLines > 0; i-- { 503 tail, n, err := getTailReader(ctx, files[i], nLines) 504 if err != nil { 505 watcher.Err <- errors.Wrap(err, "error finding file position to start log tailing") 506 return 507 } 508 nLines -= n 509 readers = append([]io.Reader{tail}, readers...) 510 } 511 } else { 512 for _, r := range files { 513 readers = append(readers, &wrappedReaderAt{ReaderAt: r}) 514 } 515 } 516 517 rdr := io.MultiReader(readers...) 518 decodeLogLine := createDecoder(rdr) 519 for { 520 msg, err := decodeLogLine() 521 if err != nil { 522 if errors.Cause(err) != io.EOF { 523 watcher.Err <- err 524 } 525 return 526 } 527 if !config.Since.IsZero() && msg.Timestamp.Before(config.Since) { 528 continue 529 } 530 if !config.Until.IsZero() && msg.Timestamp.After(config.Until) { 531 return 532 } 533 select { 534 case <-ctx.Done(): 535 return 536 case watcher.Msg <- msg: 537 } 538 } 539 } 540 541 func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, createDecoder makeDecoderFunc, since, until time.Time) { 542 decodeLogLine := createDecoder(f) 543 544 name := f.Name() 545 fileWatcher, err := watchFile(name) 546 if err != nil { 547 logWatcher.Err <- err 548 return 549 } 550 defer func() { 551 f.Close() 552 fileWatcher.Close() 553 }() 554 555 var retries int 556 handleRotate := func() error { 557 f.Close() 558 fileWatcher.Remove(name) 559 560 // retry when the file doesn't exist 561 for retries := 0; retries <= 5; retries++ { 562 f, err = os.Open(name) 563 if err == nil || !os.IsNotExist(err) { 564 break 565 } 566 } 567 if err != nil { 568 return err 569 } 570 if err := fileWatcher.Add(name); err != nil { 571 return err 572 } 573 decodeLogLine = createDecoder(f) 574 return nil 575 } 576 577 errRetry := errors.New("retry") 578 errDone := errors.New("done") 579 waitRead := func() error { 580 select { 581 case e := <-fileWatcher.Events(): 582 switch e.Op { 583 case fsnotify.Write: 584 decodeLogLine = createDecoder(f) 585 return nil 586 case fsnotify.Rename, fsnotify.Remove: 587 select { 588 case <-notifyRotate: 589 case <-logWatcher.WatchProducerGone(): 590 return errDone 591 case <-logWatcher.WatchConsumerGone(): 592 return errDone 593 } 594 if err := handleRotate(); err != nil { 595 return err 596 } 597 return nil 598 } 599 return errRetry 600 case err := <-fileWatcher.Errors(): 601 logrus.Debugf("logger got error watching file: %v", err) 602 // Something happened, let's try and stay alive and create a new watcher 603 if retries <= 5 { 604 fileWatcher.Close() 605 fileWatcher, err = watchFile(name) 606 if err != nil { 607 return err 608 } 609 retries++ 610 return errRetry 611 } 612 return err 613 case <-logWatcher.WatchProducerGone(): 614 return errDone 615 case <-logWatcher.WatchConsumerGone(): 616 return errDone 617 } 618 } 619 620 oldSize := int64(-1) 621 handleDecodeErr := func(err error) error { 622 if errors.Cause(err) != io.EOF { 623 return err 624 } 625 626 // Handle special case (#39235): max-file=1 and file was truncated 627 st, stErr := f.Stat() 628 if stErr == nil { 629 size := st.Size() 630 defer func() { oldSize = size }() 631 if size < oldSize { // truncated 632 f.Seek(0, 0) 633 return nil 634 } 635 } else { 636 logrus.WithError(stErr).Warn("logger: stat error") 637 } 638 639 for { 640 err := waitRead() 641 if err == nil { 642 break 643 } 644 if err == errRetry { 645 continue 646 } 647 return err 648 } 649 return nil 650 } 651 652 // main loop 653 for { 654 msg, err := decodeLogLine() 655 if err != nil { 656 if err := handleDecodeErr(err); err != nil { 657 if err == errDone { 658 return 659 } 660 // we got an unrecoverable error, so return 661 logWatcher.Err <- err 662 return 663 } 664 // ready to try again 665 continue 666 } 667 668 retries = 0 // reset retries since we've succeeded 669 if !since.IsZero() && msg.Timestamp.Before(since) { 670 continue 671 } 672 if !until.IsZero() && msg.Timestamp.After(until) { 673 return 674 } 675 // send the message, unless the consumer is gone 676 select { 677 case logWatcher.Msg <- msg: 678 case <-logWatcher.WatchConsumerGone(): 679 return 680 } 681 } 682 } 683 684 func watchFile(name string) (filenotify.FileWatcher, error) { 685 var fileWatcher filenotify.FileWatcher 686 687 if runtime.GOOS == "windows" { 688 // FileWatcher on Windows files is based on the syscall notifications which has an issue because of file caching. 689 // It is based on ReadDirectoryChangesW() which doesn't detect writes to the cache. It detects writes to disk only. 690 // Because of the OS lazy writing, we don't get notifications for file writes and thereby the watcher 691 // doesn't work. Hence for Windows we will use poll based notifier. 692 fileWatcher = filenotify.NewPollingWatcher() 693 } else { 694 var err error 695 fileWatcher, err = filenotify.New() 696 if err != nil { 697 return nil, err 698 } 699 } 700 701 logger := logrus.WithFields(logrus.Fields{ 702 "module": "logger", 703 "file": name, 704 }) 705 706 if err := fileWatcher.Add(name); err != nil { 707 // we will retry using file poller. 708 logger.WithError(err).Warnf("falling back to file poller") 709 fileWatcher.Close() 710 fileWatcher = filenotify.NewPollingWatcher() 711 712 if err := fileWatcher.Add(name); err != nil { 713 fileWatcher.Close() 714 logger.WithError(err).Debugf("error watching log file for modifications") 715 return nil, err 716 } 717 } 718 719 return fileWatcher, nil 720 } 721 722 type wrappedReaderAt struct { 723 io.ReaderAt 724 pos int64 725 } 726 727 func (r *wrappedReaderAt) Read(p []byte) (int, error) { 728 n, err := r.ReaderAt.ReadAt(p, r.pos) 729 r.pos += int64(n) 730 return n, err 731 }