package loggerutils // import "github.com/demonoid81/moby/daemon/logger/loggerutils"

import (
	"compress/gzip"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/demonoid81/moby/daemon/logger"
	"github.com/demonoid81/moby/pkg/filenotify"
	"github.com/demonoid81/moby/pkg/pools"
	"github.com/demonoid81/moby/pkg/pubsub"
	"github.com/fsnotify/fsnotify"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

const tmpLogfileSuffix = ".tmp"

// rotateFileMetadata is metadata stored in the gzip header of a compressed log file
type rotateFileMetadata struct {
	LastTime time.Time `json:"lastTime,omitempty"`
}
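
// The metadata lets readers skip decompressing rotated files whose newest
// entry predates a requested "since" time; see decompressfile below.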

// refCounter is a reference counter for log files
type refCounter struct {
	mu      sync.Mutex
	counter map[string]int
}

// GetReference increases the reference counter for the specified logfile and
// returns an open handle to it, creating the entry if needed
func (rc *refCounter) GetReference(fileName string, openRefFile func(fileName string, exists bool) (*os.File, error)) (*os.File, error) {
	rc.mu.Lock()
	defer rc.mu.Unlock()

	var (
		file *os.File
		err  error
	)
	_, ok := rc.counter[fileName]
	file, err = openRefFile(fileName, ok)
	if err != nil {
		return nil, err
	}

	if ok {
		rc.counter[fileName]++
	} else if file != nil {
		rc.counter[file.Name()] = 1
	}

	return file, nil
}

// Dereference decreases the reference counter for the specified logfile and
// removes the file once no references remain
func (rc *refCounter) Dereference(fileName string) error {
	rc.mu.Lock()
	defer rc.mu.Unlock()

	rc.counter[fileName]--
	if rc.counter[fileName] <= 0 {
		delete(rc.counter, fileName)
		err := os.Remove(fileName)
		if err != nil {
			return err
		}
	}
	return nil
}
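
// Illustrative use of the counter (hypothetical names; openRotatedFiles below
// is the real caller). A decompressed temp file is shared between concurrent
// readers and deleted only when the last reader dereferences it:
//
//	f, err := rc.GetReference(gzName+tmpLogfileSuffix, func(name string, exists bool) (*os.File, error) {
//		if exists {
//			return os.Open(name) // another reader already decompressed it
//		}
//		return decompressfile(gzName, name, time.Time{})
//	})
//	// ... read from f ...
//	_ = rc.Dereference(gzName + tmpLogfileSuffix) // removes the temp file at zero refs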

// LogFile is a Logger implementation for default Docker logging.
type LogFile struct {
	mu              sync.RWMutex // protects the logfile access
	f               *os.File     // store for closing
	closed          bool
	rotateMu        sync.Mutex // blocks the next rotation until the current rotation is completed
	capacity        int64      // maximum size of each file
	currentSize     int64      // current size of the latest file
	maxFiles        int        // maximum number of files
	compress        bool       // whether old versions of log files are compressed
	lastTimestamp   time.Time  // timestamp of the last log
	filesRefCounter refCounter // keeps a reference count of decompressed files
	notifyRotate    *pubsub.Publisher
	marshal         logger.MarshalFunc
	createDecoder   MakeDecoderFn
	getTailReader   GetTailReaderFunc
	perms           os.FileMode
}

// MakeDecoderFn creates a decoder
type MakeDecoderFn func(rdr io.Reader) Decoder

// Decoder is for reading logs
// It is created by the log reader by calling the `MakeDecoderFn`
type Decoder interface {
	// Reset resets the decoder
	// Reset is called for certain events, such as log rotations
	Reset(io.Reader)
	// Decode decodes the next log message from the stream
	Decode() (*logger.Message, error)
	// Close signals to the decoder that it can release whatever resources it was using.
	Close()
}
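
// A minimal Decoder sketch over newline-delimited JSON (hypothetical; each
// log driver supplies its own implementation via MakeDecoderFn):
//
//	type jsonDecoder struct{ dec *json.Decoder }
//
//	func (d *jsonDecoder) Reset(r io.Reader) { d.dec = json.NewDecoder(r) }
//	func (d *jsonDecoder) Close()            {}
//
//	func (d *jsonDecoder) Decode() (*logger.Message, error) {
//		var m logger.Message
//		if err := d.dec.Decode(&m); err != nil {
//			return nil, err
//		}
//		return &m, nil
//	}
//
//	var makeDec MakeDecoderFn = func(r io.Reader) Decoder {
//		d := &jsonDecoder{}
//		d.Reset(r)
//		return d
//	}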

// SizeReaderAt defines a ReaderAt that also reports its size.
// This is used for tailing log files.
type SizeReaderAt interface {
	io.ReaderAt
	Size() int64
}

// GetTailReaderFunc is used to truncate a reader to only read as much as is required
// in order to get the passed in number of log lines.
// It returns the sectioned reader, the number of lines that the section reader
// contains, and any error that occurs.
type GetTailReaderFunc func(ctx context.Context, f SizeReaderAt, nLogLines int) (rdr io.Reader, nLines int, err error)
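
// A naive GetTailReaderFunc sketch (assumes newline-delimited entries and
// reads the whole file; real implementations seek backwards instead):
//
//	func naiveTail(ctx context.Context, f SizeReaderAt, n int) (io.Reader, int, error) {
//		b := make([]byte, f.Size())
//		if _, err := f.ReadAt(b, 0); err != nil && err != io.EOF {
//			return nil, 0, err
//		}
//		lines := bytes.SplitAfter(b, []byte("\n"))
//		if len(lines) > 0 && len(lines[len(lines)-1]) == 0 {
//			lines = lines[:len(lines)-1] // drop the empty tail after a final newline
//		}
//		if n < len(lines) {
//			lines = lines[len(lines)-n:]
//		}
//		return bytes.NewReader(bytes.Join(lines, nil)), len(lines), nil
//	}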

// NewLogFile creates a new LogFile
func NewLogFile(logPath string, capacity int64, maxFiles int, compress bool, marshaller logger.MarshalFunc, decodeFunc MakeDecoderFn, perms os.FileMode, getTailReader GetTailReaderFunc) (*LogFile, error) {
	log, err := openFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, perms)
	if err != nil {
		return nil, err
	}

	size, err := log.Seek(0, io.SeekEnd)
	if err != nil {
		return nil, err
	}

	return &LogFile{
		f:               log,
		capacity:        capacity,
		currentSize:     size,
		maxFiles:        maxFiles,
		compress:        compress,
		filesRefCounter: refCounter{counter: make(map[string]int)},
		notifyRotate:    pubsub.NewPublisher(0, 1),
		marshal:         marshaller,
		createDecoder:   decodeFunc,
		perms:           perms,
		getTailReader:   getTailReader,
	}, nil
}
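
// Example construction (hypothetical path and helper funcs; log drivers such
// as json-file wire in their own marshal/decode/tail functions):
//
//	lf, err := NewLogFile("/var/log/container/app.log",
//		10<<20, // rotate once the file reaches 10 MiB
//		5,      // keep at most 5 files
//		true,   // gzip rotated files
//		marshalFunc, makeDecoderFn, 0640, getTailReader)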

// WriteLogEntry writes the provided log message to the current log file.
// This may trigger a rotation event if the max file/capacity limits are hit.
func (w *LogFile) WriteLogEntry(msg *logger.Message) error {
	b, err := w.marshal(msg)
	if err != nil {
		return errors.Wrap(err, "error marshalling log message")
	}

	logger.PutMessage(msg)

	w.mu.Lock()
	if w.closed {
		w.mu.Unlock()
		return errors.New("cannot write because the output file was closed")
	}

	if err := w.checkCapacityAndRotate(); err != nil {
		w.mu.Unlock()
		return err
	}

	n, err := w.f.Write(b)
	if err == nil {
		w.currentSize += int64(n)
		w.lastTimestamp = msg.Timestamp
	}
	w.mu.Unlock()
	return err
}

func (w *LogFile) checkCapacityAndRotate() error {
	if w.capacity == -1 {
		return nil
	}

	if w.currentSize >= w.capacity {
		w.rotateMu.Lock()
		fname := w.f.Name()
		if err := w.f.Close(); err != nil {
			// if there was an error during a prior rotate, the file could already be closed
			if !errors.Is(err, os.ErrClosed) {
				w.rotateMu.Unlock()
				return errors.Wrap(err, "error closing file")
			}
		}
		if err := rotate(fname, w.maxFiles, w.compress); err != nil {
			w.rotateMu.Unlock()
			return err
		}
		file, err := openFile(fname, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, w.perms)
		if err != nil {
			w.rotateMu.Unlock()
			return err
		}
		w.f = file
		w.currentSize = 0
		w.notifyRotate.Publish(struct{}{})

		if w.maxFiles <= 1 || !w.compress {
			w.rotateMu.Unlock()
			return nil
		}

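		// rotateMu is intentionally handed off to this goroutine and released
		// only after compression finishes, so the next rotation blocks until
		// this one has fully completed.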
		go func() {
			compressFile(fname+".1", w.lastTimestamp)
			w.rotateMu.Unlock()
		}()
	}

	return nil
}

func rotate(name string, maxFiles int, compress bool) error {
	if maxFiles < 2 {
		return nil
	}

	var extension string
	if compress {
		extension = ".gz"
	}

	lastFile := fmt.Sprintf("%s.%d%s", name, maxFiles-1, extension)
	err := os.Remove(lastFile)
	if err != nil && !os.IsNotExist(err) {
		return errors.Wrap(err, "error removing oldest log file")
	}

	for i := maxFiles - 1; i > 1; i-- {
		toPath := name + "." + strconv.Itoa(i) + extension
		fromPath := name + "." + strconv.Itoa(i-1) + extension
		if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) {
			return err
		}
	}

	if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) {
		return err
	}

	return nil
}
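
// For example, with name="app.log", maxFiles=3 and compress=true, a single
// rotation performs:
//
//	remove app.log.2.gz
//	rename app.log.1.gz -> app.log.2.gz
//	rename app.log      -> app.log.1   (compressed to app.log.1.gz by the caller)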

func compressFile(fileName string, lastTimestamp time.Time) {
	file, err := os.Open(fileName)
	if err != nil {
		logrus.Errorf("Failed to open log file: %v", err)
		return
	}
	defer func() {
		file.Close()
		err := os.Remove(fileName)
		if err != nil {
			logrus.Errorf("Failed to remove source log file: %v", err)
		}
	}()

	outFile, err := openFile(fileName+".gz", os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0640)
	if err != nil {
		logrus.Errorf("Failed to open or create gzip log file: %v", err)
		return
	}
	defer func() {
		outFile.Close()
		if err != nil {
			os.Remove(fileName + ".gz")
		}
	}()

	compressWriter := gzip.NewWriter(outFile)
	defer compressWriter.Close()

	// Add the last log entry timestamp to the gzip header
	extra := rotateFileMetadata{}
	extra.LastTime = lastTimestamp
	compressWriter.Header.Extra, err = json.Marshal(&extra)
	if err != nil {
		// Here log the error only and don't return since this is just an optimization.
		logrus.Warningf("Failed to marshal gzip header as JSON: %v", err)
	}

	_, err = pools.Copy(compressWriter, file)
	if err != nil {
		logrus.WithError(err).WithField("module", "container.logs").WithField("file", fileName).Error("Error compressing log file")
		return
	}
}
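
// Reading the header back without decompressing the body (sketch; this is
// what decompressfile below relies on):
//
//	f, _ := os.Open("app.log.1.gz")
//	gz, _ := gzip.NewReader(f)
//	var meta rotateFileMetadata
//	if err := json.Unmarshal(gz.Header.Extra, &meta); err == nil {
//		fmt.Println("newest entry in file:", meta.LastTime)
//	}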

// MaxFiles returns the maximum number of log files
func (w *LogFile) MaxFiles() int {
	return w.maxFiles
}

// Close closes the underlying file and signals all readers to stop.
func (w *LogFile) Close() error {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.closed {
		return nil
	}
	if err := w.f.Close(); err != nil {
		return err
	}
	w.closed = true
	return nil
}

// ReadLogs decodes entries from log files and sends them to the passed-in watcher
//
// Note: Using the follow option can become inconsistent when rotations are
// very frequent and the maximum number of log files is 1.
// TODO: Consider a different implementation which can effectively follow logs under frequent rotations.
func (w *LogFile) ReadLogs(config logger.ReadConfig, watcher *logger.LogWatcher) {
	w.mu.RLock()
	currentFile, err := os.Open(w.f.Name())
	if err != nil {
		w.mu.RUnlock()
		watcher.Err <- err
		return
	}
	defer currentFile.Close()

	dec := w.createDecoder(nil)
	defer dec.Close()

	currentChunk, err := newSectionReader(currentFile)
	if err != nil {
		w.mu.RUnlock()
		watcher.Err <- err
		return
	}

	if config.Tail != 0 {
		// TODO(@cpuguy83): Instead of opening every file, only get the files which
		// are needed to tail.
		// This is especially costly when compression is enabled.
		files, err := w.openRotatedFiles(config)
		w.mu.RUnlock()
		if err != nil {
			watcher.Err <- err
			return
		}

		closeFiles := func() {
			for _, f := range files {
				f.Close()
				fileName := f.Name()
				if strings.HasSuffix(fileName, tmpLogfileSuffix) {
					err := w.filesRefCounter.Dereference(fileName)
					if err != nil {
						logrus.Errorf("Failed to dereference the log file %q: %v", fileName, err)
					}
				}
			}
		}

		readers := make([]SizeReaderAt, 0, len(files)+1)
		for _, f := range files {
			stat, err := f.Stat()
			if err != nil {
				watcher.Err <- errors.Wrap(err, "error reading size of rotated file")
				closeFiles()
				return
			}
			readers = append(readers, io.NewSectionReader(f, 0, stat.Size()))
		}
		if currentChunk.Size() > 0 {
			readers = append(readers, currentChunk)
		}

		tailFiles(readers, watcher, dec, w.getTailReader, config)
		closeFiles()

		w.mu.RLock()
	}

	if !config.Follow || w.closed {
		w.mu.RUnlock()
		return
	}
	w.mu.RUnlock()

	notifyRotate := w.notifyRotate.Subscribe()
	defer w.notifyRotate.Evict(notifyRotate)
	followLogs(currentFile, watcher, notifyRotate, dec, config.Since, config.Until)
}
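
// Typical consumption (hypothetical caller):
//
//	watcher := logger.NewLogWatcher()
//	go lf.ReadLogs(logger.ReadConfig{Tail: 100, Follow: true}, watcher)
//	for {
//		select {
//		case msg := <-watcher.Msg:
//			// deliver msg to the client
//		case err := <-watcher.Err:
//			// handle read error
//		}
//	}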

func (w *LogFile) openRotatedFiles(config logger.ReadConfig) (files []*os.File, err error) {
	w.rotateMu.Lock()
	defer w.rotateMu.Unlock()

	defer func() {
		if err == nil {
			return
		}
		for _, f := range files {
			f.Close()
			if strings.HasSuffix(f.Name(), tmpLogfileSuffix) {
				err := os.Remove(f.Name())
				if err != nil && !os.IsNotExist(err) {
					logrus.Warnf("Failed to remove logfile: %v", err)
				}
			}
		}
	}()

	for i := w.maxFiles; i > 1; i-- {
		f, err := os.Open(fmt.Sprintf("%s.%d", w.f.Name(), i-1))
		if err != nil {
			if !os.IsNotExist(err) {
				return nil, errors.Wrap(err, "error opening rotated log file")
			}

			fileName := fmt.Sprintf("%s.%d.gz", w.f.Name(), i-1)
			decompressedFileName := fileName + tmpLogfileSuffix
			tmpFile, err := w.filesRefCounter.GetReference(decompressedFileName, func(refFileName string, exists bool) (*os.File, error) {
				if exists {
					return os.Open(refFileName)
				}
				return decompressfile(fileName, refFileName, config.Since)
			})

			if err != nil {
				if !errors.Is(err, os.ErrNotExist) {
					return nil, errors.Wrap(err, "error getting reference to decompressed log file")
				}
				continue
			}
			if tmpFile == nil {
				// every entry in this file predates `config.Since`; skip it
				// and keep scanning the newer rotated files
				continue
			}

			files = append(files, tmpFile)
			continue
		}
		files = append(files, f)
	}

	return files, nil
}

func decompressfile(fileName, destFileName string, since time.Time) (*os.File, error) {
	cf, err := os.Open(fileName)
	if err != nil {
		return nil, errors.Wrap(err, "error opening file for decompression")
	}
	defer cf.Close()

	rc, err := gzip.NewReader(cf)
	if err != nil {
		return nil, errors.Wrap(err, "error making gzip reader for compressed log file")
	}
	defer rc.Close()

	// Extract the last log entry timestamp from the gzip header
	extra := &rotateFileMetadata{}
	err = json.Unmarshal(rc.Header.Extra, extra)
	if err == nil && extra.LastTime.Before(since) {
		return nil, nil
	}

	rs, err := openFile(destFileName, os.O_CREATE|os.O_RDWR, 0640)
	if err != nil {
		return nil, errors.Wrap(err, "error creating file for copying decompressed log stream")
	}

	_, err = pools.Copy(rs, rc)
	if err != nil {
		rs.Close()
		rErr := os.Remove(rs.Name())
		if rErr != nil && !os.IsNotExist(rErr) {
			logrus.Errorf("Failed to remove logfile: %v", rErr)
		}
		return nil, errors.Wrap(err, "error while copying decompressed log stream to file")
	}

	return rs, nil
}

func newSectionReader(f *os.File) (*io.SectionReader, error) {
	// seek to the end to get the size
	// we'll leave this at the end of the file since section reader does not advance the reader
	size, err := f.Seek(0, io.SeekEnd)
	if err != nil {
		return nil, errors.Wrap(err, "error getting current file size")
	}
	return io.NewSectionReader(f, 0, size), nil
}

func tailFiles(files []SizeReaderAt, watcher *logger.LogWatcher, dec Decoder, getTailReader GetTailReaderFunc, config logger.ReadConfig) {
	nLines := config.Tail

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// TODO(@cpuguy83): we should plumb a context through instead of dealing with `WatchConsumerGone()` here.
	go func() {
		select {
		case <-ctx.Done():
		case <-watcher.WatchConsumerGone():
			cancel()
		}
	}()

	readers := make([]io.Reader, 0, len(files))

	if config.Tail > 0 {
		for i := len(files) - 1; i >= 0 && nLines > 0; i-- {
			tail, n, err := getTailReader(ctx, files[i], nLines)
			if err != nil {
				watcher.Err <- errors.Wrap(err, "error finding file position to start log tailing")
				return
			}
			nLines -= n
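			// prepend so that output stays in chronological order even though
			// files are visited newest-first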
			readers = append([]io.Reader{tail}, readers...)
		}
	} else {
		for _, r := range files {
			readers = append(readers, &wrappedReaderAt{ReaderAt: r})
		}
	}

	rdr := io.MultiReader(readers...)
	dec.Reset(rdr)

	for {
		msg, err := dec.Decode()
		if err != nil {
			if !errors.Is(err, io.EOF) {
				watcher.Err <- err
			}
			return
		}
		if !config.Since.IsZero() && msg.Timestamp.Before(config.Since) {
			continue
		}
		if !config.Until.IsZero() && msg.Timestamp.After(config.Until) {
			return
		}
		select {
		case <-ctx.Done():
			return
		case watcher.Msg <- msg:
		}
	}
}

func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, dec Decoder, since, until time.Time) {
	dec.Reset(f)

	name := f.Name()
	fileWatcher, err := watchFile(name)
	if err != nil {
		logWatcher.Err <- err
		return
	}
	defer func() {
		f.Close()
		fileWatcher.Close()
	}()

	var retries int
	handleRotate := func() error {
		f.Close()
		fileWatcher.Remove(name)

		// retry when the file doesn't exist
		for retries := 0; retries <= 5; retries++ {
			f, err = os.Open(name)
			if err == nil || !os.IsNotExist(err) {
				break
			}
		}
		if err != nil {
			return err
		}
		if err := fileWatcher.Add(name); err != nil {
			return err
		}
		dec.Reset(f)
		return nil
	}

	errRetry := errors.New("retry")
	errDone := errors.New("done")
	waitRead := func() error {
		select {
		case e := <-fileWatcher.Events():
			switch e.Op {
			case fsnotify.Write:
				dec.Reset(f)
				return nil
			case fsnotify.Rename, fsnotify.Remove:
				select {
				case <-notifyRotate:
				case <-logWatcher.WatchProducerGone():
					return errDone
				case <-logWatcher.WatchConsumerGone():
					return errDone
				}
				if err := handleRotate(); err != nil {
					return err
				}
				return nil
			}
			return errRetry
		case err := <-fileWatcher.Errors():
			logrus.Debugf("logger got error watching file: %v", err)
			// Something happened, let's try and stay alive and create a new watcher
			if retries <= 5 {
				fileWatcher.Close()
				fileWatcher, err = watchFile(name)
				if err != nil {
					return err
				}
				retries++
				return errRetry
			}
			return err
		case <-logWatcher.WatchProducerGone():
			return errDone
		case <-logWatcher.WatchConsumerGone():
			return errDone
		}
	}

	oldSize := int64(-1)
	handleDecodeErr := func(err error) error {
		if !errors.Is(err, io.EOF) {
			return err
		}

		// Handle special case (#39235): max-file=1 and file was truncated
		st, stErr := f.Stat()
		if stErr == nil {
			size := st.Size()
			defer func() { oldSize = size }()
			if size < oldSize { // truncated
				f.Seek(0, io.SeekStart)
				return nil
			}
		} else {
			logrus.WithError(stErr).Warn("logger: stat error")
		}

		for {
			err := waitRead()
			if err == nil {
				break
			}
			if err == errRetry {
				continue
			}
			return err
		}
		return nil
	}

	// main loop
	for {
		msg, err := dec.Decode()
		if err != nil {
			if err := handleDecodeErr(err); err != nil {
				if err == errDone {
					return
				}
				// we got an unrecoverable error, so return
				logWatcher.Err <- err
				return
			}
			// ready to try again
			continue
		}

		retries = 0 // reset retries since we've succeeded
		if !since.IsZero() && msg.Timestamp.Before(since) {
			continue
		}
		if !until.IsZero() && msg.Timestamp.After(until) {
			return
		}
		// send the message, unless the consumer is gone
		select {
		case logWatcher.Msg <- msg:
		case <-logWatcher.WatchConsumerGone():
			return
		}
	}
}

func watchFile(name string) (filenotify.FileWatcher, error) {
	var fileWatcher filenotify.FileWatcher

	if runtime.GOOS == "windows" {
		// The Windows file watcher is based on ReadDirectoryChangesW() syscall
		// notifications, which only detect writes that have reached the disk,
		// not writes still sitting in the file cache. Because of the OS's lazy
		// writing, we don't get notifications for file writes and the watcher
		// doesn't work, so Windows uses the poll-based notifier instead.
		fileWatcher = filenotify.NewPollingWatcher()
	} else {
		var err error
		fileWatcher, err = filenotify.New()
		if err != nil {
			return nil, err
		}
	}

	logger := logrus.WithFields(logrus.Fields{
		"module": "logger",
		"file":   name,
	})

	if err := fileWatcher.Add(name); err != nil {
		// we will retry using file poller.
		logger.WithError(err).Warnf("falling back to file poller")
		fileWatcher.Close()
		fileWatcher = filenotify.NewPollingWatcher()

		if err := fileWatcher.Add(name); err != nil {
			fileWatcher.Close()
			logger.WithError(err).Debugf("error watching log file for modifications")
			return nil, err
		}
	}

	return fileWatcher, nil
}

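// wrappedReaderAt adapts an io.ReaderAt into a sequential io.Reader by
// tracking the current read offset; tailFiles uses it when no line limit
// is set and the files must be streamed in full.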
type wrappedReaderAt struct {
	io.ReaderAt
	pos int64
}

func (r *wrappedReaderAt) Read(p []byte) (int, error) {
	n, err := r.ReaderAt.ReadAt(p, r.pos)
	r.pos += int64(n)
	return n, err
}