github.com/opsramp/moby@v1.13.1/daemon/logger/jsonfilelog/read.go

package jsonfilelog

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"time"

	"github.com/fsnotify/fsnotify"
	"golang.org/x/net/context"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/pkg/filenotify"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonlog"
	"github.com/docker/docker/pkg/tailfile"
)

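// maxJSONDecodeRetry bounds how many times a failed decode of a log entry is
// retried before the error is reported to the log watcher.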
const maxJSONDecodeRetry = 20000

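// decodeLogLine decodes the next JSON-encoded log entry from dec into a
// logger.Message.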
func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) {
	l.Reset()
	if err := dec.Decode(l); err != nil {
		return nil, err
	}
	msg := &logger.Message{
		Source:    l.Stream,
		Timestamp: l.Created,
		Line:      []byte(l.Log),
		Attrs:     l.Attrs,
	}
	return msg, nil
}

// ReadLogs implements the logger's LogReader interface for the logs
// created by this driver.
func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
	logWatcher := logger.NewLogWatcher()

	go l.readLogs(logWatcher, config)
	return logWatcher
}

func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
	defer close(logWatcher.Msg)

	// Lock so the read stream doesn't get corrupted by rotations or other log
	// data written while we read. Note that this blocks writes!
	l.mu.Lock()

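	// Collect the rotated files, oldest first, so the tail spans them in order
	// before the current log file.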
	pth := l.writer.LogPath()
	var files []io.ReadSeeker
	for i := l.writer.MaxFiles(); i > 1; i-- {
		f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1))
		if err != nil {
			if !os.IsNotExist(err) {
				logWatcher.Err <- err
				break
			}
			continue
		}
		defer f.Close()

		files = append(files, f)
	}

	latestFile, err := os.Open(pth)
	if err != nil {
		logWatcher.Err <- err
		l.mu.Unlock()
		return
	}
	defer latestFile.Close()

	if config.Tail != 0 {
		tailer := ioutils.MultiReadSeeker(append(files, latestFile)...)
		tailFile(tailer, logWatcher, config.Tail, config.Since)
	}

	// close all the rotated files
	for _, f := range files {
		if err := f.(io.Closer).Close(); err != nil {
			logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err)
		}
	}

	if !config.Follow {
		if err := latestFile.Close(); err != nil {
			logrus.Errorf("Error closing file: %v", err)
		}
		l.mu.Unlock()
		return
	}

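	// Unless the caller asked for the whole file (Tail < 0), skip to the end of
	// the current file so only new entries are followed.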
	if config.Tail >= 0 {
		latestFile.Seek(0, os.SEEK_END)
	}

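	// Register the watcher and release the lock before blocking in followLogs.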
	l.readers[logWatcher] = struct{}{}
	l.mu.Unlock()

	notifyRotate := l.writer.NotifyRotate()
	followLogs(latestFile, logWatcher, notifyRotate, config.Since)

	l.mu.Lock()
	delete(l.readers, logWatcher)
	l.mu.Unlock()

	l.writer.NotifyRotateEvict(notifyRotate)
}

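// tailFile sends the last "tail" lines of f (or all of f when tail is
// negative) to the log watcher, skipping entries older than "since".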
func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) {
	var rdr io.Reader = f
	if tail > 0 {
		ls, err := tailfile.TailFile(f, tail)
		if err != nil {
			logWatcher.Err <- err
			return
		}
		rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n")))
	}
	dec := json.NewDecoder(rdr)
	l := &jsonlog.JSONLog{}
	for {
		msg, err := decodeLogLine(dec, l)
		if err != nil {
			if err != io.EOF {
				logWatcher.Err <- err
			}
			return
		}
		if !since.IsZero() && msg.Timestamp.Before(since) {
			continue
		}
		logWatcher.Msg <- msg
	}
}

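// watchFile watches name for changes, falling back to the polling-based
// watcher when the initial watch cannot be added.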
func watchFile(name string) (filenotify.FileWatcher, error) {
	fileWatcher, err := filenotify.New()
	if err != nil {
		return nil, err
	}

	if err := fileWatcher.Add(name); err != nil {
		logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err)
		fileWatcher.Close()
		fileWatcher = filenotify.NewPollingWatcher()

		if err := fileWatcher.Add(name); err != nil {
			fileWatcher.Close()
			logrus.Debugf("error watching log file for modifications: %v", err)
			return nil, err
		}
	}
	return fileWatcher, nil
}

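// followLogs streams new entries from f to the log watcher until the watcher
// is closed, transparently handling log rotation and watcher errors.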
func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) {
	dec := json.NewDecoder(f)
	l := &jsonlog.JSONLog{}

	name := f.Name()
	fileWatcher, err := watchFile(name)
	if err != nil {
		logWatcher.Err <- err
		return
	}
	defer func() {
		f.Close()
		fileWatcher.Remove(name)
		fileWatcher.Close()
	}()

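	// Propagate a close request from the log watcher into the context so the
	// select loops below can stop.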
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		select {
		case <-logWatcher.WatchClose():
			fileWatcher.Remove(name)
			cancel()
		case <-ctx.Done():
			return
		}
	}()

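	// handleRotate reopens the log file after it has been rotated away and
	// resets the decoder to read from the new file.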
	var retries int
	handleRotate := func() error {
		f.Close()
		fileWatcher.Remove(name)

		// retry when the file doesn't exist
		for retries := 0; retries <= 5; retries++ {
			f, err = os.Open(name)
			if err == nil || !os.IsNotExist(err) {
				break
			}
		}
		if err != nil {
			return err
		}
		if err := fileWatcher.Add(name); err != nil {
			return err
		}
		dec = json.NewDecoder(f)
		return nil
	}

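	// waitRead blocks until the file is written to or rotated (reopening it via
	// handleRotate), or until the watcher errors out or is closed.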
	errRetry := errors.New("retry")
	errDone := errors.New("done")
	waitRead := func() error {
		select {
		case e := <-fileWatcher.Events():
			switch e.Op {
			case fsnotify.Write:
				dec = json.NewDecoder(f)
				return nil
			case fsnotify.Rename, fsnotify.Remove:
				select {
				case <-notifyRotate:
				case <-ctx.Done():
					return errDone
				}
				if err := handleRotate(); err != nil {
					return err
				}
				return nil
			}
			return errRetry
		case err := <-fileWatcher.Errors():
			logrus.Debugf("logger got error watching file: %v", err)
			// Something happened, let's try and stay alive and create a new watcher
			if retries <= 5 {
				fileWatcher.Close()
				fileWatcher, err = watchFile(name)
				if err != nil {
					return err
				}
				retries++
				return errRetry
			}
			return err
		case <-ctx.Done():
			return errDone
		}
	}

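	// handleDecodeErr decides whether a decode error is recoverable: on EOF it
	// waits for more data, syntax errors and unexpected EOFs are retried, and
	// anything else is treated as fatal.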
	handleDecodeErr := func(err error) error {
		if err == io.EOF {
			// Block until more data is available, re-invoking waitRead on errRetry.
			for {
				err := waitRead()
				if err == nil {
					return nil
				}
				if err != errRetry {
					return err
				}
			}
		}
		// try again because this shouldn't happen
		if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry {
			dec = json.NewDecoder(f)
			retries++
			return nil
		}
		// io.ErrUnexpectedEOF is returned from json.Decoder when there is
		// remaining data in the parser's buffer while an io.EOF occurs.
		// If the json logger writes a partial json log entry to the disk
		// while at the same time the decoder tries to decode it, this race
		// surfaces as io.ErrUnexpectedEOF.
		if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry {
			reader := io.MultiReader(dec.Buffered(), f)
			dec = json.NewDecoder(reader)
			retries++
			return nil
		}
		return err
	}

	// main loop
	for {
		msg, err := decodeLogLine(dec, l)
		if err != nil {
			if err := handleDecodeErr(err); err != nil {
				if err == errDone {
					return
				}
				// we got an unrecoverable error, so return
				logWatcher.Err <- err
				return
			}
			// ready to try again
			continue
		}

		retries = 0 // reset retries since we've succeeded
		if !since.IsZero() && msg.Timestamp.Before(since) {
			continue
		}
		select {
		case logWatcher.Msg <- msg:
		case <-ctx.Done():
			// The watcher was closed while we were blocked sending: deliver the
			// current message, then drain what the decoder can still read.
			logWatcher.Msg <- msg
			for {
				msg, err := decodeLogLine(dec, l)
				if err != nil {
					return
				}
				if !since.IsZero() && msg.Timestamp.Before(since) {
					continue
				}
				logWatcher.Msg <- msg
			}
		}
	}
}