github.com/jiasir/docker@v1.3.3-0.20170609024000-252e610103e7/daemon/logger/jsonfilelog/read.go

package jsonfilelog

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"time"

	"github.com/fsnotify/fsnotify"
	"golang.org/x/net/context"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/daemon/logger/jsonfilelog/multireader"
	"github.com/docker/docker/pkg/filenotify"
	"github.com/docker/docker/pkg/jsonlog"
	"github.com/docker/docker/pkg/tailfile"
)

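// maxJSONDecodeRetry bounds how many times followLogs rebuilds its JSON
// decoder after a syntax error or a partially written entry before it
// gives up and reports the error to the log watcher.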
const maxJSONDecodeRetry = 20000

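// decodeLogLine decodes the next JSON log entry from dec into l and
// converts it into a logger.Message.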
func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) {
	l.Reset()
	if err := dec.Decode(l); err != nil {
		return nil, err
	}
	msg := &logger.Message{
		Source:    l.Stream,
		Timestamp: l.Created,
		Line:      []byte(l.Log),
		Attrs:     l.Attrs,
	}
	return msg, nil
}

// ReadLogs implements the logger's LogReader interface for the logs
// created by this driver.
func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
	logWatcher := logger.NewLogWatcher()

	go l.readLogs(logWatcher, config)
	return logWatcher
}

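// readLogs replays existing log entries from the rotated and live log files
// according to config and, if config.Follow is set (and the logger is still
// open), keeps streaming new entries until the watcher is closed.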
func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
	defer close(logWatcher.Msg)

	// lock so the read stream doesn't get corrupted due to rotations or other log data written while we read
	// This will block writes!!!
	l.mu.Lock()

	pth := l.writer.LogPath()
	var files []io.ReadSeeker
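	// Open the rotated log files, oldest first, so the tail pass below can
	// read them and the live file in chronological order.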
	for i := l.writer.MaxFiles(); i > 1; i-- {
		f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1))
		if err != nil {
			if !os.IsNotExist(err) {
				logWatcher.Err <- err
				break
			}
			continue
		}
		defer f.Close()

		files = append(files, f)
	}

	latestFile, err := os.Open(pth)
	if err != nil {
		logWatcher.Err <- err
		l.mu.Unlock()
		return
	}
	defer latestFile.Close()

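	// Replay existing log content: Tail > 0 emits only the last Tail lines,
	// Tail < 0 emits everything that has already been written.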
	if config.Tail != 0 {
		tailer := multireader.MultiReadSeeker(append(files, latestFile)...)
		tailFile(tailer, logWatcher, config.Tail, config.Since)
	}

	// close all the rotated files
	for _, f := range files {
		if err := f.(io.Closer).Close(); err != nil {
			logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err)
		}
	}

	if !config.Follow || l.closed {
		l.mu.Unlock()
		return
	}

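	// The replay above already covered any requested lines when Tail >= 0,
	// so skip to the end of the live file and only follow new writes.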
	if config.Tail >= 0 {
		latestFile.Seek(0, io.SeekEnd)
	}

	notifyRotate := l.writer.NotifyRotate()
	defer l.writer.NotifyRotateEvict(notifyRotate)

	l.readers[logWatcher] = struct{}{}

	l.mu.Unlock()

	followLogs(latestFile, logWatcher, notifyRotate, config.Since)

	l.mu.Lock()
	delete(l.readers, logWatcher)
	l.mu.Unlock()
}

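// tailFile decodes log entries from f and sends them to the watcher,
// restricted to the last tail lines when tail is positive; entries older
// than since are skipped.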
func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) {
	var rdr io.Reader = f
	if tail > 0 {
		ls, err := tailfile.TailFile(f, tail)
		if err != nil {
			logWatcher.Err <- err
			return
		}
		rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n")))
	}
	dec := json.NewDecoder(rdr)
	l := &jsonlog.JSONLog{}
	for {
		msg, err := decodeLogLine(dec, l)
		if err != nil {
			if err != io.EOF {
				logWatcher.Err <- err
			}
			return
		}
		if !since.IsZero() && msg.Timestamp.Before(since) {
			continue
		}
		select {
		case <-logWatcher.WatchClose():
			return
		case logWatcher.Msg <- msg:
		}
	}
}

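// watchFile returns a FileWatcher watching name, falling back to a polling
// watcher when the file cannot be added to the default watcher.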
func watchFile(name string) (filenotify.FileWatcher, error) {
	fileWatcher, err := filenotify.New()
	if err != nil {
		return nil, err
	}

	if err := fileWatcher.Add(name); err != nil {
		logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err)
		fileWatcher.Close()
		fileWatcher = filenotify.NewPollingWatcher()

		if err := fileWatcher.Add(name); err != nil {
			fileWatcher.Close()
			logrus.Debugf("error watching log file for modifications: %v", err)
			return nil, err
		}
	}
	return fileWatcher, nil
}

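// followLogs tails the live log file, decoding entries as they are written
// and forwarding them to the watcher until the watcher is closed. It
// handles log rotation and partially written entries along the way.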
func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) {
	dec := json.NewDecoder(f)
	l := &jsonlog.JSONLog{}

	name := f.Name()
	fileWatcher, err := watchFile(name)
	if err != nil {
		logWatcher.Err <- err
		return
	}
	defer func() {
		f.Close()
		fileWatcher.Remove(name)
		fileWatcher.Close()
	}()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
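	// When the consumer closes the log watcher, stop watching the file and
	// cancel the context so the loops below can unwind.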
	go func() {
		select {
		case <-logWatcher.WatchClose():
			fileWatcher.Remove(name)
			cancel()
		case <-ctx.Done():
			return
		}
	}()

	var retries int
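	// handleRotate reopens the log file after a rotation and points the
	// decoder at the new file; the open is retried a few times because the
	// freshly rotated file may not exist yet.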
	handleRotate := func() error {
		f.Close()
		fileWatcher.Remove(name)

		// retry when the file doesn't exist
		for retries := 0; retries <= 5; retries++ {
			f, err = os.Open(name)
			if err == nil || !os.IsNotExist(err) {
				break
			}
		}
		if err != nil {
			return err
		}
		if err := fileWatcher.Add(name); err != nil {
			return err
		}
		dec = json.NewDecoder(f)
		return nil
	}

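	// waitRead blocks until the watched file changes again: a write means
	// there is more data to decode, while a rename or remove signals a
	// rotation that requires reopening the file.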
	errRetry := errors.New("retry")
	errDone := errors.New("done")
	waitRead := func() error {
		select {
		case e := <-fileWatcher.Events():
			switch e.Op {
			case fsnotify.Write:
				dec = json.NewDecoder(f)
				return nil
			case fsnotify.Rename, fsnotify.Remove:
				select {
				case <-notifyRotate:
				case <-ctx.Done():
					return errDone
				}
				if err := handleRotate(); err != nil {
					return err
				}
				return nil
			}
			return errRetry
		case err := <-fileWatcher.Errors():
			logrus.Debugf("logger got error watching file: %v", err)
			// Something happened, let's try and stay alive and create a new watcher
			if retries <= 5 {
				fileWatcher.Close()
				fileWatcher, err = watchFile(name)
				if err != nil {
					return err
				}
				retries++
				return errRetry
			}
			return err
		case <-ctx.Done():
			return errDone
		}
	}

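	// handleDecodeErr decides how to recover from a decode failure: io.EOF
	// waits for more data, syntax errors and unexpected EOFs from partially
	// written entries get a fresh decoder (bounded by maxJSONDecodeRetry),
	// and anything else is treated as fatal.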
	handleDecodeErr := func(err error) error {
		if err == io.EOF {
			for {
				err := waitRead()
				if err == nil {
					break
				}
				if err == errRetry {
					continue
				}
				return err
			}
			return nil
		}
		// a syntax error shouldn't normally happen; rebuild the decoder and try again
		if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry {
			dec = json.NewDecoder(f)
			retries++
			return nil
		}
		// io.ErrUnexpectedEOF is returned from json.Decoder when there is
		// remaining data in the parser's buffer while an io.EOF occurs.
		// If the json logger writes a partial json log entry to disk while
		// the decoder is reading it, that race surfaces here as io.ErrUnexpectedEOF.
		if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry {
			reader := io.MultiReader(dec.Buffered(), f)
			dec = json.NewDecoder(reader)
			retries++
			return nil
		}
		return err
	}

	// main loop
	for {
		msg, err := decodeLogLine(dec, l)
		if err != nil {
			if err := handleDecodeErr(err); err != nil {
				if err == errDone {
					return
				}
				// we got an unrecoverable error, so return
				logWatcher.Err <- err
				return
			}
			// ready to try again
			continue
		}

		retries = 0 // reset retries since we've succeeded
		if !since.IsZero() && msg.Timestamp.Before(since) {
			continue
		}
		select {
		case logWatcher.Msg <- msg:
		case <-ctx.Done():
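			// The context is only cancelled when the watcher is being closed;
			// deliver the pending message and drain whatever is still
			// decodable before returning.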
			logWatcher.Msg <- msg
			for {
				msg, err := decodeLogLine(dec, l)
				if err != nil {
					return
				}
				if !since.IsZero() && msg.Timestamp.Before(since) {
					continue
				}
				logWatcher.Msg <- msg
			}
		}
	}
}