github.com/vieux/docker@v0.6.3-0.20161004191708-e097c2a938c7/daemon/logger/jsonfilelog/read.go

package jsonfilelog

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"time"

	"gopkg.in/fsnotify.v1"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/pkg/filenotify"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonlog"
	"github.com/docker/docker/pkg/tailfile"
)

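// maxJSONDecodeRetry bounds how many times followLogs retries decoding after
// hitting a malformed or partially-written JSON log entry.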
const maxJSONDecodeRetry = 20000

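// decodeLogLine decodes a single JSON log entry from dec into the reusable
// JSONLog l and converts it into a logger.Message.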
func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) {
	l.Reset()
	if err := dec.Decode(l); err != nil {
		return nil, err
	}
	msg := &logger.Message{
		Source:    l.Stream,
		Timestamp: l.Created,
		Line:      []byte(l.Log),
		Attrs:     l.Attrs,
	}
	return msg, nil
}

// ReadLogs implements the logger's LogReader interface for the logs
// created by this driver.
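//
// A minimal sketch of how a caller might consume the returned watcher
// (illustrative only; the ReadConfig values and the printing are not part of
// this package):
//
//	watcher := l.ReadLogs(logger.ReadConfig{Tail: -1, Follow: true})
//	defer watcher.Close()
//	for {
//		select {
//		case msg, ok := <-watcher.Msg:
//			if !ok {
//				return
//			}
//			fmt.Printf("%s %s: %s", msg.Timestamp, msg.Source, msg.Line)
//		case err := <-watcher.Err:
//			logrus.Error(err)
//			return
//		}
//	}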
func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
	logWatcher := logger.NewLogWatcher()

	go l.readLogs(logWatcher, config)
	return logWatcher
}

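// readLogs does the work for ReadLogs: it tails any rotated log files plus
// the current one and, when config.Follow is set, keeps streaming new entries
// until the watcher is closed.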
func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
	defer close(logWatcher.Msg)

	// Lock so the read stream doesn't get corrupted by rotations or other log
	// data written while we read. Note that this blocks writes!
	l.mu.Lock()

	pth := l.writer.LogPath()
	var files []io.ReadSeeker
	for i := l.writer.MaxFiles(); i > 1; i-- {
		f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1))
		if err != nil {
			if !os.IsNotExist(err) {
				logWatcher.Err <- err
				break
			}
			continue
		}
		defer f.Close()

		files = append(files, f)
	}

	latestFile, err := os.Open(pth)
	if err != nil {
		logWatcher.Err <- err
		l.mu.Unlock()
		return
	}
	defer latestFile.Close()

	if config.Tail != 0 {
		tailer := ioutils.MultiReadSeeker(append(files, latestFile)...)
		tailFile(tailer, logWatcher, config.Tail, config.Since)
	}

	// close all the rotated files
	for _, f := range files {
		if err := f.(io.Closer).Close(); err != nil {
			logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err)
		}
	}

	if !config.Follow {
		if err := latestFile.Close(); err != nil {
			logrus.Errorf("Error closing file: %v", err)
		}
		l.mu.Unlock()
		return
	}

	// Unless the caller asked for every existing line (Tail < 0), start
	// following from the end of the file; the tail above already emitted the
	// requested history.
	if config.Tail >= 0 {
		latestFile.Seek(0, os.SEEK_END)
	}

	l.readers[logWatcher] = struct{}{}
	l.mu.Unlock()

	notifyRotate := l.writer.NotifyRotate()
	followLogs(latestFile, logWatcher, notifyRotate, config.Since)

	l.mu.Lock()
	delete(l.readers, logWatcher)
	l.mu.Unlock()

	l.writer.NotifyRotateEvict(notifyRotate)
}

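// tailFile sends the last "tail" log entries from f (or every entry when
// tail <= 0) to the watcher, skipping entries older than "since".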
func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) {
	var rdr io.Reader = f
	if tail > 0 {
		ls, err := tailfile.TailFile(f, tail)
		if err != nil {
			logWatcher.Err <- err
			return
		}
		rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n")))
	}
	dec := json.NewDecoder(rdr)
	l := &jsonlog.JSONLog{}
	for {
		msg, err := decodeLogLine(dec, l)
		if err != nil {
			if err != io.EOF {
				logWatcher.Err <- err
			}
			return
		}
		if !since.IsZero() && msg.Timestamp.Before(since) {
			continue
		}
		logWatcher.Msg <- msg
	}
}

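// watchFile sets up a file watcher for name, falling back to a polling
// watcher when the event-based watcher cannot watch the file.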
func watchFile(name string) (filenotify.FileWatcher, error) {
	fileWatcher, err := filenotify.New()
	if err != nil {
		return nil, err
	}

	if err := fileWatcher.Add(name); err != nil {
		logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err)
		fileWatcher.Close()
		fileWatcher = filenotify.NewPollingWatcher()

		if err := fileWatcher.Add(name); err != nil {
			fileWatcher.Close()
			logrus.Debugf("error watching log file for modifications: %v", err)
			return nil, err
		}
	}
	return fileWatcher, nil
}

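// followLogs streams new log entries from f to the watcher as they are
// written, handling log rotation, partially-written entries, and watcher
// errors, until the watcher is closed or an unrecoverable error occurs.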
func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) {
	dec := json.NewDecoder(f)
	l := &jsonlog.JSONLog{}

	name := f.Name()
	fileWatcher, err := watchFile(name)
	if err != nil {
		logWatcher.Err <- err
		return
	}
	defer func() {
		f.Close()
		fileWatcher.Close()
	}()

	var retries int
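	// handleRotate reopens the log file after it has been rotated and points
	// the decoder at the fresh file.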
	handleRotate := func() error {
		f.Close()
		fileWatcher.Remove(name)

		// retry while the file doesn't exist yet (use a local counter so the
		// outer retries count is not shadowed)
		for i := 0; i <= 5; i++ {
			f, err = os.Open(name)
			if err == nil || !os.IsNotExist(err) {
				break
			}
		}
		if err != nil {
			return err
		}
		if err := fileWatcher.Add(name); err != nil {
			return err
		}
		dec = json.NewDecoder(f)
		return nil
	}

	// Sentinel errors used by waitRead to tell the caller what to do next.
	errRetry := errors.New("retry")
	errDone := errors.New("done")
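	// waitRead blocks until the watched file changes. It returns nil when new
	// data can be decoded, errRetry when the wait should simply be repeated,
	// errDone when the watcher has been closed, and any other error otherwise.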
	waitRead := func() error {
		select {
		case e := <-fileWatcher.Events():
			switch e.Op {
			case fsnotify.Write:
				dec = json.NewDecoder(f)
				return nil
			case fsnotify.Rename, fsnotify.Remove:
				select {
				case <-notifyRotate:
				case <-logWatcher.WatchClose():
					fileWatcher.Remove(name)
					return errDone
				}
				if err := handleRotate(); err != nil {
					return err
				}
				return nil
			}
			return errRetry
		case err := <-fileWatcher.Errors():
			logrus.Debugf("logger got error watching file: %v", err)
			// Something went wrong with the watcher; try to stay alive by
			// creating a new one.
			if retries <= 5 {
				fileWatcher.Close()
				fileWatcher, err = watchFile(name)
				if err != nil {
					return err
				}
				retries++
				return errRetry
			}
			return err
		case <-logWatcher.WatchClose():
			fileWatcher.Remove(name)
			return errDone
		}
	}

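	// handleDecodeErr decides whether a decode error is recoverable: EOF means
	// wait for more data, syntax errors and unexpected EOFs (from racing a
	// partially-written entry) are retried up to maxJSONDecodeRetry times, and
	// anything else is returned to the caller.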
	handleDecodeErr := func(err error) error {
		if err == io.EOF {
			// Wait for more data, repeating the wait for as long as waitRead
			// asks for a retry.
			for {
				err := waitRead()
				if err == nil {
					return nil
				}
				if err == errRetry {
					continue
				}
				return err
			}
		}
		// try again because this shouldn't happen
		if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry {
			dec = json.NewDecoder(f)
			retries++
			return nil
		}
		// io.ErrUnexpectedEOF is returned from json.Decoder when there is
		// remaining data in the parser's buffer while an io.EOF occurs.
		// This race happens when the json logger writes a partial log entry to
		// disk at the same time as the decoder tries to decode it.
		if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry {
			reader := io.MultiReader(dec.Buffered(), f)
			dec = json.NewDecoder(reader)
			retries++
			return nil
		}
		return err
	}

	// main loop: decode entries and deliver them until the watcher is closed
	// or an unrecoverable error occurs
	for {
		msg, err := decodeLogLine(dec, l)
		if err != nil {
			if err := handleDecodeErr(err); err != nil {
				if err == errDone {
					return
				}
				// we got an unrecoverable error, so return
				logWatcher.Err <- err
				return
			}
			// ready to try again
			continue
		}

		retries = 0 // reset retries since we've succeeded
		if !since.IsZero() && msg.Timestamp.Before(since) {
			continue
		}
		select {
		case logWatcher.Msg <- msg:
		case <-logWatcher.WatchClose():
			// The watcher is being closed: deliver the message we already
			// decoded, then drain whatever is still buffered before returning.
			logWatcher.Msg <- msg
			for {
				msg, err := decodeLogLine(dec, l)
				if err != nil {
					return
				}
				if !since.IsZero() && msg.Timestamp.Before(since) {
					continue
				}
				logWatcher.Msg <- msg
			}
		}
	}
}