github.com/walkingsparrow/docker@v1.4.2-0.20151218153551-b708a2249bfa/daemon/logger/jsonfilelog/read.go

package jsonfilelog

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/pkg/filenotify"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonlog"
	"github.com/docker/docker/pkg/tailfile"
)

const maxJSONDecodeRetry = 20000

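// decodeLogLine decodes a single JSON-formatted log entry from dec into a
// logger.Message, reusing l as scratch space between calls.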
func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) {
	l.Reset()
	if err := dec.Decode(l); err != nil {
		return nil, err
	}
	msg := &logger.Message{
		Source:    l.Stream,
		Timestamp: l.Created,
		Line:      []byte(l.Log),
	}
	return msg, nil
}

// ReadLogs implements the logger's LogReader interface for the logs
// created by this driver.
func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
	logWatcher := logger.NewLogWatcher()

	go l.readLogs(logWatcher, config)
	return logWatcher
}

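// readLogs streams log messages to logWatcher: it replays the requested tail
// of historical entries from the rotated and current log files, then, if
// following, keeps emitting new entries until the watcher is closed.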
func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
	defer close(logWatcher.Msg)

	pth := l.writer.LogPath()
	var files []io.ReadSeeker
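	// Open any rotated log files (path.N, where a higher N is older) first so
	// the combined reader yields entries in chronological order.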
	for i := l.writer.MaxFiles(); i > 1; i-- {
		f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1))
		if err != nil {
			if !os.IsNotExist(err) {
				logWatcher.Err <- err
				break
			}
			continue
		}
		defer f.Close()
		files = append(files, f)
	}

	latestFile, err := os.Open(pth)
	if err != nil {
		logWatcher.Err <- err
		return
	}
	defer latestFile.Close()

	files = append(files, latestFile)
	tailer := ioutils.MultiReadSeeker(files...)

	if config.Tail != 0 {
		tailFile(tailer, logWatcher, config.Tail, config.Since)
	}

	if !config.Follow {
		return
	}

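	// Tail >= 0 means any requested history was already emitted above (a
	// negative Tail streamed the whole file), so jump to the end of the
	// current file and only follow entries written from now on.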
	if config.Tail >= 0 {
		latestFile.Seek(0, os.SEEK_END)
	}

	l.mu.Lock()
	l.readers[logWatcher] = struct{}{}
	l.mu.Unlock()

	notifyRotate := l.writer.NotifyRotate()
	followLogs(latestFile, logWatcher, notifyRotate, config.Since)

	l.mu.Lock()
	delete(l.readers, logWatcher)
	l.mu.Unlock()

	l.writer.NotifyRotateEvict(notifyRotate)
}

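// tailFile sends the last tail entries of f (or every entry when tail is
// negative) to logWatcher, skipping messages older than since.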
func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) {
	var rdr io.Reader = f
	if tail > 0 {
		ls, err := tailfile.TailFile(f, tail)
		if err != nil {
			logWatcher.Err <- err
			return
		}
		rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n")))
	}
	dec := json.NewDecoder(rdr)
	l := &jsonlog.JSONLog{}
	for {
		msg, err := decodeLogLine(dec, l)
		if err != nil {
			if err != io.EOF {
				logWatcher.Err <- err
			}
			return
		}
		if !since.IsZero() && msg.Timestamp.Before(since) {
			continue
		}
		logWatcher.Msg <- msg
	}
}

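// followLogs keeps decoding new entries from the current log file and sends
// them to logWatcher, re-opening the file on rotation and tolerating partial
// writes, until the watcher is closed or an unrecoverable error occurs.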
func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) {
	dec := json.NewDecoder(f)
	l := &jsonlog.JSONLog{}

	fileWatcher, err := filenotify.New()
	if err != nil {
		logWatcher.Err <- err
		return
	}
	defer fileWatcher.Close()

	var retries int
	for {
		msg, err := decodeLogLine(dec, l)
		if err != nil {
			if err != io.EOF {
				// try again because this shouldn't happen
				if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry {
					dec = json.NewDecoder(f)
					retries++
					continue
				}

				// io.ErrUnexpectedEOF is returned from json.Decoder when there is
				// remaining data in the parser's buffer while an io.EOF occurs.
				// This race can happen when the json logger writes a partial log
				// entry to disk at the same time the decoder tries to decode it.
				if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry {
					reader := io.MultiReader(dec.Buffered(), f)
					dec = json.NewDecoder(reader)
					retries++
					continue
				}
				logWatcher.Err <- err
				return
			}

			logrus.WithField("logger", "json-file").Debugf("waiting for events")
			if err := fileWatcher.Add(f.Name()); err != nil {
				logrus.WithField("logger", "json-file").Warn("falling back to file poller")
				fileWatcher.Close()
				fileWatcher = filenotify.NewPollingWatcher()
				if err := fileWatcher.Add(f.Name()); err != nil {
					logrus.Errorf("error watching log file for modifications: %v", err)
					logWatcher.Err <- err
					return
				}
			}
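			// At EOF: block until the file changes, the watcher errors, the
			// reader goes away, or the log file is rotated.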
			select {
			case <-fileWatcher.Events():
				dec = json.NewDecoder(f)
				fileWatcher.Remove(f.Name())
				continue
			case err := <-fileWatcher.Errors():
				fileWatcher.Remove(f.Name())
				logWatcher.Err <- err
				return
			case <-logWatcher.WatchClose():
				fileWatcher.Remove(f.Name())
				return
			case <-notifyRotate:
				f, err = os.Open(f.Name())
				if err != nil {
					logWatcher.Err <- err
					return
				}

				dec = json.NewDecoder(f)
				fileWatcher.Remove(f.Name())
				fileWatcher.Add(f.Name())
				continue
			}
		}

		retries = 0 // reset retries since we've succeeded
		if !since.IsZero() && msg.Timestamp.Before(since) {
			continue
		}
		select {
		case logWatcher.Msg <- msg:
		case <-logWatcher.WatchClose():
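			// The watcher is going away: deliver the already-decoded message,
			// then drain whatever is left in the decoder before returning.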
			logWatcher.Msg <- msg
			for {
				msg, err := decodeLogLine(dec, l)
				if err != nil {
					return
				}
				if !since.IsZero() && msg.Timestamp.Before(since) {
					continue
				}
				logWatcher.Msg <- msg
			}
		}
	}
}
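
// A minimal sketch of how a caller might consume the watcher returned by
// ReadLogs (not part of this file; assumes a *JSONFileLogger value jl and a
// hypothetical consumer, with Msg closed and Err used as in readLogs above):
//
//	watcher := jl.ReadLogs(logger.ReadConfig{Tail: 100, Follow: true})
//	for {
//		select {
//		case msg, ok := <-watcher.Msg:
//			if !ok {
//				return // log stream finished
//			}
//			fmt.Printf("%s %s: %s", msg.Timestamp, msg.Source, msg.Line)
//		case err := <-watcher.Err:
//			logrus.Error(err)
//			return
//		}
//	}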