github.com/kaisenlinux/docker.io@v0.0.0-20230510090727-ea55db55fac7/engine/daemon/logger/jsonfilelog/read.go (about)

     1  package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelog"
     2  
     3  import (
     4  	"context"
     5  	"encoding/json"
     6  	"io"
     7  
     8  	"github.com/docker/docker/api/types/backend"
     9  	"github.com/docker/docker/daemon/logger"
    10  	"github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog"
    11  	"github.com/docker/docker/daemon/logger/loggerutils"
    12  	"github.com/docker/docker/pkg/tailfile"
    13  	"github.com/sirupsen/logrus"
    14  )
    15  
// maxJSONDecodeRetry bounds how many times decoder.Decode will retry after a
// decode error (e.g. one caused by racing a writer that has only partially
// flushed an entry) before giving up and returning the error.
const maxJSONDecodeRetry = 20000
    17  
    18  // ReadLogs implements the logger's LogReader interface for the logs
    19  // created by this driver.
    20  func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
    21  	logWatcher := logger.NewLogWatcher()
    22  
    23  	go l.readLogs(logWatcher, config)
    24  	return logWatcher
    25  }
    26  
    27  func (l *JSONFileLogger) readLogs(watcher *logger.LogWatcher, config logger.ReadConfig) {
    28  	defer close(watcher.Msg)
    29  
    30  	l.mu.Lock()
    31  	l.readers[watcher] = struct{}{}
    32  	l.mu.Unlock()
    33  
    34  	l.writer.ReadLogs(config, watcher)
    35  
    36  	l.mu.Lock()
    37  	delete(l.readers, watcher)
    38  	l.mu.Unlock()
    39  }
    40  
    41  func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) {
    42  	l.Reset()
    43  	if err := dec.Decode(l); err != nil {
    44  		return nil, err
    45  	}
    46  
    47  	var attrs []backend.LogAttr
    48  	if len(l.Attrs) != 0 {
    49  		attrs = make([]backend.LogAttr, 0, len(l.Attrs))
    50  		for k, v := range l.Attrs {
    51  			attrs = append(attrs, backend.LogAttr{Key: k, Value: v})
    52  		}
    53  	}
    54  	msg := &logger.Message{
    55  		Source:    l.Stream,
    56  		Timestamp: l.Created,
    57  		Line:      []byte(l.Log),
    58  		Attrs:     attrs,
    59  	}
    60  	return msg, nil
    61  }
    62  
// decoder implements loggerutils.Decoder for JSON-file log entries.
// Its fields are initialized lazily by Decode, repointed by Reset, and
// released by Close.
type decoder struct {
	rdr      io.Reader        // underlying log stream being decoded
	dec      *json.Decoder    // lazily (re)created JSON decoder over rdr
	jl       *jsonlog.JSONLog // scratch entry reused across Decode calls
	maxRetry int              // retry budget; defaults to maxJSONDecodeRetry
}
    69  
    70  func (d *decoder) Reset(rdr io.Reader) {
    71  	d.rdr = rdr
    72  	d.dec = nil
    73  	if d.jl != nil {
    74  		d.jl.Reset()
    75  	}
    76  }
    77  
    78  func (d *decoder) Close() {
    79  	d.dec = nil
    80  	d.rdr = nil
    81  	d.jl = nil
    82  }
    83  
    84  func (d *decoder) Decode() (msg *logger.Message, err error) {
    85  	if d.dec == nil {
    86  		d.dec = json.NewDecoder(d.rdr)
    87  	}
    88  	if d.jl == nil {
    89  		d.jl = &jsonlog.JSONLog{}
    90  	}
    91  	if d.maxRetry == 0 {
    92  		// We aren't using maxJSONDecodeRetry directly so we can give a custom value for testing.
    93  		d.maxRetry = maxJSONDecodeRetry
    94  	}
    95  	for retries := 0; retries < d.maxRetry; retries++ {
    96  		msg, err = decodeLogLine(d.dec, d.jl)
    97  		if err == nil || err == io.EOF {
    98  			break
    99  		}
   100  
   101  		logrus.WithError(err).WithField("retries", retries).Warn("got error while decoding json")
   102  		// try again, could be due to a an incomplete json object as we read
   103  		if _, ok := err.(*json.SyntaxError); ok {
   104  			d.dec = json.NewDecoder(d.rdr)
   105  			continue
   106  		}
   107  
   108  		// io.ErrUnexpectedEOF is returned from json.Decoder when there is
   109  		// remaining data in the parser's buffer while an io.EOF occurs.
   110  		// If the json logger writes a partial json log entry to the disk
   111  		// while at the same time the decoder tries to decode it, the race condition happens.
   112  		if err == io.ErrUnexpectedEOF {
   113  			d.rdr = combineReaders(d.dec.Buffered(), d.rdr)
   114  			d.dec = json.NewDecoder(d.rdr)
   115  			continue
   116  		}
   117  	}
   118  	return msg, err
   119  }
   120  
   121  func combineReaders(pre, rdr io.Reader) io.Reader {
   122  	return &combinedReader{pre: pre, rdr: rdr}
   123  }
   124  
   125  // combinedReader is a reader which is like `io.MultiReader` where except it does not cache a full EOF.
   126  // Once `io.MultiReader` returns EOF, it is always EOF.
   127  //
   128  // For this usecase we have an underlying reader which is a file which may reach EOF but have more data written to it later.
   129  // As such, io.MultiReader does not work for us.
   130  type combinedReader struct {
   131  	pre io.Reader
   132  	rdr io.Reader
   133  }
   134  
   135  func (r *combinedReader) Read(p []byte) (int, error) {
   136  	var read int
   137  	if r.pre != nil {
   138  		n, err := r.pre.Read(p)
   139  		if err != nil {
   140  			if err != io.EOF {
   141  				return n, err
   142  			}
   143  			r.pre = nil
   144  		}
   145  		read = n
   146  	}
   147  
   148  	if read < len(p) {
   149  		n, err := r.rdr.Read(p[read:])
   150  		if n > 0 {
   151  			read += n
   152  		}
   153  		if err != nil {
   154  			return read, err
   155  		}
   156  	}
   157  
   158  	return read, nil
   159  }
   160  
   161  // decodeFunc is used to create a decoder for the log file reader
   162  func decodeFunc(rdr io.Reader) loggerutils.Decoder {
   163  	return &decoder{
   164  		rdr: rdr,
   165  		dec: nil,
   166  		jl:  nil,
   167  	}
   168  }
   169  
   170  func getTailReader(ctx context.Context, r loggerutils.SizeReaderAt, req int) (io.Reader, int, error) {
   171  	return tailfile.NewTailReader(ctx, r, req)
   172  }