github.com/moby/docker@v26.1.3+incompatible/daemon/logger/copier.go

package logger // import "github.com/docker/docker/daemon/logger"

import (
	"bytes"
	"context"
	"io"
	"sync"
	"time"

	"github.com/containerd/log"
	types "github.com/docker/docker/api/types/backend"
	"github.com/docker/docker/pkg/stringid"
)

const (
	// readSize is the maximum bytes read during a single read
	// operation.
	readSize = 2 * 1024

	// defaultBufSize provides a reasonable default for loggers that do
	// not have an external limit to impose on log line size.
	defaultBufSize = 16 * 1024
)

// Copier can copy logs from specified sources to a Logger and attach a Timestamp.
// Writes are concurrent, so you need to implement some synchronization in your logger.
type Copier struct {
	// srcs is a map of name -> reader pairs, for example "stdout", "stderr"
	srcs      map[string]io.Reader
	dst       Logger
	copyJobs  sync.WaitGroup
	closeOnce sync.Once
	closed    chan struct{}
}

// NewCopier creates a new Copier
func NewCopier(srcs map[string]io.Reader, dst Logger) *Copier {
	return &Copier{
		srcs:   srcs,
		dst:    dst,
		closed: make(chan struct{}),
	}
}

// Run starts copying logs from the sources to the destination logger
func (c *Copier) Run() {
	for src, w := range c.srcs {
		c.copyJobs.Add(1)
		go c.copySrc(src, w)
	}
}
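
// The sketch below is not part of the upstream file: it illustrates how a
// caller would typically wire a Copier to a logging driver. The dst parameter
// stands in for any concrete Logger implementation (for example the json-file
// driver), and the bytes.Reader values stand in for the container's real
// stdout and stderr streams.
func exampleCopierUsage(dst Logger) {
	stdout := bytes.NewReader([]byte("hello from stdout\n"))
	stderr := bytes.NewReader([]byte("hello from stderr\n"))

	c := NewCopier(map[string]io.Reader{
		"stdout": stdout,
		"stderr": stderr,
	}, dst)

	c.Run()  // start one copy goroutine per source
	c.Wait() // block until every source reaches EOF or a read error occurs
	c.Close()
}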

func (c *Copier) copySrc(name string, src io.Reader) {
	defer c.copyJobs.Done()

	bufSize := defaultBufSize
	if sizedLogger, ok := c.dst.(SizedLogger); ok {
		size := sizedLogger.BufSize()
		// A logger that wraps another logger has BufSize(), but it cannot
		// return the size when the wrapped logger doesn't have BufSize().
		if size > 0 {
			bufSize = size
		}
	}
	buf := make([]byte, bufSize)

	n := 0
	eof := false
	var partialid string
	var partialTS time.Time
	var ordinal int
	firstPartial := true
	hasMorePartial := false

	for {
		select {
		case <-c.closed:
			return
		default:
			// Work out how much more data we are okay with reading this time.
			upto := n + readSize
			if upto > cap(buf) {
				upto = cap(buf)
			}
			// Try to read that data.
			if upto > n {
				read, err := src.Read(buf[n:upto])
				if err != nil {
					if err != io.EOF {
						logReadsFailedCount.Inc(1)
						log.G(context.TODO()).Errorf("Error scanning log stream: %s", err)
						return
					}
					eof = true
				}
				n += read
			}
			// If we have no data to log, and there's no more coming, we're done.
			if n == 0 && eof {
				return
			}
			// Break up the data that we've buffered up into lines, and log each in turn.
			p := 0

			for q := bytes.IndexByte(buf[p:n], '\n'); q >= 0; q = bytes.IndexByte(buf[p:n], '\n') {
				select {
				case <-c.closed:
					return
				default:
					msg := NewMessage()
					msg.Source = name
					msg.Line = append(msg.Line, buf[p:p+q]...)

					if hasMorePartial {
						msg.PLogMetaData = &types.PartialLogMetaData{ID: partialid, Ordinal: ordinal, Last: true}

						// reset
						partialid = ""
						ordinal = 0
						firstPartial = true
						hasMorePartial = false
					}
					if msg.PLogMetaData == nil {
						msg.Timestamp = time.Now().UTC()
					} else {
						msg.Timestamp = partialTS
					}

					if logErr := c.dst.Log(msg); logErr != nil {
						logDriverError(c.dst.Name(), string(msg.Line), logErr)
					}
				}
				p += q + 1
			}
			// If there's no more coming, or the buffer is full but
			// has no newlines, log whatever we haven't logged yet,
			// noting that it's a partial log line.
			if eof || (p == 0 && n == len(buf)) {
				if p < n {
					msg := NewMessage()
					msg.Source = name
					msg.Line = append(msg.Line, buf[p:n]...)

					// Generate unique partialID for first partial. Use it across partials.
					// Record timestamp for first partial. Use it across partials.
					// Initialize Ordinal for first partial. Increment it across partials.
					if firstPartial {
						msg.Timestamp = time.Now().UTC()
						partialTS = msg.Timestamp
						partialid = stringid.GenerateRandomID()
						ordinal = 1
						firstPartial = false
						totalPartialLogs.Inc(1)
					} else {
						msg.Timestamp = partialTS
					}
					msg.PLogMetaData = &types.PartialLogMetaData{ID: partialid, Ordinal: ordinal, Last: false}
					ordinal++
					hasMorePartial = true

					if logErr := c.dst.Log(msg); logErr != nil {
						logDriverError(c.dst.Name(), string(msg.Line), logErr)
					}
					p = 0
					n = 0
				}
				if eof {
					return
				}
			}
			// Move any unlogged data to the front of the buffer in preparation for another read.
			if p > 0 {
				copy(buf[0:], buf[p:n])
				n -= p
			}
		}
	}
}
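
// collectLogger is a sketch, not part of the upstream file. It is a minimal
// in-memory Logger used only to illustrate copySrc's partial-line handling;
// it assumes the Logger interface defined elsewhere in this package
// (Log, Name, Close).
type collectLogger struct {
	mu   sync.Mutex
	msgs []*Message
}

func (l *collectLogger) Log(m *Message) error {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.msgs = append(l.msgs, m)
	return nil
}

func (l *collectLogger) Name() string { return "collect" }
func (l *collectLogger) Close() error { return nil }

// examplePartialLines feeds copySrc a single line larger than defaultBufSize.
// The copier cannot buffer the whole line, so it emits it as several messages:
// every chunk shares one PLogMetaData.ID, Ordinal increases per chunk, and
// only the final chunk (the one ending at the newline) carries Last == true.
func examplePartialLines() []*Message {
	collector := &collectLogger{}
	long := append(bytes.Repeat([]byte("x"), 40*1024), '\n')

	c := NewCopier(map[string]io.Reader{"stdout": bytes.NewReader(long)}, collector)
	c.Run()
	c.Wait()
	c.Close()
	return collector.msgs
}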

// Wait waits until all copying is done
func (c *Copier) Wait() {
	c.copyJobs.Wait()
}

// Close closes the copier
func (c *Copier) Close() {
	c.closeOnce.Do(func() {
		close(c.closed)
	})
}
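
// sizedCollectLogger is a sketch, not part of the upstream file. It shows the
// other side of the buffer-size negotiation in copySrc: a driver that
// advertises its own per-line limit via BufSize() (the SizedLogger assertion
// above) makes the copier allocate a buffer of that size instead of
// defaultBufSize. The 64 KiB value is an arbitrary illustration, not a limit
// taken from any real driver.
type sizedCollectLogger struct {
	collectLogger
}

func (l *sizedCollectLogger) BufSize() int { return 64 * 1024 }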