github.com/LazyboyChen7/engine@v17.12.1-ce-rc2+incompatible/daemon/logger/copier.go (about)

     1  package logger
     2  
     3  import (
     4  	"bytes"
     5  	"io"
     6  	"sync"
     7  	"time"
     8  
     9  	"github.com/sirupsen/logrus"
    10  )
    11  
    12  const (
    13  	// readSize is the maximum bytes read during a single read
    14  	// operation.
    15  	readSize = 2 * 1024
    16  
    17  	// defaultBufSize provides a reasonable default for loggers that do
    18  	// not have an external limit to impose on log line size.
    19  	defaultBufSize = 16 * 1024
    20  )
    21  
    22  // Copier can copy logs from specified sources to Logger and attach Timestamp.
    23  // Writes are concurrent, so you need implement some sync in your logger.
// Copier can copy logs from specified sources to Logger and attach Timestamp.
// Writes are concurrent, so you need implement some sync in your logger.
type Copier struct {
	// srcs is map of name -> reader pairs, for example "stdout", "stderr"
	srcs      map[string]io.Reader
	// dst receives every message read from srcs.
	dst       Logger
	// copyJobs counts one goroutine per source started by Run; Wait blocks on it.
	copyJobs  sync.WaitGroup
	// closeOnce ensures closed is closed at most once, making Close idempotent.
	closeOnce sync.Once
	// closed, once closed, signals every copy goroutine to stop.
	closed    chan struct{}
}
    32  
    33  // NewCopier creates a new Copier
    34  func NewCopier(srcs map[string]io.Reader, dst Logger) *Copier {
    35  	return &Copier{
    36  		srcs:   srcs,
    37  		dst:    dst,
    38  		closed: make(chan struct{}),
    39  	}
    40  }
    41  
    42  // Run starts logs copying
    43  func (c *Copier) Run() {
    44  	for src, w := range c.srcs {
    45  		c.copyJobs.Add(1)
    46  		go c.copySrc(src, w)
    47  	}
    48  }
    49  
// copySrc reads from src until EOF or until the copier is closed, splits the
// stream on '\n', and logs each line to c.dst as a Message tagged with the
// source name and a UTC timestamp. Data that fills the buffer without a
// newline, or trailing data at EOF, is logged with Partial set. Runs as a
// goroutine per source and signals completion on c.copyJobs.
func (c *Copier) copySrc(name string, src io.Reader) {
	defer c.copyJobs.Done()

	// A size-aware logger can cap how large a single buffered line may grow.
	bufSize := defaultBufSize
	if sizedLogger, ok := c.dst.(SizedLogger); ok {
		bufSize = sizedLogger.BufSize()
	}
	buf := make([]byte, bufSize)

	// n is the number of valid bytes currently buffered in buf.
	n := 0
	// eof is set once src returns io.EOF; we then drain buf and return.
	eof := false

	for {
		select {
		case <-c.closed:
			return
		default:
			// Work out how much more data we are okay with reading this time.
			upto := n + readSize
			if upto > cap(buf) {
				upto = cap(buf)
			}
			// Try to read that data.
			if upto > n {
				read, err := src.Read(buf[n:upto])
				if err != nil {
					if err != io.EOF {
						logrus.Errorf("Error scanning log stream: %s", err)
						return
					}
					eof = true
				}
				// Read may return bytes alongside io.EOF, so count them
				// before the EOF is acted on below.
				n += read
			}
			// If we have no data to log, and there's no more coming, we're done.
			if n == 0 && eof {
				return
			}
			// Break up the data that we've buffered up into lines, and log each in turn.
			p := 0
			for q := bytes.IndexByte(buf[p:n], '\n'); q >= 0; q = bytes.IndexByte(buf[p:n], '\n') {
				select {
				case <-c.closed:
					return
				default:
					msg := NewMessage()
					msg.Source = name
					msg.Timestamp = time.Now().UTC()
					// q is relative to p, so this line spans buf[p : p+q]
					// (newline excluded).
					msg.Line = append(msg.Line, buf[p:p+q]...)

					if logErr := c.dst.Log(msg); logErr != nil {
						logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr)
					}
				}
				// Advance past the newline itself.
				p += q + 1
			}
			// If there's no more coming, or the buffer is full but
			// has no newlines, log whatever we haven't logged yet,
			// noting that it's a partial log line.
			if eof || (p == 0 && n == len(buf)) {
				if p < n {
					msg := NewMessage()
					msg.Source = name
					msg.Timestamp = time.Now().UTC()
					msg.Line = append(msg.Line, buf[p:n]...)
					msg.Partial = true

					if logErr := c.dst.Log(msg); logErr != nil {
						logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr)
					}
					// Buffer fully flushed; start the next read from scratch.
					p = 0
					n = 0
				}
				if eof {
					return
				}
			}
			// Move any unlogged data to the front of the buffer in preparation for another read.
			if p > 0 {
				copy(buf[0:], buf[p:n])
				n -= p
			}
		}
	}
}
   135  
   136  // Wait waits until all copying is done
   137  func (c *Copier) Wait() {
   138  	c.copyJobs.Wait()
   139  }
   140  
   141  // Close closes the copier
   142  func (c *Copier) Close() {
   143  	c.closeOnce.Do(func() {
   144  		close(c.closed)
   145  	})
   146  }