github.com/fabiokung/docker@v0.11.2-0.20170222101415-4534dcd49497/daemon/logger/copier.go

package logger

import (
	"bytes"
	"io"
	"sync"
	"time"

	"github.com/Sirupsen/logrus"
)

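// bufSize caps how many bytes are buffered for a single source before an
// over-long line is flushed as a partial message; readSize is the most that
// is requested from the source in one Read.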
const (
	bufSize  = 16 * 1024
	readSize = 2 * 1024
)

// Copier can copy logs from specified sources to Logger and attach Timestamp.
// Writes are concurrent, so you need to implement some synchronization in your logger.
type Copier struct {
	// srcs is a map of name -> reader pairs, for example "stdout", "stderr"
	srcs      map[string]io.Reader
	dst       Logger
	copyJobs  sync.WaitGroup
	closeOnce sync.Once
	closed    chan struct{}
}

// NewCopier creates a new Copier
func NewCopier(srcs map[string]io.Reader, dst Logger) *Copier {
	return &Copier{
		srcs:   srcs,
		dst:    dst,
		closed: make(chan struct{}),
	}
}

// Run starts copying logs from each source in its own goroutine
func (c *Copier) Run() {
	for src, w := range c.srcs {
		c.copyJobs.Add(1)
		go c.copySrc(src, w)
	}
}

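// copySrc reads from src until EOF, a read error, or until the copier is
// closed, emitting one message per newline-terminated line and a partial
// message for data that fills the buffer or is left over at EOF.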
func (c *Copier) copySrc(name string, src io.Reader) {
	defer c.copyJobs.Done()
	buf := make([]byte, bufSize)
	n := 0
	eof := false

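	// Invariant: buf[0:n] holds bytes read from src that have not yet been logged.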
	for {
		select {
		case <-c.closed:
			return
		default:
			// Work out how much more data we are okay with reading this time.
			upto := n + readSize
			if upto > cap(buf) {
				upto = cap(buf)
			}
			// Try to read that data.
			if upto > n {
				read, err := src.Read(buf[n:upto])
				if err != nil {
					if err != io.EOF {
						logrus.Errorf("Error scanning log stream: %s", err)
						return
					}
					eof = true
				}
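				// Count what was read even on io.EOF; a Reader may return data together with the error.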
				n += read
			}
			// If we have no data to log, and there's no more coming, we're done.
			if n == 0 && eof {
				return
			}
			// Break up the data that we've buffered up into lines, and log each in turn.
			p := 0
			for q := bytes.IndexByte(buf[p:n], '\n'); q >= 0; q = bytes.IndexByte(buf[p:n], '\n') {
				select {
				case <-c.closed:
					return
				default:
					msg := NewMessage()
					msg.Source = name
					msg.Timestamp = time.Now().UTC()
					msg.Line = append(msg.Line, buf[p:p+q]...)

					if logErr := c.dst.Log(msg); logErr != nil {
						logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr)
					}
				}
				p += q + 1
			}
			// If there's no more coming, or the buffer is full but
			// has no newlines, log whatever we haven't logged yet,
			// noting that it's a partial log line.
			if eof || (p == 0 && n == len(buf)) {
				if p < n {
					msg := NewMessage()
					msg.Source = name
					msg.Timestamp = time.Now().UTC()
					msg.Line = append(msg.Line, buf[p:n]...)
					msg.Partial = true

					if logErr := c.dst.Log(msg); logErr != nil {
						logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr)
					}
					p = 0
					n = 0
				}
				if eof {
					return
				}
			}
			// Move any unlogged data to the front of the buffer in preparation for another read.
			if p > 0 {
				copy(buf[0:], buf[p:n])
				n -= p
			}
		}
	}
}

// Wait waits until all copying is done
func (c *Copier) Wait() {
	c.copyJobs.Wait()
}

// Close signals all copy goroutines to stop; it is safe to call more than once
func (c *Copier) Close() {
	c.closeOnce.Do(func() {
		close(c.closed)
	})
}
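
// exampleCopierUsage is a minimal usage sketch, not wiring taken from the
// daemon: the function name and parameters are illustrative, and it assumes
// the caller already has an io.Reader per stream and some Logger
// implementation to copy into.
func exampleCopierUsage(stdout, stderr io.Reader, dst Logger) {
	c := NewCopier(map[string]io.Reader{
		"stdout": stdout,
		"stderr": stderr,
	}, dst)
	c.Run() // starts one copy goroutine per source

	// ...later, when the streams are being torn down:
	c.Close() // signal the copy goroutines to stop
	c.Wait()  // block until they have all returned
}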