github.com/LazyboyChen7/engine@v17.12.1-ce-rc2+incompatible/daemon/logger/awslogs/cloudwatchlogs.go (about)

     1  // Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs
     2  package awslogs
     3  
     4  import (
     5  	"fmt"
     6  	"os"
     7  	"regexp"
     8  	"runtime"
     9  	"sort"
    10  	"strconv"
    11  	"strings"
    12  	"sync"
    13  	"time"
    14  
    15  	"github.com/aws/aws-sdk-go/aws"
    16  	"github.com/aws/aws-sdk-go/aws/awserr"
    17  	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
    18  	"github.com/aws/aws-sdk-go/aws/ec2metadata"
    19  	"github.com/aws/aws-sdk-go/aws/request"
    20  	"github.com/aws/aws-sdk-go/aws/session"
    21  	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
    22  	"github.com/docker/docker/daemon/logger"
    23  	"github.com/docker/docker/daemon/logger/loggerutils"
    24  	"github.com/docker/docker/dockerversion"
    25  	"github.com/pkg/errors"
    26  	"github.com/sirupsen/logrus"
    27  )
    28  
const (
	// Driver name and the log-opt keys accepted in a container's log
	// configuration.
	name                   = "awslogs"
	regionKey              = "awslogs-region"
	regionEnvKey           = "AWS_REGION"
	logGroupKey            = "awslogs-group"
	logStreamKey           = "awslogs-stream"
	logCreateGroupKey      = "awslogs-create-group"
	tagKey                 = "tag"
	datetimeFormatKey      = "awslogs-datetime-format"
	multilinePatternKey    = "awslogs-multiline-pattern"
	credentialsEndpointKey = "awslogs-credentials-endpoint"

	// batchPublishFrequency is how often collectBatch flushes a batch to
	// CloudWatch Logs regardless of batch size.
	batchPublishFrequency = 5 * time.Second

	// See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html
	perEventBytes          = 26
	maximumBytesPerPut     = 1048576
	maximumLogEventsPerPut = 10000

	// See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html
	maximumBytesPerEvent = 262144 - perEventBytes

	// Error codes returned by the CloudWatch Logs API that the driver
	// handles specially (see create, createLogGroup, publishBatch).
	resourceAlreadyExistsCode = "ResourceAlreadyExistsException"
	dataAlreadyAcceptedCode   = "DataAlreadyAcceptedException"
	invalidSequenceTokenCode  = "InvalidSequenceTokenException"
	resourceNotFoundCode      = "ResourceNotFoundException"

	// credentialsEndpoint is the base URL used with awslogs-credentials-endpoint
	// to fetch task credentials (see newAWSLogsClient).
	credentialsEndpoint = "http://169.254.170.2"

	userAgentHeader = "User-Agent"
)
    59  
// logStream is an instance of the awslogs logging driver for a single
// container; it publishes that container's messages to one CloudWatch
// Logs stream.
type logStream struct {
	logStreamName    string               // CloudWatch Logs stream name
	logGroupName     string               // CloudWatch Logs group name
	logCreateGroup   bool                 // create the log group if missing (awslogs-create-group)
	multilinePattern *regexp.Regexp       // non-nil when multiline batching is enabled
	client           api                  // CloudWatch Logs API client
	messages         chan *logger.Message // buffered queue drained by collectBatch
	lock             sync.RWMutex         // guards closed and the closing of messages
	closed           bool
	sequenceToken    *string // sequencing token for the next PutLogEvents call
}
    71  
// api is the subset of the CloudWatch Logs SDK client used by the driver,
// abstracted as an interface so unit tests can substitute a mock.
type api interface {
	CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error)
	CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error)
	PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error)
}

// regionFinder abstracts AWS region lookup (implemented by the EC2 metadata
// client) so it can be swapped out in unit tests.
type regionFinder interface {
	Region() (string, error)
}

// wrappedEvent pairs an InputLogEvent with its insertion order so sorting by
// timestamp can fall back to submission order for equal timestamps.
type wrappedEvent struct {
	inputLogEvent *cloudwatchlogs.InputLogEvent
	insertOrder   int
}

// byTimestamp sorts wrappedEvents by timestamp, then by insertion order.
type byTimestamp []wrappedEvent
    87  
// init registers the awslogs driver and its log-opt validator with the
// logger registry; registration failure is fatal at daemon startup.
func init() {
	if err := logger.RegisterLogDriver(name, New); err != nil {
		logrus.Fatal(err)
	}
	if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil {
		logrus.Fatal(err)
	}
}
    97  
// eventBatch holds the events that are batched for submission and the
// associated data about it.
//
// Warning: this type is not threadsafe and must not be used
// concurrently. This type is expected to be consumed in a single go
// routine and never concurrently.
type eventBatch struct {
	batch []wrappedEvent // events accumulated for the next PutLogEvents call
	bytes int            // payload size so far, including perEventBytes overhead per event
}
   108  
   109  // New creates an awslogs logger using the configuration passed in on the
   110  // context.  Supported context configuration variables are awslogs-region,
   111  // awslogs-group, awslogs-stream, awslogs-create-group, awslogs-multiline-pattern
   112  // and awslogs-datetime-format.  When available, configuration is
   113  // also taken from environment variables AWS_REGION, AWS_ACCESS_KEY_ID,
   114  // AWS_SECRET_ACCESS_KEY, the shared credentials file (~/.aws/credentials), and
   115  // the EC2 Instance Metadata Service.
   116  func New(info logger.Info) (logger.Logger, error) {
   117  	logGroupName := info.Config[logGroupKey]
   118  	logStreamName, err := loggerutils.ParseLogTag(info, "{{.FullID}}")
   119  	if err != nil {
   120  		return nil, err
   121  	}
   122  	logCreateGroup := false
   123  	if info.Config[logCreateGroupKey] != "" {
   124  		logCreateGroup, err = strconv.ParseBool(info.Config[logCreateGroupKey])
   125  		if err != nil {
   126  			return nil, err
   127  		}
   128  	}
   129  
   130  	if info.Config[logStreamKey] != "" {
   131  		logStreamName = info.Config[logStreamKey]
   132  	}
   133  
   134  	multilinePattern, err := parseMultilineOptions(info)
   135  	if err != nil {
   136  		return nil, err
   137  	}
   138  
   139  	client, err := newAWSLogsClient(info)
   140  	if err != nil {
   141  		return nil, err
   142  	}
   143  	containerStream := &logStream{
   144  		logStreamName:    logStreamName,
   145  		logGroupName:     logGroupName,
   146  		logCreateGroup:   logCreateGroup,
   147  		multilinePattern: multilinePattern,
   148  		client:           client,
   149  		messages:         make(chan *logger.Message, 4096),
   150  	}
   151  	err = containerStream.create()
   152  	if err != nil {
   153  		return nil, err
   154  	}
   155  	go containerStream.collectBatch()
   156  
   157  	return containerStream, nil
   158  }
   159  
   160  // Parses awslogs-multiline-pattern and awslogs-datetime-format options
   161  // If awslogs-datetime-format is present, convert the format from strftime
   162  // to regexp and return.
   163  // If awslogs-multiline-pattern is present, compile regexp and return
   164  func parseMultilineOptions(info logger.Info) (*regexp.Regexp, error) {
   165  	dateTimeFormat := info.Config[datetimeFormatKey]
   166  	multilinePatternKey := info.Config[multilinePatternKey]
   167  	// strftime input is parsed into a regular expression
   168  	if dateTimeFormat != "" {
   169  		// %. matches each strftime format sequence and ReplaceAllStringFunc
   170  		// looks up each format sequence in the conversion table strftimeToRegex
   171  		// to replace with a defined regular expression
   172  		r := regexp.MustCompile("%.")
   173  		multilinePatternKey = r.ReplaceAllStringFunc(dateTimeFormat, func(s string) string {
   174  			return strftimeToRegex[s]
   175  		})
   176  	}
   177  	if multilinePatternKey != "" {
   178  		multilinePattern, err := regexp.Compile(multilinePatternKey)
   179  		if err != nil {
   180  			return nil, errors.Wrapf(err, "awslogs could not parse multiline pattern key %q", multilinePatternKey)
   181  		}
   182  		return multilinePattern, nil
   183  	}
   184  	return nil, nil
   185  }
   186  
   187  // Maps strftime format strings to regex
   188  var strftimeToRegex = map[string]string{
   189  	/*weekdayShort          */ `%a`: `(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)`,
   190  	/*weekdayFull           */ `%A`: `(?:Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)`,
   191  	/*weekdayZeroIndex      */ `%w`: `[0-6]`,
   192  	/*dayZeroPadded         */ `%d`: `(?:0[1-9]|[1,2][0-9]|3[0,1])`,
   193  	/*monthShort            */ `%b`: `(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)`,
   194  	/*monthFull             */ `%B`: `(?:January|February|March|April|May|June|July|August|September|October|November|December)`,
   195  	/*monthZeroPadded       */ `%m`: `(?:0[1-9]|1[0-2])`,
   196  	/*yearCentury           */ `%Y`: `\d{4}`,
   197  	/*yearZeroPadded        */ `%y`: `\d{2}`,
   198  	/*hour24ZeroPadded      */ `%H`: `(?:[0,1][0-9]|2[0-3])`,
   199  	/*hour12ZeroPadded      */ `%I`: `(?:0[0-9]|1[0-2])`,
   200  	/*AM or PM              */ `%p`: "[A,P]M",
   201  	/*minuteZeroPadded      */ `%M`: `[0-5][0-9]`,
   202  	/*secondZeroPadded      */ `%S`: `[0-5][0-9]`,
   203  	/*microsecondZeroPadded */ `%f`: `\d{6}`,
   204  	/*utcOffset             */ `%z`: `[+-]\d{4}`,
   205  	/*tzName                */ `%Z`: `[A-Z]{1,4}T`,
   206  	/*dayOfYearZeroPadded   */ `%j`: `(?:0[0-9][1-9]|[1,2][0-9][0-9]|3[0-5][0-9]|36[0-6])`,
   207  	/*milliseconds          */ `%L`: `\.\d{3}`,
   208  }
   209  
// newRegionFinder is a variable such that the implementation
// can be swapped out for unit tests.
// NOTE(review): session.New is deprecated in the AWS SDK in favor of
// session.NewSession — consider migrating when touching this code.
var newRegionFinder = func() regionFinder {
	return ec2metadata.New(session.New())
}

// newSDKEndpoint is a variable such that the implementation
// can be swapped out for unit tests.  It is the base URL joined with the
// awslogs-credentials-endpoint option in newAWSLogsClient.
var newSDKEndpoint = credentialsEndpoint
   219  
   220  // newAWSLogsClient creates the service client for Amazon CloudWatch Logs.
   221  // Customizations to the default client from the SDK include a Docker-specific
   222  // User-Agent string and automatic region detection using the EC2 Instance
   223  // Metadata Service when region is otherwise unspecified.
   224  func newAWSLogsClient(info logger.Info) (api, error) {
   225  	var region *string
   226  	if os.Getenv(regionEnvKey) != "" {
   227  		region = aws.String(os.Getenv(regionEnvKey))
   228  	}
   229  	if info.Config[regionKey] != "" {
   230  		region = aws.String(info.Config[regionKey])
   231  	}
   232  	if region == nil || *region == "" {
   233  		logrus.Info("Trying to get region from EC2 Metadata")
   234  		ec2MetadataClient := newRegionFinder()
   235  		r, err := ec2MetadataClient.Region()
   236  		if err != nil {
   237  			logrus.WithFields(logrus.Fields{
   238  				"error": err,
   239  			}).Error("Could not get region from EC2 metadata, environment, or log option")
   240  			return nil, errors.New("Cannot determine region for awslogs driver")
   241  		}
   242  		region = &r
   243  	}
   244  
   245  	sess, err := session.NewSession()
   246  	if err != nil {
   247  		return nil, errors.New("Failed to create a service client session for for awslogs driver")
   248  	}
   249  
   250  	// attach region to cloudwatchlogs config
   251  	sess.Config.Region = region
   252  
   253  	if uri, ok := info.Config[credentialsEndpointKey]; ok {
   254  		logrus.Debugf("Trying to get credentials from awslogs-credentials-endpoint")
   255  
   256  		endpoint := fmt.Sprintf("%s%s", newSDKEndpoint, uri)
   257  		creds := endpointcreds.NewCredentialsClient(*sess.Config, sess.Handlers, endpoint,
   258  			func(p *endpointcreds.Provider) {
   259  				p.ExpiryWindow = 5 * time.Minute
   260  			})
   261  
   262  		// attach credentials to cloudwatchlogs config
   263  		sess.Config.Credentials = creds
   264  	}
   265  
   266  	logrus.WithFields(logrus.Fields{
   267  		"region": *region,
   268  	}).Debug("Created awslogs client")
   269  
   270  	client := cloudwatchlogs.New(sess)
   271  
   272  	client.Handlers.Build.PushBackNamed(request.NamedHandler{
   273  		Name: "DockerUserAgentHandler",
   274  		Fn: func(r *request.Request) {
   275  			currentAgent := r.HTTPRequest.Header.Get(userAgentHeader)
   276  			r.HTTPRequest.Header.Set(userAgentHeader,
   277  				fmt.Sprintf("Docker %s (%s) %s",
   278  					dockerversion.Version, runtime.GOOS, currentAgent))
   279  		},
   280  	})
   281  	return client, nil
   282  }
   283  
// Name returns the name of the awslogs logging driver ("awslogs").
func (l *logStream) Name() string {
	return name
}
   288  
// BufSize returns the maximum payload size of a single log event
// (the CloudWatch per-event limit minus the fixed per-event overhead).
func (l *logStream) BufSize() int {
	return maximumBytesPerEvent
}
   292  
   293  // Log submits messages for logging by an instance of the awslogs logging driver
   294  func (l *logStream) Log(msg *logger.Message) error {
   295  	l.lock.RLock()
   296  	defer l.lock.RUnlock()
   297  	if !l.closed {
   298  		l.messages <- msg
   299  	}
   300  	return nil
   301  }
   302  
   303  // Close closes the instance of the awslogs logging driver
   304  func (l *logStream) Close() error {
   305  	l.lock.Lock()
   306  	defer l.lock.Unlock()
   307  	if !l.closed {
   308  		close(l.messages)
   309  	}
   310  	l.closed = true
   311  	return nil
   312  }
   313  
   314  // create creates log group and log stream for the instance of the awslogs logging driver
   315  func (l *logStream) create() error {
   316  	if err := l.createLogStream(); err != nil {
   317  		if l.logCreateGroup {
   318  			if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == resourceNotFoundCode {
   319  				if err := l.createLogGroup(); err != nil {
   320  					return err
   321  				}
   322  				return l.createLogStream()
   323  			}
   324  		}
   325  		return err
   326  	}
   327  
   328  	return nil
   329  }
   330  
   331  // createLogGroup creates a log group for the instance of the awslogs logging driver
   332  func (l *logStream) createLogGroup() error {
   333  	if _, err := l.client.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{
   334  		LogGroupName: aws.String(l.logGroupName),
   335  	}); err != nil {
   336  		if awsErr, ok := err.(awserr.Error); ok {
   337  			fields := logrus.Fields{
   338  				"errorCode":      awsErr.Code(),
   339  				"message":        awsErr.Message(),
   340  				"origError":      awsErr.OrigErr(),
   341  				"logGroupName":   l.logGroupName,
   342  				"logCreateGroup": l.logCreateGroup,
   343  			}
   344  			if awsErr.Code() == resourceAlreadyExistsCode {
   345  				// Allow creation to succeed
   346  				logrus.WithFields(fields).Info("Log group already exists")
   347  				return nil
   348  			}
   349  			logrus.WithFields(fields).Error("Failed to create log group")
   350  		}
   351  		return err
   352  	}
   353  	return nil
   354  }
   355  
   356  // createLogStream creates a log stream for the instance of the awslogs logging driver
   357  func (l *logStream) createLogStream() error {
   358  	input := &cloudwatchlogs.CreateLogStreamInput{
   359  		LogGroupName:  aws.String(l.logGroupName),
   360  		LogStreamName: aws.String(l.logStreamName),
   361  	}
   362  
   363  	_, err := l.client.CreateLogStream(input)
   364  
   365  	if err != nil {
   366  		if awsErr, ok := err.(awserr.Error); ok {
   367  			fields := logrus.Fields{
   368  				"errorCode":     awsErr.Code(),
   369  				"message":       awsErr.Message(),
   370  				"origError":     awsErr.OrigErr(),
   371  				"logGroupName":  l.logGroupName,
   372  				"logStreamName": l.logStreamName,
   373  			}
   374  			if awsErr.Code() == resourceAlreadyExistsCode {
   375  				// Allow creation to succeed
   376  				logrus.WithFields(fields).Info("Log stream already exists")
   377  				return nil
   378  			}
   379  			logrus.WithFields(fields).Error("Failed to create log stream")
   380  		}
   381  	}
   382  	return err
   383  }
   384  
// newTicker is used for time-based batching.  newTicker is a variable such
// that the implementation can be swapped out for unit tests.
var newTicker = func(freq time.Duration) *time.Ticker {
	return time.NewTicker(freq)
}
   390  
   391  // collectBatch executes as a goroutine to perform batching of log events for
   392  // submission to the log stream.  If the awslogs-multiline-pattern or
   393  // awslogs-datetime-format options have been configured, multiline processing
   394  // is enabled, where log messages are stored in an event buffer until a multiline
   395  // pattern match is found, at which point the messages in the event buffer are
   396  // pushed to CloudWatch logs as a single log event.  Multiline messages are processed
   397  // according to the maximumBytesPerPut constraint, and the implementation only
   398  // allows for messages to be buffered for a maximum of 2*batchPublishFrequency
   399  // seconds.  When events are ready to be processed for submission to CloudWatch
   400  // Logs, the processEvents method is called.  If a multiline pattern is not
   401  // configured, log events are submitted to the processEvents method immediately.
   402  func (l *logStream) collectBatch() {
   403  	ticker := newTicker(batchPublishFrequency)
   404  	var eventBuffer []byte
   405  	var eventBufferTimestamp int64
   406  	var batch = newEventBatch()
   407  	for {
   408  		select {
   409  		case t := <-ticker.C:
   410  			// If event buffer is older than batch publish frequency flush the event buffer
   411  			if eventBufferTimestamp > 0 && len(eventBuffer) > 0 {
   412  				eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp
   413  				eventBufferExpired := eventBufferAge > int64(batchPublishFrequency)/int64(time.Millisecond)
   414  				eventBufferNegative := eventBufferAge < 0
   415  				if eventBufferExpired || eventBufferNegative {
   416  					l.processEvent(batch, eventBuffer, eventBufferTimestamp)
   417  					eventBuffer = eventBuffer[:0]
   418  				}
   419  			}
   420  			l.publishBatch(batch)
   421  			batch.reset()
   422  		case msg, more := <-l.messages:
   423  			if !more {
   424  				// Flush event buffer and release resources
   425  				l.processEvent(batch, eventBuffer, eventBufferTimestamp)
   426  				eventBuffer = eventBuffer[:0]
   427  				l.publishBatch(batch)
   428  				batch.reset()
   429  				return
   430  			}
   431  			if eventBufferTimestamp == 0 {
   432  				eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond)
   433  			}
   434  			unprocessedLine := msg.Line
   435  			if l.multilinePattern != nil {
   436  				if l.multilinePattern.Match(unprocessedLine) || len(eventBuffer)+len(unprocessedLine) > maximumBytesPerEvent {
   437  					// This is a new log event or we will exceed max bytes per event
   438  					// so flush the current eventBuffer to events and reset timestamp
   439  					l.processEvent(batch, eventBuffer, eventBufferTimestamp)
   440  					eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond)
   441  					eventBuffer = eventBuffer[:0]
   442  				}
   443  				// Append new line
   444  				processedLine := append(unprocessedLine, "\n"...)
   445  				eventBuffer = append(eventBuffer, processedLine...)
   446  				logger.PutMessage(msg)
   447  			} else {
   448  				l.processEvent(batch, unprocessedLine, msg.Timestamp.UnixNano()/int64(time.Millisecond))
   449  				logger.PutMessage(msg)
   450  			}
   451  		}
   452  	}
   453  }
   454  
// processEvent processes log events that are ready for submission to CloudWatch
// logs.  Batching is performed on time- and size-bases.  Time-based batching
// occurs at a 5 second interval (defined in the batchPublishFrequency const).
// Size-based batching is performed on the maximum number of events per batch
// (defined in maximumLogEventsPerPut) and the maximum number of total bytes in a
// batch (defined in maximumBytesPerPut).  Log messages are split by the maximum
// bytes per event (defined in maximumBytesPerEvent).  There is a fixed per-event
// byte overhead (defined in perEventBytes) which is accounted for in split- and
// batch-calculations.
func (l *logStream) processEvent(batch *eventBatch, unprocessedLine []byte, timestamp int64) {
	for len(unprocessedLine) > 0 {
		// Split line length so it does not exceed the maximum
		// NOTE(review): the split is by raw byte count, so a multi-byte UTF-8
		// character straddling the maximumBytesPerEvent boundary would be cut
		// in half — confirm whether callers guarantee line lengths avoid this.
		lineBytes := len(unprocessedLine)
		if lineBytes > maximumBytesPerEvent {
			lineBytes = maximumBytesPerEvent
		}
		line := unprocessedLine[:lineBytes]

		event := wrappedEvent{
			inputLogEvent: &cloudwatchlogs.InputLogEvent{
				Message:   aws.String(string(line)),
				Timestamp: aws.Int64(timestamp),
			},
			// insertOrder preserves submission order among equal timestamps
			// when the batch is later sorted (see byTimestamp).
			insertOrder: batch.count(),
		}

		added := batch.add(event, lineBytes)
		if added {
			unprocessedLine = unprocessedLine[lineBytes:]
		} else {
			// Batch is full (event count or byte limit): publish it and
			// retry the same chunk against the fresh batch.
			l.publishBatch(batch)
			batch.reset()
		}
	}
}
   490  
// publishBatch calls PutLogEvents for a given set of InputLogEvents,
// accounting for sequencing requirements (each request must reference the
// sequence token returned by the previous request).
func (l *logStream) publishBatch(batch *eventBatch) {
	if batch.isEmpty() {
		return
	}
	cwEvents := unwrapEvents(batch.events())

	nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken)

	if err != nil {
		if awsErr, ok := err.(awserr.Error); ok {
			if awsErr.Code() == dataAlreadyAcceptedCode {
				// already submitted, just grab the correct sequence token
				// NOTE(review): the token is recovered by taking the last
				// whitespace-separated word of the error message — fragile
				// against message-format changes in the service.
				parts := strings.Split(awsErr.Message(), " ")
				nextSequenceToken = &parts[len(parts)-1]
				logrus.WithFields(logrus.Fields{
					"errorCode":     awsErr.Code(),
					"message":       awsErr.Message(),
					"logGroupName":  l.logGroupName,
					"logStreamName": l.logStreamName,
				}).Info("Data already accepted, ignoring error")
				err = nil
			} else if awsErr.Code() == invalidSequenceTokenCode {
				// sequence code is bad, grab the correct one and retry
				// (same last-word-of-message extraction as above)
				parts := strings.Split(awsErr.Message(), " ")
				token := parts[len(parts)-1]
				nextSequenceToken, err = l.putLogEvents(cwEvents, &token)
			}
		}
	}
	if err != nil {
		// The batch is dropped on persistent failure; putLogEvents has
		// already logged the structured details.
		logrus.Error(err)
	} else {
		l.sequenceToken = nextSequenceToken
	}
}
   529  
   530  // putLogEvents wraps the PutLogEvents API
   531  func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) {
   532  	input := &cloudwatchlogs.PutLogEventsInput{
   533  		LogEvents:     events,
   534  		SequenceToken: sequenceToken,
   535  		LogGroupName:  aws.String(l.logGroupName),
   536  		LogStreamName: aws.String(l.logStreamName),
   537  	}
   538  	resp, err := l.client.PutLogEvents(input)
   539  	if err != nil {
   540  		if awsErr, ok := err.(awserr.Error); ok {
   541  			logrus.WithFields(logrus.Fields{
   542  				"errorCode":     awsErr.Code(),
   543  				"message":       awsErr.Message(),
   544  				"origError":     awsErr.OrigErr(),
   545  				"logGroupName":  l.logGroupName,
   546  				"logStreamName": l.logStreamName,
   547  			}).Error("Failed to put log events")
   548  		}
   549  		return nil, err
   550  	}
   551  	return resp.NextSequenceToken, nil
   552  }
   553  
   554  // ValidateLogOpt looks for awslogs-specific log options awslogs-region,
   555  // awslogs-group, awslogs-stream, awslogs-create-group, awslogs-datetime-format,
   556  // awslogs-multiline-pattern
   557  func ValidateLogOpt(cfg map[string]string) error {
   558  	for key := range cfg {
   559  		switch key {
   560  		case logGroupKey:
   561  		case logStreamKey:
   562  		case logCreateGroupKey:
   563  		case regionKey:
   564  		case tagKey:
   565  		case datetimeFormatKey:
   566  		case multilinePatternKey:
   567  		case credentialsEndpointKey:
   568  		default:
   569  			return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name)
   570  		}
   571  	}
   572  	if cfg[logGroupKey] == "" {
   573  		return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey)
   574  	}
   575  	if cfg[logCreateGroupKey] != "" {
   576  		if _, err := strconv.ParseBool(cfg[logCreateGroupKey]); err != nil {
   577  			return fmt.Errorf("must specify valid value for log opt '%s': %v", logCreateGroupKey, err)
   578  		}
   579  	}
   580  	_, datetimeFormatKeyExists := cfg[datetimeFormatKey]
   581  	_, multilinePatternKeyExists := cfg[multilinePatternKey]
   582  	if datetimeFormatKeyExists && multilinePatternKeyExists {
   583  		return fmt.Errorf("you cannot configure log opt '%s' and '%s' at the same time", datetimeFormatKey, multilinePatternKey)
   584  	}
   585  	return nil
   586  }
   587  
// Len returns the length of a byTimestamp slice.  Len is required by the
// sort.Interface interface.
func (slice byTimestamp) Len() int {
	return len(slice)
}
   593  
   594  // Less compares two values in a byTimestamp slice by Timestamp.  Less is
   595  // required by the sort.Interface interface.
   596  func (slice byTimestamp) Less(i, j int) bool {
   597  	iTimestamp, jTimestamp := int64(0), int64(0)
   598  	if slice != nil && slice[i].inputLogEvent.Timestamp != nil {
   599  		iTimestamp = *slice[i].inputLogEvent.Timestamp
   600  	}
   601  	if slice != nil && slice[j].inputLogEvent.Timestamp != nil {
   602  		jTimestamp = *slice[j].inputLogEvent.Timestamp
   603  	}
   604  	if iTimestamp == jTimestamp {
   605  		return slice[i].insertOrder < slice[j].insertOrder
   606  	}
   607  	return iTimestamp < jTimestamp
   608  }
   609  
// Swap swaps two values in a byTimestamp slice with each other.  Swap is
// required by the sort.Interface interface.
func (slice byTimestamp) Swap(i, j int) {
	slice[i], slice[j] = slice[j], slice[i]
}
   615  
   616  func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent {
   617  	cwEvents := make([]*cloudwatchlogs.InputLogEvent, len(events))
   618  	for i, input := range events {
   619  		cwEvents[i] = input.inputLogEvent
   620  	}
   621  	return cwEvents
   622  }
   623  
   624  func newEventBatch() *eventBatch {
   625  	return &eventBatch{
   626  		batch: make([]wrappedEvent, 0),
   627  		bytes: 0,
   628  	}
   629  }
   630  
// events returns a slice of wrappedEvents sorted in order of their
// timestamps and then by their insertion order (see `byTimestamp`).
// The sort is performed in place on the batch's backing slice.
//
// Warning: this method is not threadsafe and must not be used
// concurrently.
func (b *eventBatch) events() []wrappedEvent {
	sort.Sort(byTimestamp(b.batch))
	return b.batch
}
   640  
   641  // add adds an event to the batch of events accounting for the
   642  // necessary overhead for an event to be logged. An error will be
   643  // returned if the event cannot be added to the batch due to service
   644  // limits.
   645  //
   646  // Warning: this method is not threadsafe and must not be used
   647  // concurrently.
   648  func (b *eventBatch) add(event wrappedEvent, size int) bool {
   649  	addBytes := size + perEventBytes
   650  
   651  	// verify we are still within service limits
   652  	switch {
   653  	case len(b.batch)+1 > maximumLogEventsPerPut:
   654  		return false
   655  	case b.bytes+addBytes > maximumBytesPerPut:
   656  		return false
   657  	}
   658  
   659  	b.bytes += addBytes
   660  	b.batch = append(b.batch, event)
   661  
   662  	return true
   663  }
   664  
// count is the number of batched events.  Warning: this method
// is not threadsafe and must not be used concurrently.
func (b *eventBatch) count() int {
	return len(b.batch)
}
   670  
// size is the total number of bytes that the batch represents, including
// the perEventBytes overhead added per event.
//
// Warning: this method is not threadsafe and must not be used
// concurrently.
func (b *eventBatch) size() int {
	return b.bytes
}
   678  
   679  func (b *eventBatch) isEmpty() bool {
   680  	zeroEvents := b.count() == 0
   681  	zeroSize := b.size() == 0
   682  	return zeroEvents && zeroSize
   683  }
   684  
// reset prepares the batch for reuse, keeping the backing slice's capacity
// so subsequent batches avoid reallocation.
func (b *eventBatch) reset() {
	b.bytes = 0
	b.batch = b.batch[:0]
}