github.com/rumpl/bof@v23.0.0-rc.2+incompatible/daemon/logger/awslogs/cloudwatchlogs.go

     1  // Package awslogs provides the log driver for forwarding container logs to Amazon CloudWatch Logs
     2  package awslogs // import "github.com/docker/docker/daemon/logger/awslogs"
     3  
     4  import (
     5  	"fmt"
     6  	"os"
     7  	"regexp"
     8  	"runtime"
     9  	"sort"
    10  	"strconv"
    11  	"strings"
    12  	"sync"
    13  	"time"
    14  	"unicode/utf8"
    15  
    16  	"github.com/aws/aws-sdk-go/aws"
    17  	"github.com/aws/aws-sdk-go/aws/awserr"
    18  	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
    19  	"github.com/aws/aws-sdk-go/aws/ec2metadata"
    20  	"github.com/aws/aws-sdk-go/aws/request"
    21  	"github.com/aws/aws-sdk-go/aws/session"
    22  	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
    23  	"github.com/docker/docker/daemon/logger"
    24  	"github.com/docker/docker/daemon/logger/loggerutils"
    25  	"github.com/docker/docker/dockerversion"
    26  	"github.com/pkg/errors"
    27  	"github.com/sirupsen/logrus"
    28  )
    29  
    30  const (
    31  	name                   = "awslogs"
    32  	regionKey              = "awslogs-region"
    33  	endpointKey            = "awslogs-endpoint"
    34  	regionEnvKey           = "AWS_REGION"
    35  	logGroupKey            = "awslogs-group"
    36  	logStreamKey           = "awslogs-stream"
    37  	logCreateGroupKey      = "awslogs-create-group"
    38  	logCreateStreamKey     = "awslogs-create-stream"
    39  	tagKey                 = "tag"
    40  	datetimeFormatKey      = "awslogs-datetime-format"
    41  	multilinePatternKey    = "awslogs-multiline-pattern"
    42  	credentialsEndpointKey = "awslogs-credentials-endpoint" //nolint:gosec // G101: Potential hardcoded credentials
    43  	forceFlushIntervalKey  = "awslogs-force-flush-interval-seconds"
    44  	maxBufferedEventsKey   = "awslogs-max-buffered-events"
    45  	logFormatKey           = "awslogs-format"
    46  
    47  	defaultForceFlushInterval = 5 * time.Second
    48  	defaultMaxBufferedEvents  = 4096
    49  
    50  	// See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html
    51  	perEventBytes          = 26
    52  	maximumBytesPerPut     = 1048576
    53  	maximumLogEventsPerPut = 10000
    54  
    55  	// See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html
    56  	// Because the events are interpreted as UTF-8 encoded Unicode, invalid UTF-8 byte sequences are replaced with the
    57  	// Unicode replacement character (U+FFFD), which is a 3-byte sequence in UTF-8.  To compensate for that and to avoid
    58  	// splitting valid UTF-8 characters into invalid byte sequences, we calculate the length of each event assuming that
    59  	// this replacement happens.
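	// For example, a single invalid byte in a message is counted as 3 bytes when sizing an event,
	// since CloudWatch would store it as the 3-byte UTF-8 encoding of U+FFFD.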
    60  	maximumBytesPerEvent = 262144 - perEventBytes
    61  
    62  	resourceAlreadyExistsCode = "ResourceAlreadyExistsException"
    63  	dataAlreadyAcceptedCode   = "DataAlreadyAcceptedException"
    64  	invalidSequenceTokenCode  = "InvalidSequenceTokenException"
    65  	resourceNotFoundCode      = "ResourceNotFoundException"
    66  
    67  	credentialsEndpoint = "http://169.254.170.2" //nolint:gosec // G101: Potential hardcoded credentials
    68  
    69  	userAgentHeader = "User-Agent"
    70  
    71  	// See: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Embedded_Metric_Format_Specification.html
    72  	logsFormatHeader = "x-amzn-logs-format"
    73  	jsonEmfLogFormat = "json/emf"
    74  )
    75  
    76  type logStream struct {
    77  	logStreamName      string
    78  	logGroupName       string
    79  	logCreateGroup     bool
    80  	logCreateStream    bool
    81  	logNonBlocking     bool
    82  	forceFlushInterval time.Duration
    83  	multilinePattern   *regexp.Regexp
    84  	client             api
    85  	messages           chan *logger.Message
    86  	lock               sync.RWMutex
    87  	closed             bool
    88  	sequenceToken      *string
    89  }
    90  
    91  type logStreamConfig struct {
    92  	logStreamName      string
    93  	logGroupName       string
    94  	logCreateGroup     bool
    95  	logCreateStream    bool
    96  	logNonBlocking     bool
    97  	forceFlushInterval time.Duration
    98  	maxBufferedEvents  int
    99  	multilinePattern   *regexp.Regexp
   100  }
   101  
   102  var _ logger.SizedLogger = &logStream{}
   103  
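// api is the subset of the CloudWatch Logs SDK client used by this driver;
// modeling it as an interface lets tests substitute a fake client.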
   104  type api interface {
   105  	CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error)
   106  	CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error)
   107  	PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error)
   108  }
   109  
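// regionFinder is the subset of the EC2 Instance Metadata client used to
// discover the current region when none has been configured.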
   110  type regionFinder interface {
   111  	Region() (string, error)
   112  }
   113  
   114  type wrappedEvent struct {
   115  	inputLogEvent *cloudwatchlogs.InputLogEvent
   116  	insertOrder   int
   117  }
   118  type byTimestamp []wrappedEvent
   119  
   120  // init registers the awslogs driver
   121  func init() {
   122  	if err := logger.RegisterLogDriver(name, New); err != nil {
   123  		panic(err)
   124  	}
   125  	if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil {
   126  		panic(err)
   127  	}
   128  }
   129  
   130  // eventBatch holds the events that are batched for submission and the
    131  // associated data about it, such as the total byte size of the batch.
   132  //
   133  // Warning: this type is not threadsafe and must not be used
   134  // concurrently. This type is expected to be consumed in a single go
   135  // routine and never concurrently.
   136  type eventBatch struct {
   137  	batch []wrappedEvent
   138  	bytes int
   139  }
   140  
   141  // New creates an awslogs logger using the configuration passed in on the
    142  // context.  Supported context configuration variables are awslogs-region, awslogs-endpoint,
    143  // awslogs-group, awslogs-stream, awslogs-create-group, awslogs-create-stream, awslogs-multiline-pattern,
    144  // awslogs-datetime-format, awslogs-credentials-endpoint, awslogs-force-flush-interval-seconds, awslogs-max-buffered-events and awslogs-format.
   145  // When available, configuration is also taken from environment variables
   146  // AWS_REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, the shared credentials
   147  // file (~/.aws/credentials), and the EC2 Instance Metadata Service.
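// As an illustrative sketch (the container ID, region, and group name below are
// placeholders), the daemon would invoke New roughly like:
//
//	l, err := New(logger.Info{
//		ContainerID: "4e6a2e9d8c1b", // hypothetical ID; the default stream name is the full container ID
//		Config: map[string]string{
//			"awslogs-region":       "us-east-1",    // placeholder region
//			"awslogs-group":        "my-log-group", // required by ValidateLogOpt
//			"awslogs-create-group": "true",
//		},
//	})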
   148  func New(info logger.Info) (logger.Logger, error) {
   149  	containerStreamConfig, err := newStreamConfig(info)
   150  	if err != nil {
   151  		return nil, err
   152  	}
   153  	client, err := newAWSLogsClient(info)
   154  	if err != nil {
   155  		return nil, err
   156  	}
   157  
   158  	containerStream := &logStream{
   159  		logStreamName:      containerStreamConfig.logStreamName,
   160  		logGroupName:       containerStreamConfig.logGroupName,
   161  		logCreateGroup:     containerStreamConfig.logCreateGroup,
   162  		logCreateStream:    containerStreamConfig.logCreateStream,
   163  		logNonBlocking:     containerStreamConfig.logNonBlocking,
   164  		forceFlushInterval: containerStreamConfig.forceFlushInterval,
   165  		multilinePattern:   containerStreamConfig.multilinePattern,
   166  		client:             client,
   167  		messages:           make(chan *logger.Message, containerStreamConfig.maxBufferedEvents),
   168  	}
   169  
   170  	creationDone := make(chan bool)
   171  	if containerStream.logNonBlocking {
   172  		go func() {
   173  			backoff := 1
   174  			maxBackoff := 32
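			// Retry stream creation in the background with exponential backoff
			// (1s, 2s, 4s, ... capped at 32s) until it succeeds or the logger is closed.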
   175  			for {
   176  				// If logger is closed we are done
   177  				containerStream.lock.RLock()
   178  				if containerStream.closed {
   179  					containerStream.lock.RUnlock()
   180  					break
   181  				}
   182  				containerStream.lock.RUnlock()
   183  				err := containerStream.create()
   184  				if err == nil {
   185  					break
   186  				}
   187  
   188  				time.Sleep(time.Duration(backoff) * time.Second)
   189  				if backoff < maxBackoff {
   190  					backoff *= 2
   191  				}
   192  				logrus.
   193  					WithError(err).
   194  					WithField("container-id", info.ContainerID).
   195  					WithField("container-name", info.ContainerName).
   196  					Error("Error while trying to initialize awslogs. Retrying in: ", backoff, " seconds")
   197  			}
   198  			close(creationDone)
   199  		}()
   200  	} else {
   201  		if err = containerStream.create(); err != nil {
   202  			return nil, err
   203  		}
   204  		close(creationDone)
   205  	}
   206  	go containerStream.collectBatch(creationDone)
   207  
   208  	return containerStream, nil
   209  }
   210  
    211  // newStreamConfig parses most of the awslogs- options and prepares a config object used to create the actual log stream.
    212  // It has been factored out to ease unit testing of New above.
   213  func newStreamConfig(info logger.Info) (*logStreamConfig, error) {
   214  	logGroupName := info.Config[logGroupKey]
   215  	logStreamName, err := loggerutils.ParseLogTag(info, "{{.FullID}}")
   216  	if err != nil {
   217  		return nil, err
   218  	}
   219  	logCreateGroup := false
   220  	if info.Config[logCreateGroupKey] != "" {
   221  		logCreateGroup, err = strconv.ParseBool(info.Config[logCreateGroupKey])
   222  		if err != nil {
   223  			return nil, err
   224  		}
   225  	}
   226  
   227  	logNonBlocking := info.Config["mode"] == "non-blocking"
   228  
   229  	forceFlushInterval := defaultForceFlushInterval
   230  	if info.Config[forceFlushIntervalKey] != "" {
   231  		forceFlushIntervalAsInt, err := strconv.Atoi(info.Config[forceFlushIntervalKey])
   232  		if err != nil {
   233  			return nil, err
   234  		}
   235  		forceFlushInterval = time.Duration(forceFlushIntervalAsInt) * time.Second
   236  	}
   237  
   238  	maxBufferedEvents := int(defaultMaxBufferedEvents)
   239  	if info.Config[maxBufferedEventsKey] != "" {
   240  		maxBufferedEvents, err = strconv.Atoi(info.Config[maxBufferedEventsKey])
   241  		if err != nil {
   242  			return nil, err
   243  		}
   244  	}
   245  
   246  	if info.Config[logStreamKey] != "" {
   247  		logStreamName = info.Config[logStreamKey]
   248  	}
   249  	logCreateStream := true
   250  	if info.Config[logCreateStreamKey] != "" {
   251  		logCreateStream, err = strconv.ParseBool(info.Config[logCreateStreamKey])
   252  		if err != nil {
   253  			return nil, err
   254  		}
   255  	}
   256  
   257  	multilinePattern, err := parseMultilineOptions(info)
   258  	if err != nil {
   259  		return nil, err
   260  	}
   261  
   262  	containerStreamConfig := &logStreamConfig{
   263  		logStreamName:      logStreamName,
   264  		logGroupName:       logGroupName,
   265  		logCreateGroup:     logCreateGroup,
   266  		logCreateStream:    logCreateStream,
   267  		logNonBlocking:     logNonBlocking,
   268  		forceFlushInterval: forceFlushInterval,
   269  		maxBufferedEvents:  maxBufferedEvents,
   270  		multilinePattern:   multilinePattern,
   271  	}
   272  
   273  	return containerStreamConfig, nil
   274  }
   275  
   276  // Parses awslogs-multiline-pattern and awslogs-datetime-format options
   277  // If awslogs-datetime-format is present, convert the format from strftime
   278  // to regexp and return.
   279  // If awslogs-multiline-pattern is present, compile regexp and return
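// For example, an awslogs-datetime-format of "%Y-%m-%d" is expanded via the
// strftimeToRegex table below into roughly `\d{4}-(?:0[1-9]|1[0-2])-(?:0[1-9]|[12][0-9]|3[01])`,
// so a line containing a date such as "2023-01-31" starts a new log event.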
   280  func parseMultilineOptions(info logger.Info) (*regexp.Regexp, error) {
   281  	dateTimeFormat := info.Config[datetimeFormatKey]
    282  	multilinePatternValue := info.Config[multilinePatternKey]
   283  	// strftime input is parsed into a regular expression
   284  	if dateTimeFormat != "" {
   285  		// %. matches each strftime format sequence and ReplaceAllStringFunc
   286  		// looks up each format sequence in the conversion table strftimeToRegex
   287  		// to replace with a defined regular expression
   288  		r := regexp.MustCompile("%.")
    289  		multilinePatternValue = r.ReplaceAllStringFunc(dateTimeFormat, func(s string) string {
   290  			return strftimeToRegex[s]
   291  		})
   292  	}
    293  	if multilinePatternValue != "" {
    294  		multilinePattern, err := regexp.Compile(multilinePatternValue)
    295  		if err != nil {
    296  			return nil, errors.Wrapf(err, "awslogs could not parse multiline pattern %q", multilinePatternValue)
   297  		}
   298  		return multilinePattern, nil
   299  	}
   300  	return nil, nil
   301  }
   302  
   303  // Maps strftime format strings to regex
   304  var strftimeToRegex = map[string]string{
   305  	/*weekdayShort          */ `%a`: `(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)`,
   306  	/*weekdayFull           */ `%A`: `(?:Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)`,
   307  	/*weekdayZeroIndex      */ `%w`: `[0-6]`,
    308  	/*dayZeroPadded         */ `%d`: `(?:0[1-9]|[12][0-9]|3[01])`,
   309  	/*monthShort            */ `%b`: `(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)`,
   310  	/*monthFull             */ `%B`: `(?:January|February|March|April|May|June|July|August|September|October|November|December)`,
   311  	/*monthZeroPadded       */ `%m`: `(?:0[1-9]|1[0-2])`,
   312  	/*yearCentury           */ `%Y`: `\d{4}`,
   313  	/*yearZeroPadded        */ `%y`: `\d{2}`,
    314  	/*hour24ZeroPadded      */ `%H`: `(?:[01][0-9]|2[0-3])`,
   315  	/*hour12ZeroPadded      */ `%I`: `(?:0[0-9]|1[0-2])`,
    316  	/*AM or PM              */ `%p`: `[AP]M`,
   317  	/*minuteZeroPadded      */ `%M`: `[0-5][0-9]`,
   318  	/*secondZeroPadded      */ `%S`: `[0-5][0-9]`,
   319  	/*microsecondZeroPadded */ `%f`: `\d{6}`,
   320  	/*utcOffset             */ `%z`: `[+-]\d{4}`,
   321  	/*tzName                */ `%Z`: `[A-Z]{1,4}T`,
    322  	/*dayOfYearZeroPadded   */ `%j`: `(?:0[0-9][1-9]|[12][0-9][0-9]|3[0-5][0-9]|36[0-6])`,
   323  	/*milliseconds          */ `%L`: `\.\d{3}`,
   324  }
   325  
   326  // newRegionFinder is a variable such that the implementation
   327  // can be swapped out for unit tests.
   328  var newRegionFinder = func() (regionFinder, error) {
   329  	s, err := session.NewSession()
   330  	if err != nil {
   331  		return nil, err
   332  	}
   333  	return ec2metadata.New(s), nil
   334  }
   335  
   336  // newSDKEndpoint is a variable such that the implementation
   337  // can be swapped out for unit tests.
   338  var newSDKEndpoint = credentialsEndpoint
   339  
   340  // newAWSLogsClient creates the service client for Amazon CloudWatch Logs.
   341  // Customizations to the default client from the SDK include a Docker-specific
   342  // User-Agent string and automatic region detection using the EC2 Instance
   343  // Metadata Service when region is otherwise unspecified.
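// Note on precedence: the awslogs-region log option overrides the AWS_REGION
// environment variable, and the EC2 Instance Metadata Service is consulted only
// when neither yields a region.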
   344  func newAWSLogsClient(info logger.Info) (api, error) {
   345  	var region, endpoint *string
   346  	if os.Getenv(regionEnvKey) != "" {
   347  		region = aws.String(os.Getenv(regionEnvKey))
   348  	}
   349  	if info.Config[regionKey] != "" {
   350  		region = aws.String(info.Config[regionKey])
   351  	}
   352  	if info.Config[endpointKey] != "" {
   353  		endpoint = aws.String(info.Config[endpointKey])
   354  	}
   355  	if region == nil || *region == "" {
   356  		logrus.Info("Trying to get region from EC2 Metadata")
   357  		ec2MetadataClient, err := newRegionFinder()
   358  		if err != nil {
   359  			logrus.WithError(err).Error("could not create EC2 metadata client")
   360  			return nil, errors.Wrap(err, "could not create EC2 metadata client")
   361  		}
   362  
   363  		r, err := ec2MetadataClient.Region()
   364  		if err != nil {
   365  			logrus.WithError(err).Error("Could not get region from EC2 metadata, environment, or log option")
   366  			return nil, errors.New("Cannot determine region for awslogs driver")
   367  		}
   368  		region = &r
   369  	}
   370  
   371  	sess, err := session.NewSession()
   372  	if err != nil {
   373  		return nil, errors.New("Failed to create a service client session for awslogs driver")
   374  	}
   375  
   376  	// attach region to cloudwatchlogs config
   377  	sess.Config.Region = region
   378  
   379  	// attach endpoint to cloudwatchlogs config
   380  	if endpoint != nil {
   381  		sess.Config.Endpoint = endpoint
   382  	}
   383  
   384  	if uri, ok := info.Config[credentialsEndpointKey]; ok {
   385  		logrus.Debugf("Trying to get credentials from awslogs-credentials-endpoint")
   386  
   387  		endpoint := fmt.Sprintf("%s%s", newSDKEndpoint, uri)
   388  		creds := endpointcreds.NewCredentialsClient(*sess.Config, sess.Handlers, endpoint,
   389  			func(p *endpointcreds.Provider) {
   390  				p.ExpiryWindow = 5 * time.Minute
   391  			})
   392  
   393  		// attach credentials to cloudwatchlogs config
   394  		sess.Config.Credentials = creds
   395  	}
   396  
   397  	logrus.WithFields(logrus.Fields{
   398  		"region": *region,
   399  	}).Debug("Created awslogs client")
   400  
   401  	client := cloudwatchlogs.New(sess)
   402  
   403  	client.Handlers.Build.PushBackNamed(request.NamedHandler{
   404  		Name: "DockerUserAgentHandler",
   405  		Fn: func(r *request.Request) {
   406  			currentAgent := r.HTTPRequest.Header.Get(userAgentHeader)
   407  			r.HTTPRequest.Header.Set(userAgentHeader,
   408  				fmt.Sprintf("Docker %s (%s) %s",
   409  					dockerversion.Version, runtime.GOOS, currentAgent))
   410  		},
   411  	})
   412  
   413  	if info.Config[logFormatKey] != "" {
   414  		client.Handlers.Build.PushBackNamed(request.NamedHandler{
   415  			Name: "LogFormatHeaderHandler",
   416  			Fn: func(req *request.Request) {
   417  				req.HTTPRequest.Header.Set(logsFormatHeader, info.Config[logFormatKey])
   418  			},
   419  		})
   420  	}
   421  
   422  	return client, nil
   423  }
   424  
   425  // Name returns the name of the awslogs logging driver
   426  func (l *logStream) Name() string {
   427  	return name
   428  }
   429  
    430  // BufSize returns the maximum number of bytes per event that CloudWatch Logs can handle.
   431  func (l *logStream) BufSize() int {
   432  	return maximumBytesPerEvent
   433  }
   434  
   435  // Log submits messages for logging by an instance of the awslogs logging driver
   436  func (l *logStream) Log(msg *logger.Message) error {
   437  	l.lock.RLock()
   438  	defer l.lock.RUnlock()
   439  	if l.closed {
   440  		return errors.New("awslogs is closed")
   441  	}
   442  	if l.logNonBlocking {
   443  		select {
   444  		case l.messages <- msg:
   445  			return nil
   446  		default:
   447  			return errors.New("awslogs buffer is full")
   448  		}
   449  	}
   450  	l.messages <- msg
   451  	return nil
   452  }
   453  
   454  // Close closes the instance of the awslogs logging driver
   455  func (l *logStream) Close() error {
   456  	l.lock.Lock()
   457  	defer l.lock.Unlock()
   458  	if !l.closed {
   459  		close(l.messages)
   460  	}
   461  	l.closed = true
   462  	return nil
   463  }
   464  
   465  // create creates log group and log stream for the instance of the awslogs logging driver
   466  func (l *logStream) create() error {
   467  	err := l.createLogStream()
   468  	if err == nil {
   469  		return nil
   470  	}
   471  	if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == resourceNotFoundCode && l.logCreateGroup {
   472  		if err := l.createLogGroup(); err != nil {
   473  			return errors.Wrap(err, "failed to create Cloudwatch log group")
   474  		}
   475  		err = l.createLogStream()
   476  		if err == nil {
   477  			return nil
   478  		}
   479  	}
   480  	return errors.Wrap(err, "failed to create Cloudwatch log stream")
   481  }
   482  
   483  // createLogGroup creates a log group for the instance of the awslogs logging driver
   484  func (l *logStream) createLogGroup() error {
   485  	if _, err := l.client.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{
   486  		LogGroupName: aws.String(l.logGroupName),
   487  	}); err != nil {
   488  		if awsErr, ok := err.(awserr.Error); ok {
   489  			fields := logrus.Fields{
   490  				"errorCode":      awsErr.Code(),
   491  				"message":        awsErr.Message(),
   492  				"origError":      awsErr.OrigErr(),
   493  				"logGroupName":   l.logGroupName,
   494  				"logCreateGroup": l.logCreateGroup,
   495  			}
   496  			if awsErr.Code() == resourceAlreadyExistsCode {
   497  				// Allow creation to succeed
   498  				logrus.WithFields(fields).Info("Log group already exists")
   499  				return nil
   500  			}
   501  			logrus.WithFields(fields).Error("Failed to create log group")
   502  		}
   503  		return err
   504  	}
   505  	return nil
   506  }
   507  
   508  // createLogStream creates a log stream for the instance of the awslogs logging driver
   509  func (l *logStream) createLogStream() error {
    510  	// Return early if we do not want to create the log stream.
   511  	if !l.logCreateStream {
   512  		logrus.WithFields(logrus.Fields{
   513  			"logGroupName":    l.logGroupName,
   514  			"logStreamName":   l.logStreamName,
   515  			"logCreateStream": l.logCreateStream,
   516  		}).Info("Skipping creating log stream")
   517  		return nil
   518  	}
   519  
   520  	input := &cloudwatchlogs.CreateLogStreamInput{
   521  		LogGroupName:  aws.String(l.logGroupName),
   522  		LogStreamName: aws.String(l.logStreamName),
   523  	}
   524  
   525  	_, err := l.client.CreateLogStream(input)
   526  
   527  	if err != nil {
   528  		if awsErr, ok := err.(awserr.Error); ok {
   529  			fields := logrus.Fields{
   530  				"errorCode":     awsErr.Code(),
   531  				"message":       awsErr.Message(),
   532  				"origError":     awsErr.OrigErr(),
   533  				"logGroupName":  l.logGroupName,
   534  				"logStreamName": l.logStreamName,
   535  			}
   536  			if awsErr.Code() == resourceAlreadyExistsCode {
   537  				// Allow creation to succeed
   538  				logrus.WithFields(fields).Info("Log stream already exists")
   539  				return nil
   540  			}
   541  			logrus.WithFields(fields).Error("Failed to create log stream")
   542  		}
   543  	}
   544  	return err
   545  }
   546  
   547  // newTicker is used for time-based batching.  newTicker is a variable such
   548  // that the implementation can be swapped out for unit tests.
   549  var newTicker = func(freq time.Duration) *time.Ticker {
   550  	return time.NewTicker(freq)
   551  }
   552  
   553  // collectBatch executes as a goroutine to perform batching of log events for
   554  // submission to the log stream.  If the awslogs-multiline-pattern or
   555  // awslogs-datetime-format options have been configured, multiline processing
   556  // is enabled, where log messages are stored in an event buffer until a multiline
   557  // pattern match is found, at which point the messages in the event buffer are
    558  // pushed to CloudWatch Logs as a single log event.  Multiline messages are processed
    559  // according to the maximumBytesPerPut constraint, and the implementation only
    560  // allows for messages to be buffered for a maximum of twice the force-flush
    561  // interval (awslogs-force-flush-interval-seconds, 5 seconds by default).  When
    562  // events are ready for submission to CloudWatch Logs, the processEvent method is
    563  // called.  If a multiline pattern is not configured, log events are submitted to the processEvent method immediately.
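// For example, with awslogs-datetime-format set to "%Y-%m-%d", the continuation
// lines of a multi-line stack trace do not match the date pattern and are therefore
// appended to the buffered event started by the preceding dated line.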
   564  func (l *logStream) collectBatch(created chan bool) {
   565  	// Wait for the logstream/group to be created
   566  	<-created
   567  	flushInterval := l.forceFlushInterval
   568  	if flushInterval <= 0 {
   569  		flushInterval = defaultForceFlushInterval
   570  	}
   571  	ticker := newTicker(flushInterval)
   572  	var eventBuffer []byte
   573  	var eventBufferTimestamp int64
   574  	var batch = newEventBatch()
   575  	for {
   576  		select {
   577  		case t := <-ticker.C:
   578  			// If event buffer is older than batch publish frequency flush the event buffer
   579  			if eventBufferTimestamp > 0 && len(eventBuffer) > 0 {
   580  				eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp
   581  				eventBufferExpired := eventBufferAge >= int64(flushInterval)/int64(time.Millisecond)
   582  				eventBufferNegative := eventBufferAge < 0
   583  				if eventBufferExpired || eventBufferNegative {
   584  					l.processEvent(batch, eventBuffer, eventBufferTimestamp)
   585  					eventBuffer = eventBuffer[:0]
   586  				}
   587  			}
   588  			l.publishBatch(batch)
   589  			batch.reset()
   590  		case msg, more := <-l.messages:
   591  			if !more {
   592  				// Flush event buffer and release resources
   593  				l.processEvent(batch, eventBuffer, eventBufferTimestamp)
   594  				l.publishBatch(batch)
   595  				batch.reset()
   596  				return
   597  			}
   598  			if eventBufferTimestamp == 0 {
   599  				eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond)
   600  			}
   601  			line := msg.Line
   602  			if l.multilinePattern != nil {
   603  				lineEffectiveLen := effectiveLen(string(line))
   604  				if l.multilinePattern.Match(line) || effectiveLen(string(eventBuffer))+lineEffectiveLen > maximumBytesPerEvent {
   605  					// This is a new log event or we will exceed max bytes per event
   606  					// so flush the current eventBuffer to events and reset timestamp
   607  					l.processEvent(batch, eventBuffer, eventBufferTimestamp)
   608  					eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond)
   609  					eventBuffer = eventBuffer[:0]
   610  				}
   611  				// Append newline if event is less than max event size
   612  				if lineEffectiveLen < maximumBytesPerEvent {
   613  					line = append(line, "\n"...)
   614  				}
   615  				eventBuffer = append(eventBuffer, line...)
   616  				logger.PutMessage(msg)
   617  			} else {
   618  				l.processEvent(batch, line, msg.Timestamp.UnixNano()/int64(time.Millisecond))
   619  				logger.PutMessage(msg)
   620  			}
   621  		}
   622  	}
   623  }
   624  
   625  // processEvent processes log events that are ready for submission to CloudWatch
    626  // Logs.  Batching is performed on both a time and a size basis.  Time-based batching
    627  // occurs at the force-flush interval (defaultForceFlushInterval, 5 seconds, unless overridden).
   628  // Size-based batching is performed on the maximum number of events per batch
   629  // (defined in maximumLogEventsPerPut) and the maximum number of total bytes in a
   630  // batch (defined in maximumBytesPerPut).  Log messages are split by the maximum
   631  // bytes per event (defined in maximumBytesPerEvent).  There is a fixed per-event
   632  // byte overhead (defined in perEventBytes) which is accounted for in split- and
   633  // batch-calculations.  Because the events are interpreted as UTF-8 encoded
   634  // Unicode, invalid UTF-8 byte sequences are replaced with the Unicode
   635  // replacement character (U+FFFD), which is a 3-byte sequence in UTF-8.  To
   636  // compensate for that and to avoid splitting valid UTF-8 characters into
   637  // invalid byte sequences, we calculate the length of each event assuming that
   638  // this replacement happens.
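// For example, a 300,000-byte ASCII message is split into one event of
// maximumBytesPerEvent (262,118) bytes and a second event of the remaining
// 37,882 bytes, each also accruing the 26-byte per-event overhead in the batch.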
   639  func (l *logStream) processEvent(batch *eventBatch, bytes []byte, timestamp int64) {
   640  	for len(bytes) > 0 {
   641  		// Split line length so it does not exceed the maximum
   642  		splitOffset, lineBytes := findValidSplit(string(bytes), maximumBytesPerEvent)
   643  		line := bytes[:splitOffset]
   644  		event := wrappedEvent{
   645  			inputLogEvent: &cloudwatchlogs.InputLogEvent{
   646  				Message:   aws.String(string(line)),
   647  				Timestamp: aws.Int64(timestamp),
   648  			},
   649  			insertOrder: batch.count(),
   650  		}
   651  
   652  		added := batch.add(event, lineBytes)
   653  		if added {
   654  			bytes = bytes[splitOffset:]
   655  		} else {
   656  			l.publishBatch(batch)
   657  			batch.reset()
   658  		}
   659  	}
   660  }
   661  
   662  // effectiveLen counts the effective number of bytes in the string, after
   663  // UTF-8 normalization.  UTF-8 normalization includes replacing bytes that do
   664  // not constitute valid UTF-8 encoded Unicode codepoints with the Unicode
   665  // replacement codepoint U+FFFD (a 3-byte UTF-8 sequence, represented in Go as
   666  // utf8.RuneError)
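// For example, effectiveLen("héllo") is 6 because 'é' occupies two bytes, while a
// string consisting of one invalid byte such as 0xFF has an effective length of 3,
// the width of the replacement character.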
   667  func effectiveLen(line string) int {
   668  	effectiveBytes := 0
   669  	for _, rune := range line {
   670  		effectiveBytes += utf8.RuneLen(rune)
   671  	}
   672  	return effectiveBytes
   673  }
   674  
   675  // findValidSplit finds the byte offset to split a string without breaking valid
   676  // Unicode codepoints given a maximum number of total bytes.  findValidSplit
   677  // returns the byte offset for splitting a string or []byte, as well as the
   678  // effective number of bytes if the string were normalized to replace invalid
   679  // UTF-8 encoded bytes with the Unicode replacement character (a 3-byte UTF-8
   680  // sequence, represented in Go as utf8.RuneError)
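// For example, findValidSplit("aé", 2) returns (1, 1): the split falls after the
// first byte so that the two-byte 'é' is kept intact rather than cut in half.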
   681  func findValidSplit(line string, maxBytes int) (splitOffset, effectiveBytes int) {
   682  	for offset, rune := range line {
   683  		splitOffset = offset
   684  		if effectiveBytes+utf8.RuneLen(rune) > maxBytes {
   685  			return splitOffset, effectiveBytes
   686  		}
   687  		effectiveBytes += utf8.RuneLen(rune)
   688  	}
   689  	splitOffset = len(line)
   690  	return
   691  }
   692  
   693  // publishBatch calls PutLogEvents for a given set of InputLogEvents,
   694  // accounting for sequencing requirements (each request must reference the
   695  // sequence token returned by the previous request).
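// When the service reports DataAlreadyAcceptedException or InvalidSequenceTokenException,
// the expected token is recovered from the error message (assumed to be its last
// whitespace-separated word) and the batch is skipped or retried accordingly.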
   696  func (l *logStream) publishBatch(batch *eventBatch) {
   697  	if batch.isEmpty() {
   698  		return
   699  	}
   700  	cwEvents := unwrapEvents(batch.events())
   701  
   702  	nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken)
   703  
   704  	if err != nil {
   705  		if awsErr, ok := err.(awserr.Error); ok {
   706  			if awsErr.Code() == dataAlreadyAcceptedCode {
   707  				// already submitted, just grab the correct sequence token
   708  				parts := strings.Split(awsErr.Message(), " ")
   709  				nextSequenceToken = &parts[len(parts)-1]
   710  				logrus.WithFields(logrus.Fields{
   711  					"errorCode":     awsErr.Code(),
   712  					"message":       awsErr.Message(),
   713  					"logGroupName":  l.logGroupName,
   714  					"logStreamName": l.logStreamName,
   715  				}).Info("Data already accepted, ignoring error")
   716  				err = nil
   717  			} else if awsErr.Code() == invalidSequenceTokenCode {
   718  				// sequence code is bad, grab the correct one and retry
   719  				parts := strings.Split(awsErr.Message(), " ")
   720  				token := parts[len(parts)-1]
   721  				nextSequenceToken, err = l.putLogEvents(cwEvents, &token)
   722  			}
   723  		}
   724  	}
   725  	if err != nil {
   726  		logrus.Error(err)
   727  	} else {
   728  		l.sequenceToken = nextSequenceToken
   729  	}
   730  }
   731  
   732  // putLogEvents wraps the PutLogEvents API
   733  func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) {
   734  	input := &cloudwatchlogs.PutLogEventsInput{
   735  		LogEvents:     events,
   736  		SequenceToken: sequenceToken,
   737  		LogGroupName:  aws.String(l.logGroupName),
   738  		LogStreamName: aws.String(l.logStreamName),
   739  	}
   740  	resp, err := l.client.PutLogEvents(input)
   741  	if err != nil {
   742  		if awsErr, ok := err.(awserr.Error); ok {
   743  			logrus.WithFields(logrus.Fields{
   744  				"errorCode":     awsErr.Code(),
   745  				"message":       awsErr.Message(),
   746  				"origError":     awsErr.OrigErr(),
   747  				"logGroupName":  l.logGroupName,
   748  				"logStreamName": l.logStreamName,
   749  			}).Error("Failed to put log events")
   750  		}
   751  		return nil, err
   752  	}
   753  	return resp.NextSequenceToken, nil
   754  }
   755  
    756  // ValidateLogOpt looks for awslogs-specific log options: awslogs-region, awslogs-endpoint,
    757  // awslogs-group, awslogs-stream, awslogs-create-group, awslogs-create-stream, awslogs-datetime-format,
    758  // awslogs-multiline-pattern, awslogs-credentials-endpoint, awslogs-force-flush-interval-seconds, awslogs-max-buffered-events and awslogs-format.
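// For example, a minimal valid configuration needs only the log group; the region
// may be resolved later from the environment or instance metadata:
//
//	err := ValidateLogOpt(map[string]string{"awslogs-group": "my-log-group"}) // err == nil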
   759  func ValidateLogOpt(cfg map[string]string) error {
   760  	for key := range cfg {
   761  		switch key {
   762  		case logGroupKey:
   763  		case logStreamKey:
    764  	case logCreateGroupKey:
	case logCreateStreamKey:
   765  		case regionKey:
   766  		case endpointKey:
   767  		case tagKey:
   768  		case datetimeFormatKey:
   769  		case multilinePatternKey:
   770  		case credentialsEndpointKey:
   771  		case forceFlushIntervalKey:
   772  		case maxBufferedEventsKey:
   773  		case logFormatKey:
   774  		default:
   775  			return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name)
   776  		}
   777  	}
   778  	if cfg[logGroupKey] == "" {
   779  		return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey)
   780  	}
   781  	if cfg[logCreateGroupKey] != "" {
   782  		if _, err := strconv.ParseBool(cfg[logCreateGroupKey]); err != nil {
   783  			return fmt.Errorf("must specify valid value for log opt '%s': %v", logCreateGroupKey, err)
   784  		}
   785  	}
   786  	if cfg[forceFlushIntervalKey] != "" {
   787  		if value, err := strconv.Atoi(cfg[forceFlushIntervalKey]); err != nil || value <= 0 {
   788  			return fmt.Errorf("must specify a positive integer for log opt '%s': %v", forceFlushIntervalKey, cfg[forceFlushIntervalKey])
   789  		}
   790  	}
   791  	if cfg[maxBufferedEventsKey] != "" {
   792  		if value, err := strconv.Atoi(cfg[maxBufferedEventsKey]); err != nil || value <= 0 {
   793  			return fmt.Errorf("must specify a positive integer for log opt '%s': %v", maxBufferedEventsKey, cfg[maxBufferedEventsKey])
   794  		}
   795  	}
   796  	_, datetimeFormatKeyExists := cfg[datetimeFormatKey]
   797  	_, multilinePatternKeyExists := cfg[multilinePatternKey]
   798  	if datetimeFormatKeyExists && multilinePatternKeyExists {
   799  		return fmt.Errorf("you cannot configure log opt '%s' and '%s' at the same time", datetimeFormatKey, multilinePatternKey)
   800  	}
   801  
   802  	if cfg[logFormatKey] != "" {
   803  		// For now, only the "json/emf" log format is supported
   804  		if cfg[logFormatKey] != jsonEmfLogFormat {
   805  			return fmt.Errorf("unsupported log format '%s'", cfg[logFormatKey])
   806  		}
   807  		if datetimeFormatKeyExists || multilinePatternKeyExists {
   808  			return fmt.Errorf("you cannot configure log opt '%s' or '%s' when log opt '%s' is set to '%s'", datetimeFormatKey, multilinePatternKey, logFormatKey, jsonEmfLogFormat)
   809  		}
   810  	}
   811  
   812  	return nil
   813  }
   814  
   815  // Len returns the length of a byTimestamp slice.  Len is required by the
   816  // sort.Interface interface.
   817  func (slice byTimestamp) Len() int {
   818  	return len(slice)
   819  }
   820  
   821  // Less compares two values in a byTimestamp slice by Timestamp.  Less is
   822  // required by the sort.Interface interface.
   823  func (slice byTimestamp) Less(i, j int) bool {
   824  	iTimestamp, jTimestamp := int64(0), int64(0)
   825  	if slice != nil && slice[i].inputLogEvent.Timestamp != nil {
   826  		iTimestamp = *slice[i].inputLogEvent.Timestamp
   827  	}
   828  	if slice != nil && slice[j].inputLogEvent.Timestamp != nil {
   829  		jTimestamp = *slice[j].inputLogEvent.Timestamp
   830  	}
   831  	if iTimestamp == jTimestamp {
   832  		return slice[i].insertOrder < slice[j].insertOrder
   833  	}
   834  	return iTimestamp < jTimestamp
   835  }
   836  
   837  // Swap swaps two values in a byTimestamp slice with each other.  Swap is
   838  // required by the sort.Interface interface.
   839  func (slice byTimestamp) Swap(i, j int) {
   840  	slice[i], slice[j] = slice[j], slice[i]
   841  }
   842  
   843  func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent {
   844  	cwEvents := make([]*cloudwatchlogs.InputLogEvent, len(events))
   845  	for i, input := range events {
   846  		cwEvents[i] = input.inputLogEvent
   847  	}
   848  	return cwEvents
   849  }
   850  
   851  func newEventBatch() *eventBatch {
   852  	return &eventBatch{
   853  		batch: make([]wrappedEvent, 0),
   854  		bytes: 0,
   855  	}
   856  }
   857  
   858  // events returns a slice of wrappedEvents sorted in order of their
   859  // timestamps and then by their insertion order (see `byTimestamp`).
   860  //
   861  // Warning: this method is not threadsafe and must not be used
   862  // concurrently.
   863  func (b *eventBatch) events() []wrappedEvent {
   864  	sort.Sort(byTimestamp(b.batch))
   865  	return b.batch
   866  }
   867  
    868  // add adds an event to the batch of events, accounting for the
    869  // necessary overhead for an event to be logged. It returns false
    870  // if the event cannot be added to the batch without exceeding
    871  // service limits.
   872  //
   873  // Warning: this method is not threadsafe and must not be used
   874  // concurrently.
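// For example, an event whose message is maximumBytesPerEvent (262,118) bytes long
// occupies 262,144 bytes of the 1,048,576-byte maximumBytesPerPut budget once the
// 26-byte overhead is added, so at most four such events fit in a single batch.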
   875  func (b *eventBatch) add(event wrappedEvent, size int) bool {
   876  	addBytes := size + perEventBytes
   877  
   878  	// verify we are still within service limits
   879  	switch {
   880  	case len(b.batch)+1 > maximumLogEventsPerPut:
   881  		return false
   882  	case b.bytes+addBytes > maximumBytesPerPut:
   883  		return false
   884  	}
   885  
   886  	b.bytes += addBytes
   887  	b.batch = append(b.batch, event)
   888  
   889  	return true
   890  }
   891  
   892  // count is the number of batched events.  Warning: this method
   893  // is not threadsafe and must not be used concurrently.
   894  func (b *eventBatch) count() int {
   895  	return len(b.batch)
   896  }
   897  
   898  // size is the total number of bytes that the batch represents.
   899  //
   900  // Warning: this method is not threadsafe and must not be used
   901  // concurrently.
   902  func (b *eventBatch) size() int {
   903  	return b.bytes
   904  }
   905  
   906  func (b *eventBatch) isEmpty() bool {
   907  	zeroEvents := b.count() == 0
   908  	zeroSize := b.size() == 0
   909  	return zeroEvents && zeroSize
   910  }
   911  
   912  // reset prepares the batch for reuse.
   913  func (b *eventBatch) reset() {
   914  	b.bytes = 0
   915  	b.batch = b.batch[:0]
   916  }