github.com/Prakhar-Agarwal-byte/moby@v0.0.0-20231027092010-a14e3e8ab87e/daemon/logger/awslogs/cloudwatchlogs_test.go (about) 1 package awslogs // import "github.com/Prakhar-Agarwal-byte/moby/daemon/logger/awslogs" 2 3 import ( 4 "context" 5 "errors" 6 "fmt" 7 "net/http" 8 "net/http/httptest" 9 "reflect" 10 "regexp" 11 "strconv" 12 "strings" 13 "sync/atomic" 14 "testing" 15 "time" 16 17 "github.com/aws/aws-sdk-go-v2/aws" 18 "github.com/aws/aws-sdk-go-v2/config" 19 "github.com/aws/aws-sdk-go-v2/credentials" 20 "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" 21 "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" 22 "github.com/Prakhar-Agarwal-byte/moby/daemon/logger" 23 "github.com/Prakhar-Agarwal-byte/moby/daemon/logger/loggerutils" 24 "github.com/Prakhar-Agarwal-byte/moby/dockerversion" 25 "gotest.tools/v3/assert" 26 is "gotest.tools/v3/assert/cmp" 27 ) 28 29 const ( 30 groupName = "groupName" 31 streamName = "streamName" 32 sequenceToken = "sequenceToken" 33 nextSequenceToken = "nextSequenceToken" 34 logline = "this is a log line\r" 35 multilineLogline = "2017-01-01 01:01:44 This is a multiline log entry\r" 36 ) 37 38 // Generates i multi-line events each with j lines 39 func (l *logStream) logGenerator(lineCount int, multilineCount int) { 40 for i := 0; i < multilineCount; i++ { 41 l.Log(&logger.Message{ 42 Line: []byte(multilineLogline), 43 Timestamp: time.Time{}, 44 }) 45 for j := 0; j < lineCount; j++ { 46 l.Log(&logger.Message{ 47 Line: []byte(logline), 48 Timestamp: time.Time{}, 49 }) 50 } 51 } 52 } 53 54 func testEventBatch(events []wrappedEvent) *eventBatch { 55 batch := newEventBatch() 56 for _, event := range events { 57 eventlen := len([]byte(*event.inputLogEvent.Message)) 58 batch.add(event, eventlen) 59 } 60 return batch 61 } 62 63 func TestNewStreamConfig(t *testing.T) { 64 tests := []struct { 65 logStreamName string 66 logGroupName string 67 logCreateGroup string 68 logCreateStream string 69 logNonBlocking string 70 forceFlushInterval 
string 71 maxBufferedEvents string 72 datetimeFormat string 73 multilinePattern string 74 shouldErr bool 75 testName string 76 }{ 77 {"", groupName, "", "", "", "", "", "", "", false, "defaults"}, 78 {"", groupName, "invalid create group", "", "", "", "", "", "", true, "invalid create group"}, 79 {"", groupName, "", "", "", "invalid flush interval", "", "", "", true, "invalid flush interval"}, 80 {"", groupName, "", "", "", "", "invalid max buffered events", "", "", true, "invalid max buffered events"}, 81 {"", groupName, "", "", "", "", "", "", "n{1001}", true, "invalid multiline pattern"}, 82 {"", groupName, "", "", "", "15", "", "", "", false, "flush interval at 15"}, 83 {"", groupName, "", "", "", "", "1024", "", "", false, "max buffered events at 1024"}, 84 } 85 86 for _, tc := range tests { 87 t.Run(tc.testName, func(t *testing.T) { 88 cfg := map[string]string{ 89 logGroupKey: tc.logGroupName, 90 logCreateGroupKey: tc.logCreateGroup, 91 "mode": tc.logNonBlocking, 92 forceFlushIntervalKey: tc.forceFlushInterval, 93 maxBufferedEventsKey: tc.maxBufferedEvents, 94 logStreamKey: tc.logStreamName, 95 logCreateStreamKey: tc.logCreateStream, 96 datetimeFormatKey: tc.datetimeFormat, 97 multilinePatternKey: tc.multilinePattern, 98 } 99 100 info := logger.Info{ 101 Config: cfg, 102 } 103 logStreamConfig, err := newStreamConfig(info) 104 if tc.shouldErr { 105 assert.Check(t, err != nil, "Expected an error") 106 } else { 107 assert.Check(t, err == nil, "Unexpected error") 108 assert.Check(t, logStreamConfig.logGroupName == tc.logGroupName, "Unexpected logGroupName") 109 if tc.forceFlushInterval != "" { 110 forceFlushIntervalAsInt, _ := strconv.Atoi(info.Config[forceFlushIntervalKey]) 111 assert.Check(t, logStreamConfig.forceFlushInterval == time.Duration(forceFlushIntervalAsInt)*time.Second, "Unexpected forceFlushInterval") 112 } 113 if tc.maxBufferedEvents != "" { 114 maxBufferedEvents, _ := strconv.Atoi(info.Config[maxBufferedEventsKey]) 115 assert.Check(t, 
logStreamConfig.maxBufferedEvents == maxBufferedEvents, "Unexpected maxBufferedEvents") 116 } 117 } 118 }) 119 } 120 } 121 122 func TestNewAWSLogsClientUserAgentHandler(t *testing.T) { 123 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 124 userAgent := r.Header.Get("User-Agent") 125 assert.Check(t, is.Contains(userAgent, "Docker/"+dockerversion.Version)) 126 fmt.Fprintln(w, "{}") 127 })) 128 defer ts.Close() 129 130 info := logger.Info{ 131 Config: map[string]string{ 132 regionKey: "us-east-1", 133 endpointKey: ts.URL, 134 }, 135 } 136 137 client, err := newAWSLogsClient( 138 info, 139 config.WithCredentialsProvider(credentials.StaticCredentialsProvider{ 140 Value: aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET", SessionToken: "SESSION"}, 141 }), 142 ) 143 assert.NilError(t, err) 144 145 _, err = client.CreateLogGroup(context.TODO(), &cloudwatchlogs.CreateLogGroupInput{LogGroupName: aws.String("foo")}) 146 assert.NilError(t, err) 147 } 148 149 func TestNewAWSLogsClientLogFormatHeaderHandler(t *testing.T) { 150 tests := []struct { 151 logFormat string 152 expectedHeaderValue string 153 }{ 154 { 155 logFormat: jsonEmfLogFormat, 156 expectedHeaderValue: "json/emf", 157 }, 158 { 159 logFormat: "", 160 expectedHeaderValue: "", 161 }, 162 } 163 for _, tc := range tests { 164 t.Run(tc.logFormat, func(t *testing.T) { 165 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 166 logFormatHeaderVal := r.Header.Get("x-amzn-logs-format") 167 assert.Check(t, is.Equal(tc.expectedHeaderValue, logFormatHeaderVal)) 168 fmt.Fprintln(w, "{}") 169 })) 170 defer ts.Close() 171 172 info := logger.Info{ 173 Config: map[string]string{ 174 regionKey: "us-east-1", 175 logFormatKey: tc.logFormat, 176 endpointKey: ts.URL, 177 }, 178 } 179 180 client, err := newAWSLogsClient( 181 info, 182 config.WithCredentialsProvider(credentials.StaticCredentialsProvider{ 183 Value: aws.Credentials{AccessKeyID: 
"AKID", SecretAccessKey: "SECRET", SessionToken: "SESSION"}, 184 }), 185 ) 186 assert.NilError(t, err) 187 188 _, err = client.CreateLogGroup(context.TODO(), &cloudwatchlogs.CreateLogGroupInput{LogGroupName: aws.String("foo")}) 189 assert.NilError(t, err) 190 }) 191 } 192 } 193 194 func TestNewAWSLogsClientAWSLogsEndpoint(t *testing.T) { 195 called := atomic.Value{} // for go1.19 and later, can use atomic.Bool 196 called.Store(false) 197 198 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 199 called.Store(true) 200 fmt.Fprintln(w, "{}") 201 })) 202 defer ts.Close() 203 204 info := logger.Info{ 205 Config: map[string]string{ 206 regionKey: "us-east-1", 207 endpointKey: ts.URL, 208 }, 209 } 210 211 client, err := newAWSLogsClient( 212 info, 213 config.WithCredentialsProvider(credentials.StaticCredentialsProvider{ 214 Value: aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET", SessionToken: "SESSION"}, 215 }), 216 ) 217 assert.NilError(t, err) 218 219 _, err = client.CreateLogGroup(context.TODO(), &cloudwatchlogs.CreateLogGroupInput{LogGroupName: aws.String("foo")}) 220 assert.NilError(t, err) 221 222 // make sure the endpoint was actually hit 223 assert.Check(t, called.Load().(bool)) 224 } 225 226 func TestNewAWSLogsClientRegionDetect(t *testing.T) { 227 info := logger.Info{ 228 Config: map[string]string{}, 229 } 230 231 mockMetadata := newMockMetadataClient() 232 newRegionFinder = func(context.Context) (regionFinder, error) { 233 return mockMetadata, nil 234 } 235 mockMetadata.regionResult <- ®ionResult{ 236 successResult: "us-east-1", 237 } 238 239 _, err := newAWSLogsClient(info) 240 assert.NilError(t, err) 241 } 242 243 func TestCreateSuccess(t *testing.T) { 244 mockClient := &mockClient{} 245 stream := &logStream{ 246 client: mockClient, 247 logGroupName: groupName, 248 logStreamName: streamName, 249 logCreateStream: true, 250 } 251 var input *cloudwatchlogs.CreateLogStreamInput 252 
mockClient.createLogStreamFunc = func(ctx context.Context, i *cloudwatchlogs.CreateLogStreamInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.CreateLogStreamOutput, error) { 253 input = i 254 return &cloudwatchlogs.CreateLogStreamOutput{}, nil 255 } 256 257 err := stream.create() 258 259 assert.NilError(t, err) 260 assert.Equal(t, groupName, aws.ToString(input.LogGroupName), "LogGroupName") 261 assert.Equal(t, streamName, aws.ToString(input.LogStreamName), "LogStreamName") 262 } 263 264 func TestCreateStreamSkipped(t *testing.T) { 265 mockClient := &mockClient{} 266 stream := &logStream{ 267 client: mockClient, 268 logGroupName: groupName, 269 logStreamName: streamName, 270 logCreateStream: false, 271 } 272 mockClient.createLogStreamFunc = func(ctx context.Context, i *cloudwatchlogs.CreateLogStreamInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.CreateLogStreamOutput, error) { 273 t.Error("CreateLogStream should not be called") 274 return nil, errors.New("should not be called") 275 } 276 277 err := stream.create() 278 279 assert.NilError(t, err) 280 } 281 282 func TestCreateLogGroupSuccess(t *testing.T) { 283 mockClient := &mockClient{} 284 stream := &logStream{ 285 client: mockClient, 286 logGroupName: groupName, 287 logStreamName: streamName, 288 logCreateGroup: true, 289 logCreateStream: true, 290 } 291 var logGroupInput *cloudwatchlogs.CreateLogGroupInput 292 mockClient.createLogGroupFunc = func(ctx context.Context, input *cloudwatchlogs.CreateLogGroupInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.CreateLogGroupOutput, error) { 293 logGroupInput = input 294 return &cloudwatchlogs.CreateLogGroupOutput{}, nil 295 } 296 var logStreamInput *cloudwatchlogs.CreateLogStreamInput 297 createLogStreamCalls := 0 298 mockClient.createLogStreamFunc = func(ctx context.Context, input *cloudwatchlogs.CreateLogStreamInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.CreateLogStreamOutput, error) { 299 createLogStreamCalls++ 
300 if logGroupInput == nil { 301 // log group not created yet 302 return nil, &types.ResourceNotFoundException{} 303 } 304 logStreamInput = input 305 return &cloudwatchlogs.CreateLogStreamOutput{}, nil 306 } 307 308 err := stream.create() 309 310 assert.NilError(t, err) 311 if createLogStreamCalls < 2 { 312 t.Errorf("Expected CreateLogStream to be called twice, was called %d times", createLogStreamCalls) 313 } 314 assert.Check(t, logGroupInput != nil) 315 assert.Equal(t, groupName, aws.ToString(logGroupInput.LogGroupName), "LogGroupName in LogGroupInput") 316 assert.Check(t, logStreamInput != nil) 317 assert.Equal(t, groupName, aws.ToString(logStreamInput.LogGroupName), "LogGroupName in LogStreamInput") 318 assert.Equal(t, streamName, aws.ToString(logStreamInput.LogStreamName), "LogStreamName in LogStreamInput") 319 } 320 321 func TestCreateError(t *testing.T) { 322 mockClient := &mockClient{} 323 stream := &logStream{ 324 client: mockClient, 325 logCreateStream: true, 326 } 327 mockClient.createLogStreamFunc = func(ctx context.Context, i *cloudwatchlogs.CreateLogStreamInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.CreateLogStreamOutput, error) { 328 return nil, errors.New("error") 329 } 330 331 err := stream.create() 332 333 if err == nil { 334 t.Fatal("Expected non-nil err") 335 } 336 } 337 338 func TestCreateAlreadyExists(t *testing.T) { 339 mockClient := &mockClient{} 340 stream := &logStream{ 341 client: mockClient, 342 logCreateStream: true, 343 } 344 calls := 0 345 mockClient.createLogStreamFunc = func(ctx context.Context, input *cloudwatchlogs.CreateLogStreamInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.CreateLogStreamOutput, error) { 346 calls++ 347 return nil, &types.ResourceAlreadyExistsException{} 348 } 349 350 err := stream.create() 351 352 assert.NilError(t, err) 353 assert.Equal(t, 1, calls) 354 } 355 356 func TestLogClosed(t *testing.T) { 357 mockClient := &mockClient{} 358 stream := &logStream{ 359 client: 
mockClient, 360 closed: true, 361 } 362 err := stream.Log(&logger.Message{}) 363 assert.Check(t, err != nil) 364 } 365 366 // TestLogBlocking tests that the Log method blocks appropriately when 367 // non-blocking behavior is not enabled. Blocking is achieved through an 368 // internal channel that must be drained for Log to return. 369 func TestLogBlocking(t *testing.T) { 370 mockClient := &mockClient{} 371 stream := &logStream{ 372 client: mockClient, 373 messages: make(chan *logger.Message), 374 } 375 376 errorCh := make(chan error, 1) 377 started := make(chan bool) 378 go func() { 379 started <- true 380 err := stream.Log(&logger.Message{}) 381 errorCh <- err 382 }() 383 // block until the goroutine above has started 384 <-started 385 select { 386 case err := <-errorCh: 387 t.Fatal("Expected stream.Log to block: ", err) 388 default: 389 } 390 // assuming it is blocked, we can now try to drain the internal channel and 391 // unblock it 392 select { 393 case <-time.After(10 * time.Millisecond): 394 // if we're unable to drain the channel within 10ms, something seems broken 395 t.Fatal("Expected to be able to read from stream.messages but was unable to") 396 case <-stream.messages: 397 } 398 select { 399 case err := <-errorCh: 400 assert.NilError(t, err) 401 402 case <-time.After(30 * time.Second): 403 t.Fatal("timed out waiting for read") 404 } 405 } 406 407 func TestLogBufferEmpty(t *testing.T) { 408 mockClient := &mockClient{} 409 stream := &logStream{ 410 client: mockClient, 411 messages: make(chan *logger.Message, 1), 412 } 413 err := stream.Log(&logger.Message{}) 414 assert.NilError(t, err) 415 } 416 417 func TestPublishBatchSuccess(t *testing.T) { 418 mockClient := &mockClient{} 419 stream := &logStream{ 420 client: mockClient, 421 logGroupName: groupName, 422 logStreamName: streamName, 423 sequenceToken: aws.String(sequenceToken), 424 } 425 var input *cloudwatchlogs.PutLogEventsInput 426 mockClient.putLogEventsFunc = func(ctx context.Context, i 
*cloudwatchlogs.PutLogEventsInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) { 427 input = i 428 return &cloudwatchlogs.PutLogEventsOutput{ 429 NextSequenceToken: aws.String(nextSequenceToken), 430 }, nil 431 } 432 events := []wrappedEvent{ 433 { 434 inputLogEvent: types.InputLogEvent{ 435 Message: aws.String(logline), 436 }, 437 }, 438 } 439 440 stream.publishBatch(testEventBatch(events)) 441 assert.Equal(t, nextSequenceToken, aws.ToString(stream.sequenceToken), "sequenceToken") 442 assert.Assert(t, input != nil) 443 assert.Equal(t, sequenceToken, aws.ToString(input.SequenceToken), "input.SequenceToken") 444 assert.Assert(t, len(input.LogEvents) == 1) 445 assert.Equal(t, events[0].inputLogEvent, input.LogEvents[0]) 446 } 447 448 func TestPublishBatchError(t *testing.T) { 449 mockClient := &mockClient{} 450 stream := &logStream{ 451 client: mockClient, 452 logGroupName: groupName, 453 logStreamName: streamName, 454 sequenceToken: aws.String(sequenceToken), 455 } 456 mockClient.putLogEventsFunc = func(ctx context.Context, input *cloudwatchlogs.PutLogEventsInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) { 457 return nil, errors.New("error") 458 } 459 460 events := []wrappedEvent{ 461 { 462 inputLogEvent: types.InputLogEvent{ 463 Message: aws.String(logline), 464 }, 465 }, 466 } 467 468 stream.publishBatch(testEventBatch(events)) 469 assert.Equal(t, sequenceToken, aws.ToString(stream.sequenceToken)) 470 } 471 472 func TestPublishBatchInvalidSeqSuccess(t *testing.T) { 473 mockClient := &mockClient{} 474 stream := &logStream{ 475 client: mockClient, 476 logGroupName: groupName, 477 logStreamName: streamName, 478 sequenceToken: aws.String(sequenceToken), 479 } 480 calls := make([]*cloudwatchlogs.PutLogEventsInput, 0) 481 mockClient.putLogEventsFunc = func(ctx context.Context, input *cloudwatchlogs.PutLogEventsInput, opts ...func(*cloudwatchlogs.Options)) 
(*cloudwatchlogs.PutLogEventsOutput, error) { 482 calls = append(calls, input) 483 if aws.ToString(input.SequenceToken) != "token" { 484 return nil, &types.InvalidSequenceTokenException{ 485 ExpectedSequenceToken: aws.String("token"), 486 } 487 } 488 return &cloudwatchlogs.PutLogEventsOutput{ 489 NextSequenceToken: aws.String(nextSequenceToken), 490 }, nil 491 } 492 493 events := []wrappedEvent{ 494 { 495 inputLogEvent: types.InputLogEvent{ 496 Message: aws.String(logline), 497 }, 498 }, 499 } 500 501 stream.publishBatch(testEventBatch(events)) 502 assert.Equal(t, nextSequenceToken, aws.ToString(stream.sequenceToken)) 503 assert.Assert(t, len(calls) == 2) 504 argument := calls[0] 505 assert.Assert(t, argument != nil) 506 assert.Equal(t, sequenceToken, aws.ToString(argument.SequenceToken)) 507 assert.Assert(t, len(argument.LogEvents) == 1) 508 assert.Equal(t, events[0].inputLogEvent, argument.LogEvents[0]) 509 510 argument = calls[1] 511 assert.Assert(t, argument != nil) 512 assert.Equal(t, "token", aws.ToString(argument.SequenceToken)) 513 assert.Assert(t, len(argument.LogEvents) == 1) 514 assert.Equal(t, events[0].inputLogEvent, argument.LogEvents[0]) 515 } 516 517 func TestPublishBatchAlreadyAccepted(t *testing.T) { 518 mockClient := &mockClient{} 519 stream := &logStream{ 520 client: mockClient, 521 logGroupName: groupName, 522 logStreamName: streamName, 523 sequenceToken: aws.String(sequenceToken), 524 } 525 calls := make([]*cloudwatchlogs.PutLogEventsInput, 0) 526 mockClient.putLogEventsFunc = func(ctx context.Context, input *cloudwatchlogs.PutLogEventsInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) { 527 calls = append(calls, input) 528 return nil, &types.DataAlreadyAcceptedException{ 529 ExpectedSequenceToken: aws.String("token"), 530 } 531 } 532 533 events := []wrappedEvent{ 534 { 535 inputLogEvent: types.InputLogEvent{ 536 Message: aws.String(logline), 537 }, 538 }, 539 } 540 541 
stream.publishBatch(testEventBatch(events)) 542 assert.Assert(t, stream.sequenceToken != nil) 543 assert.Equal(t, "token", aws.ToString(stream.sequenceToken)) 544 assert.Assert(t, len(calls) == 1) 545 argument := calls[0] 546 assert.Assert(t, argument != nil) 547 assert.Equal(t, sequenceToken, aws.ToString(argument.SequenceToken)) 548 assert.Assert(t, len(argument.LogEvents) == 1) 549 assert.Equal(t, events[0].inputLogEvent, argument.LogEvents[0]) 550 } 551 552 func TestCollectBatchSimple(t *testing.T) { 553 mockClient := &mockClient{} 554 stream := &logStream{ 555 client: mockClient, 556 logGroupName: groupName, 557 logStreamName: streamName, 558 sequenceToken: aws.String(sequenceToken), 559 messages: make(chan *logger.Message), 560 } 561 calls := make([]*cloudwatchlogs.PutLogEventsInput, 0) 562 mockClient.putLogEventsFunc = func(ctx context.Context, input *cloudwatchlogs.PutLogEventsInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) { 563 calls = append(calls, input) 564 return &cloudwatchlogs.PutLogEventsOutput{ 565 NextSequenceToken: aws.String(nextSequenceToken), 566 }, nil 567 } 568 ticks := make(chan time.Time) 569 newTicker = func(_ time.Duration) *time.Ticker { 570 return &time.Ticker{ 571 C: ticks, 572 } 573 } 574 d := make(chan bool) 575 close(d) 576 go stream.collectBatch(d) 577 578 stream.Log(&logger.Message{ 579 Line: []byte(logline), 580 Timestamp: time.Time{}, 581 }) 582 583 ticks <- time.Time{} 584 ticks <- time.Time{} 585 stream.Close() 586 587 assert.Assert(t, len(calls) == 1) 588 argument := calls[0] 589 assert.Assert(t, argument != nil) 590 assert.Assert(t, len(argument.LogEvents) == 1) 591 assert.Equal(t, logline, aws.ToString(argument.LogEvents[0].Message)) 592 } 593 594 func TestCollectBatchTicker(t *testing.T) { 595 mockClient := &mockClient{} 596 stream := &logStream{ 597 client: mockClient, 598 logGroupName: groupName, 599 logStreamName: streamName, 600 sequenceToken: aws.String(sequenceToken), 601 
messages: make(chan *logger.Message), 602 } 603 calls := make([]*cloudwatchlogs.PutLogEventsInput, 0) 604 called := make(chan struct{}, 50) 605 mockClient.putLogEventsFunc = func(ctx context.Context, input *cloudwatchlogs.PutLogEventsInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) { 606 calls = append(calls, input) 607 called <- struct{}{} 608 return &cloudwatchlogs.PutLogEventsOutput{ 609 NextSequenceToken: aws.String(nextSequenceToken), 610 }, nil 611 } 612 ticks := make(chan time.Time) 613 newTicker = func(_ time.Duration) *time.Ticker { 614 return &time.Ticker{ 615 C: ticks, 616 } 617 } 618 619 d := make(chan bool) 620 close(d) 621 go stream.collectBatch(d) 622 623 stream.Log(&logger.Message{ 624 Line: []byte(logline + " 1"), 625 Timestamp: time.Time{}, 626 }) 627 stream.Log(&logger.Message{ 628 Line: []byte(logline + " 2"), 629 Timestamp: time.Time{}, 630 }) 631 632 ticks <- time.Time{} 633 // Verify first batch 634 <-called 635 assert.Assert(t, len(calls) == 1) 636 argument := calls[0] 637 calls = calls[1:] 638 assert.Assert(t, argument != nil) 639 assert.Assert(t, len(argument.LogEvents) == 2) 640 assert.Equal(t, logline+" 1", aws.ToString(argument.LogEvents[0].Message)) 641 assert.Equal(t, logline+" 2", aws.ToString(argument.LogEvents[1].Message)) 642 643 stream.Log(&logger.Message{ 644 Line: []byte(logline + " 3"), 645 Timestamp: time.Time{}, 646 }) 647 648 ticks <- time.Time{} 649 <-called 650 assert.Assert(t, len(calls) == 1) 651 argument = calls[0] 652 close(called) 653 assert.Assert(t, argument != nil) 654 assert.Assert(t, len(argument.LogEvents) == 1) 655 assert.Equal(t, logline+" 3", aws.ToString(argument.LogEvents[0].Message)) 656 657 stream.Close() 658 } 659 660 func TestCollectBatchMultilinePattern(t *testing.T) { 661 mockClient := &mockClient{} 662 multilinePattern := regexp.MustCompile("xxxx") 663 stream := &logStream{ 664 client: mockClient, 665 logGroupName: groupName, 666 logStreamName: streamName, 
667 multilinePattern: multilinePattern, 668 sequenceToken: aws.String(sequenceToken), 669 messages: make(chan *logger.Message), 670 } 671 calls := make([]*cloudwatchlogs.PutLogEventsInput, 0) 672 called := make(chan struct{}, 50) 673 mockClient.putLogEventsFunc = func(ctx context.Context, input *cloudwatchlogs.PutLogEventsInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) { 674 calls = append(calls, input) 675 called <- struct{}{} 676 return &cloudwatchlogs.PutLogEventsOutput{ 677 NextSequenceToken: aws.String(nextSequenceToken), 678 }, nil 679 } 680 ticks := make(chan time.Time) 681 newTicker = func(_ time.Duration) *time.Ticker { 682 return &time.Ticker{ 683 C: ticks, 684 } 685 } 686 687 d := make(chan bool) 688 close(d) 689 go stream.collectBatch(d) 690 691 stream.Log(&logger.Message{ 692 Line: []byte(logline), 693 Timestamp: time.Now(), 694 }) 695 stream.Log(&logger.Message{ 696 Line: []byte(logline), 697 Timestamp: time.Now(), 698 }) 699 stream.Log(&logger.Message{ 700 Line: []byte("xxxx " + logline), 701 Timestamp: time.Now(), 702 }) 703 704 ticks <- time.Now() 705 706 // Verify single multiline event 707 <-called 708 assert.Assert(t, len(calls) == 1) 709 argument := calls[0] 710 calls = calls[1:] 711 assert.Check(t, argument != nil, "Expected non-nil PutLogEventsInput") 712 assert.Check(t, is.Equal(1, len(argument.LogEvents)), "Expected single multiline event") 713 assert.Check(t, is.Equal(logline+"\n"+logline+"\n", aws.ToString(argument.LogEvents[0].Message)), "Received incorrect multiline message") 714 715 stream.Close() 716 717 // Verify single event 718 <-called 719 assert.Assert(t, len(calls) == 1) 720 argument = calls[0] 721 close(called) 722 assert.Check(t, argument != nil, "Expected non-nil PutLogEventsInput") 723 assert.Check(t, is.Equal(1, len(argument.LogEvents)), "Expected single multiline event") 724 assert.Check(t, is.Equal("xxxx "+logline+"\n", aws.ToString(argument.LogEvents[0].Message)), "Received 
incorrect multiline message") 725 } 726 727 func BenchmarkCollectBatch(b *testing.B) { 728 for i := 0; i < b.N; i++ { 729 mockClient := &mockClient{} 730 stream := &logStream{ 731 client: mockClient, 732 logGroupName: groupName, 733 logStreamName: streamName, 734 sequenceToken: aws.String(sequenceToken), 735 messages: make(chan *logger.Message), 736 } 737 mockClient.putLogEventsFunc = func(ctx context.Context, input *cloudwatchlogs.PutLogEventsInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) { 738 return &cloudwatchlogs.PutLogEventsOutput{ 739 NextSequenceToken: aws.String(nextSequenceToken), 740 }, nil 741 } 742 ticks := make(chan time.Time) 743 newTicker = func(_ time.Duration) *time.Ticker { 744 return &time.Ticker{ 745 C: ticks, 746 } 747 } 748 749 d := make(chan bool) 750 close(d) 751 go stream.collectBatch(d) 752 stream.logGenerator(10, 100) 753 ticks <- time.Time{} 754 stream.Close() 755 } 756 } 757 758 func BenchmarkCollectBatchMultilinePattern(b *testing.B) { 759 for i := 0; i < b.N; i++ { 760 mockClient := &mockClient{} 761 multilinePattern := regexp.MustCompile(`\d{4}-(?:0[1-9]|1[0-2])-(?:0[1-9]|[1,2][0-9]|3[0,1]) (?:[0,1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]`) 762 stream := &logStream{ 763 client: mockClient, 764 logGroupName: groupName, 765 logStreamName: streamName, 766 multilinePattern: multilinePattern, 767 sequenceToken: aws.String(sequenceToken), 768 messages: make(chan *logger.Message), 769 } 770 mockClient.putLogEventsFunc = func(ctx context.Context, input *cloudwatchlogs.PutLogEventsInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) { 771 return &cloudwatchlogs.PutLogEventsOutput{ 772 NextSequenceToken: aws.String(nextSequenceToken), 773 }, nil 774 } 775 ticks := make(chan time.Time) 776 newTicker = func(_ time.Duration) *time.Ticker { 777 return &time.Ticker{ 778 C: ticks, 779 } 780 } 781 d := make(chan bool) 782 close(d) 783 go stream.collectBatch(d) 784 
stream.logGenerator(10, 100) 785 ticks <- time.Time{} 786 stream.Close() 787 } 788 } 789 790 func TestCollectBatchMultilinePatternMaxEventAge(t *testing.T) { 791 mockClient := &mockClient{} 792 multilinePattern := regexp.MustCompile("xxxx") 793 stream := &logStream{ 794 client: mockClient, 795 logGroupName: groupName, 796 logStreamName: streamName, 797 multilinePattern: multilinePattern, 798 sequenceToken: aws.String(sequenceToken), 799 messages: make(chan *logger.Message), 800 } 801 calls := make([]*cloudwatchlogs.PutLogEventsInput, 0) 802 called := make(chan struct{}, 50) 803 mockClient.putLogEventsFunc = func(ctx context.Context, input *cloudwatchlogs.PutLogEventsInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) { 804 calls = append(calls, input) 805 called <- struct{}{} 806 return &cloudwatchlogs.PutLogEventsOutput{ 807 NextSequenceToken: aws.String(nextSequenceToken), 808 }, nil 809 } 810 ticks := make(chan time.Time) 811 newTicker = func(_ time.Duration) *time.Ticker { 812 return &time.Ticker{ 813 C: ticks, 814 } 815 } 816 817 d := make(chan bool) 818 close(d) 819 go stream.collectBatch(d) 820 821 stream.Log(&logger.Message{ 822 Line: []byte(logline), 823 Timestamp: time.Now(), 824 }) 825 826 // Log an event 1 second later 827 stream.Log(&logger.Message{ 828 Line: []byte(logline), 829 Timestamp: time.Now().Add(time.Second), 830 }) 831 832 // Fire ticker defaultForceFlushInterval seconds later 833 ticks <- time.Now().Add(defaultForceFlushInterval + time.Second) 834 835 // Verify single multiline event is flushed after maximum event buffer age (defaultForceFlushInterval) 836 <-called 837 assert.Assert(t, len(calls) == 1) 838 argument := calls[0] 839 calls = calls[1:] 840 assert.Check(t, argument != nil, "Expected non-nil PutLogEventsInput") 841 assert.Check(t, is.Equal(1, len(argument.LogEvents)), "Expected single multiline event") 842 assert.Check(t, is.Equal(logline+"\n"+logline+"\n", 
aws.ToString(argument.LogEvents[0].Message)), "Received incorrect multiline message") 843 844 // Log an event 1 second later 845 stream.Log(&logger.Message{ 846 Line: []byte(logline), 847 Timestamp: time.Now().Add(time.Second), 848 }) 849 850 // Fire ticker another defaultForceFlushInterval seconds later 851 ticks <- time.Now().Add(2*defaultForceFlushInterval + time.Second) 852 853 // Verify the event buffer is truly flushed - we should only receive a single event 854 <-called 855 assert.Assert(t, len(calls) == 1) 856 argument = calls[0] 857 close(called) 858 assert.Check(t, argument != nil, "Expected non-nil PutLogEventsInput") 859 assert.Check(t, is.Equal(1, len(argument.LogEvents)), "Expected single multiline event") 860 assert.Check(t, is.Equal(logline+"\n", aws.ToString(argument.LogEvents[0].Message)), "Received incorrect multiline message") 861 stream.Close() 862 } 863 864 func TestCollectBatchMultilinePatternNegativeEventAge(t *testing.T) { 865 mockClient := &mockClient{} 866 multilinePattern := regexp.MustCompile("xxxx") 867 stream := &logStream{ 868 client: mockClient, 869 logGroupName: groupName, 870 logStreamName: streamName, 871 multilinePattern: multilinePattern, 872 sequenceToken: aws.String(sequenceToken), 873 messages: make(chan *logger.Message), 874 } 875 calls := make([]*cloudwatchlogs.PutLogEventsInput, 0) 876 called := make(chan struct{}, 50) 877 mockClient.putLogEventsFunc = func(ctx context.Context, input *cloudwatchlogs.PutLogEventsInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) { 878 calls = append(calls, input) 879 called <- struct{}{} 880 return &cloudwatchlogs.PutLogEventsOutput{ 881 NextSequenceToken: aws.String(nextSequenceToken), 882 }, nil 883 } 884 ticks := make(chan time.Time) 885 newTicker = func(_ time.Duration) *time.Ticker { 886 return &time.Ticker{ 887 C: ticks, 888 } 889 } 890 891 d := make(chan bool) 892 close(d) 893 go stream.collectBatch(d) 894 895 stream.Log(&logger.Message{ 896 
Line: []byte(logline), 897 Timestamp: time.Now(), 898 }) 899 900 // Log an event 1 second later 901 stream.Log(&logger.Message{ 902 Line: []byte(logline), 903 Timestamp: time.Now().Add(time.Second), 904 }) 905 906 // Fire ticker in past to simulate negative event buffer age 907 ticks <- time.Now().Add(-time.Second) 908 909 // Verify single multiline event is flushed with a negative event buffer age 910 <-called 911 assert.Assert(t, len(calls) == 1) 912 argument := calls[0] 913 close(called) 914 assert.Check(t, argument != nil, "Expected non-nil PutLogEventsInput") 915 assert.Check(t, is.Equal(1, len(argument.LogEvents)), "Expected single multiline event") 916 assert.Check(t, is.Equal(logline+"\n"+logline+"\n", aws.ToString(argument.LogEvents[0].Message)), "Received incorrect multiline message") 917 918 stream.Close() 919 } 920 921 func TestCollectBatchMultilinePatternMaxEventSize(t *testing.T) { 922 mockClient := &mockClient{} 923 multilinePattern := regexp.MustCompile("xxxx") 924 stream := &logStream{ 925 client: mockClient, 926 logGroupName: groupName, 927 logStreamName: streamName, 928 multilinePattern: multilinePattern, 929 sequenceToken: aws.String(sequenceToken), 930 messages: make(chan *logger.Message), 931 } 932 calls := make([]*cloudwatchlogs.PutLogEventsInput, 0) 933 called := make(chan struct{}, 50) 934 mockClient.putLogEventsFunc = func(ctx context.Context, input *cloudwatchlogs.PutLogEventsInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) { 935 calls = append(calls, input) 936 called <- struct{}{} 937 return &cloudwatchlogs.PutLogEventsOutput{ 938 NextSequenceToken: aws.String(nextSequenceToken), 939 }, nil 940 } 941 ticks := make(chan time.Time) 942 newTicker = func(_ time.Duration) *time.Ticker { 943 return &time.Ticker{ 944 C: ticks, 945 } 946 } 947 948 d := make(chan bool) 949 close(d) 950 go stream.collectBatch(d) 951 952 // Log max event size 953 longline := strings.Repeat("A", maximumBytesPerEvent) 954 
stream.Log(&logger.Message{
	Line:      []byte(longline),
	Timestamp: time.Now(),
})

// Log short event
shortline := strings.Repeat("B", 100)
stream.Log(&logger.Message{
	Line:      []byte(shortline),
	Timestamp: time.Now(),
})

// Fire ticker
ticks <- time.Now().Add(defaultForceFlushInterval)

// Verify multiline events
// We expect a maximum sized event with no new line characters and a
// second short event with a new line character at the end
<-called
assert.Assert(t, len(calls) == 1)
argument := calls[0]
// After this point any further PutLogEvents call would panic the test
// (send on a closed channel), so unexpected extra flushes are caught.
close(called)
assert.Check(t, argument != nil, "Expected non-nil PutLogEventsInput")
assert.Check(t, is.Equal(2, len(argument.LogEvents)), "Expected two events")
assert.Check(t, is.Equal(longline, aws.ToString(argument.LogEvents[0].Message)), "Received incorrect multiline message")
assert.Check(t, is.Equal(shortline+"\n", aws.ToString(argument.LogEvents[1].Message)), "Received incorrect multiline message")
stream.Close()
}

// TestCollectBatchClose verifies that closing the stream flushes the
// buffered message in a single PutLogEvents call even though the ticker
// never fires.
func TestCollectBatchClose(t *testing.T) {
	mockClient := &mockClient{}
	stream := &logStream{
		client:        mockClient,
		logGroupName:  groupName,
		logStreamName: streamName,
		sequenceToken: aws.String(sequenceToken),
		messages:      make(chan *logger.Message),
	}
	calls := make([]*cloudwatchlogs.PutLogEventsInput, 0)
	// Buffered so the mock never blocks the batching goroutine.
	called := make(chan struct{}, 50)
	mockClient.putLogEventsFunc = func(ctx context.Context, input *cloudwatchlogs.PutLogEventsInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) {
		calls = append(calls, input)
		called <- struct{}{}
		return &cloudwatchlogs.PutLogEventsOutput{
			NextSequenceToken: aws.String(nextSequenceToken),
		}, nil
	}
	// Stub the package-level newTicker hook so the test controls flush
	// timing through the ticks channel; here no tick is ever sent.
	ticks := make(chan time.Time)
	newTicker = func(_ time.Duration) *time.Ticker {
		return &time.Ticker{
			C: ticks,
		}
	}

	// NOTE(review): d is passed pre-closed, matching the other
	// collectBatch tests in this file — confirm collectBatch treats it
	// as an immediately-ready signal.
	d := make(chan bool)
	close(d)
	go stream.collectBatch(d)

	stream.Log(&logger.Message{
		Line:      []byte(logline),
		Timestamp: time.Time{},
	})

	// no ticks
	stream.Close()

	<-called
	assert.Assert(t, len(calls) == 1)
	argument := calls[0]
	close(called)
	assert.Assert(t, argument != nil)
	assert.Assert(t, len(argument.LogEvents) == 1)
	assert.Equal(t, logline, *(argument.LogEvents[0].Message))
}

// TestEffectiveLen checks effectiveLen's byte accounting: valid UTF-8
// counts its encoded length, while each invalid byte is counted as the
// 3-byte utf8.RuneError replacement character.
func TestEffectiveLen(t *testing.T) {
	tests := []struct {
		str            string
		effectiveBytes int
	}{
		{"Hello", 5},
		{string([]byte{1, 2, 3, 4}), 4},
		{"🙃", 4},
		// Four invalid bytes count as 4 x 3 = 12 effective bytes.
		{string([]byte{0xFF, 0xFF, 0xFF, 0xFF}), 12},
		{"He\xff\xffo", 9},
		{"", 0},
	}
	for i, tc := range tests {
		t.Run(fmt.Sprintf("%d/%s", i, tc.str), func(t *testing.T) {
			assert.Equal(t, tc.effectiveBytes, effectiveLen(tc.str))
		})
	}
}

// TestFindValidSplit checks that findValidSplit returns a byte offset
// that does not cut a rune in half, together with the effective byte
// count of the prefix before that offset.
func TestFindValidSplit(t *testing.T) {
	tests := []struct {
		str               string
		maxEffectiveBytes int
		splitOffset       int
		effectiveBytes    int
	}{
		{"", 10, 0, 0},
		{"Hello", 6, 5, 5},
		{"Hello", 2, 2, 2},
		{"Hello", 0, 0, 0},
		// The 4-byte emoji cannot be split, so a 3-byte budget fits nothing.
		{"🙃", 3, 0, 0},
		{"🙃", 4, 4, 4},
		{string([]byte{'a', 0xFF}), 2, 1, 1},
		{string([]byte{'a', 0xFF}), 4, 2, 4},
	}
	for i, tc := range tests {
		t.Run(fmt.Sprintf("%d/%s", i, tc.str), func(t *testing.T) {
			splitOffset, effectiveBytes := findValidSplit(tc.str, tc.maxEffectiveBytes)
			assert.Equal(t, tc.splitOffset, splitOffset, "splitOffset")
			assert.Equal(t, tc.effectiveBytes, effectiveBytes, "effectiveBytes")
			t.Log(tc.str[:tc.splitOffset])
			t.Log(tc.str[tc.splitOffset:])
		})
	}
}

// TestProcessEventEmoji verifies that an over-long line of 4-byte runes
// is split on a rune boundary: one maximum-sized event plus one event
// holding the single overflowing emoji.
func TestProcessEventEmoji(t *testing.T) {
	stream := &logStream{}
	batch := &eventBatch{}
	// NOTE(review): `bytes` shadows the standard-library package name
	// within this test.
	bytes := []byte(strings.Repeat("🙃", maximumBytesPerEvent/4+1))
	stream.processEvent(batch, bytes, 0)
	assert.Equal(t, 2, len(batch.batch), "should be two events in the batch")
	assert.Equal(t, strings.Repeat("🙃", maximumBytesPerEvent/4), *batch.batch[0].inputLogEvent.Message)
	assert.Equal(t, "🙃", *batch.batch[1].inputLogEvent.Message)
}

// TestCollectBatchLineSplit verifies that a line one byte over
// maximumBytesPerEvent is split into two events within a single put.
func TestCollectBatchLineSplit(t *testing.T) {
	mockClient := &mockClient{}
	stream := &logStream{
		client:        mockClient,
		logGroupName:  groupName,
		logStreamName: streamName,
		sequenceToken: aws.String(sequenceToken),
		messages:      make(chan *logger.Message),
	}
	calls := make([]*cloudwatchlogs.PutLogEventsInput, 0)
	called := make(chan struct{}, 50)
	mockClient.putLogEventsFunc = func(ctx context.Context, input *cloudwatchlogs.PutLogEventsInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) {
		calls = append(calls, input)
		called <- struct{}{}
		return &cloudwatchlogs.PutLogEventsOutput{
			NextSequenceToken: aws.String(nextSequenceToken),
		}, nil
	}
	// Ticker stub: no ticks are sent, so the flush is driven by Close.
	ticks := make(chan time.Time)
	newTicker = func(_ time.Duration) *time.Ticker {
		return &time.Ticker{
			C: ticks,
		}
	}

	d := make(chan bool)
	close(d)
	go stream.collectBatch(d)

	longline := strings.Repeat("A", maximumBytesPerEvent)
	stream.Log(&logger.Message{
		Line:      []byte(longline + "B"),
		Timestamp: time.Time{},
	})

	// no ticks
	stream.Close()

	<-called
	assert.Assert(t, len(calls) == 1)
	argument := calls[0]
	close(called)
	assert.Assert(t, argument != nil)
	assert.Assert(t, len(argument.LogEvents) == 2)
	assert.Equal(t, longline, aws.ToString(argument.LogEvents[0].Message))
	assert.Equal(t, "B", aws.ToString(argument.LogEvents[1].Message))
}

// TestCollectBatchLineSplitWithBinary is the binary-input variant of
// TestCollectBatchLineSplit: invalid bytes are budgeted at their 3-byte
// utf8.RuneError effective size when deciding where to split.
func TestCollectBatchLineSplitWithBinary(t *testing.T) {
	mockClient := &mockClient{}
	stream := &logStream{
		client:        mockClient,
		logGroupName:  groupName,
		logStreamName: streamName,
		sequenceToken: aws.String(sequenceToken),
		messages:      make(chan *logger.Message),
	}
	calls := make([]*cloudwatchlogs.PutLogEventsInput, 0)
	called := make(chan struct{}, 50)
	mockClient.putLogEventsFunc = func(ctx context.Context, input *cloudwatchlogs.PutLogEventsInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) {
		calls = append(calls, input)
		called <- struct{}{}
		return &cloudwatchlogs.PutLogEventsOutput{
			NextSequenceToken: aws.String(nextSequenceToken),
		}, nil
	}
	ticks := make(chan time.Time)
	newTicker = func(_ time.Duration) *time.Ticker {
		return &time.Ticker{
			C: ticks,
		}
	}

	d := make(chan bool)
	close(d)
	go stream.collectBatch(d)

	longline := strings.Repeat("\xFF", maximumBytesPerEvent/3) // 0xFF is counted as the 3-byte utf8.RuneError
	stream.Log(&logger.Message{
		Line:      []byte(longline + "\xFD"),
		Timestamp: time.Time{},
	})

	// no ticks
	stream.Close()

	<-called
	assert.Assert(t, len(calls) == 1)
	argument := calls[0]
	close(called)
	assert.Assert(t, argument != nil)
	assert.Assert(t, len(argument.LogEvents) == 2)
	assert.Equal(t, longline, aws.ToString(argument.LogEvents[0].Message))
	assert.Equal(t, "\xFD", aws.ToString(argument.LogEvents[1].Message))
}

// TestCollectBatchMaxEvents verifies that a batch is capped at
// maximumLogEventsPerPut events and the overflow rolls into a second put.
func TestCollectBatchMaxEvents(t *testing.T) {
	mockClient := &mockClient{}
	stream := &logStream{
		client:        mockClient,
		logGroupName:  groupName,
		logStreamName: streamName,
		sequenceToken: aws.String(sequenceToken),
		messages:      make(chan *logger.Message),
	}
	calls := make([]*cloudwatchlogs.PutLogEventsInput, 0)
	called := make(chan struct{}, 50)
	mockClient.putLogEventsFunc = func(ctx context.Context, input *cloudwatchlogs.PutLogEventsInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) {
		calls = append(calls, input)
		called <- struct{}{}
		return &cloudwatchlogs.PutLogEventsOutput{
			NextSequenceToken: aws.String(nextSequenceToken),
		}, nil
	}
	ticks := make(chan time.Time)
	newTicker = func(_ time.Duration) *time.Ticker {
		return &time.Ticker{
			C: ticks,
		}
	}

	d := make(chan bool)
	close(d)
	go stream.collectBatch(d)

	line := "A"
	// <= logs maximumLogEventsPerPut+1 messages: one full batch plus one.
	for i := 0; i <= maximumLogEventsPerPut; i++ {
		stream.Log(&logger.Message{
			Line:      []byte(line),
			Timestamp: time.Time{},
		})
	}

	// no ticks
	stream.Close()

	<-called
	<-called
	assert.Assert(t, len(calls) == 2)
	argument := calls[0]
	assert.Assert(t, argument != nil)
	assert.Check(t, len(argument.LogEvents) == maximumLogEventsPerPut)

	argument = calls[1]
	close(called)
	assert.Assert(t, argument != nil)
	assert.Assert(t, len(argument.LogEvents) == 1)
}

// TestCollectBatchMaxTotalBytes verifies that the first put's payload
// (messages plus per-event overhead) stays within maximumBytesPerPut
// while still being close to it, and that the remainder ("B") lands in
// a second put.
func TestCollectBatchMaxTotalBytes(t *testing.T) {
	expectedPuts := 2
	mockClient := &mockClient{}
	stream := &logStream{
		client:        mockClient,
		logGroupName:  groupName,
		logStreamName: streamName,
		sequenceToken: aws.String(sequenceToken),
		messages:      make(chan *logger.Message),
	}
	calls := make([]*cloudwatchlogs.PutLogEventsInput, 0)
	called := make(chan struct{}, 50)
	mockClient.putLogEventsFunc = func(ctx context.Context, input *cloudwatchlogs.PutLogEventsInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) {
		calls = append(calls, input)
		called <- struct{}{}
		return &cloudwatchlogs.PutLogEventsOutput{
			NextSequenceToken: aws.String(nextSequenceToken),
		}, nil
	}

	ticks := make(chan time.Time)
	newTicker = func(_ time.Duration) *time.Ticker {
		return &time.Ticker{
			C: ticks,
		}
	}

	d := make(chan bool)
	close(d)
	go stream.collectBatch(d)

	numPayloads := maximumBytesPerPut / (maximumBytesPerEvent + perEventBytes)
	// maxline is the maximum line that could be submitted after
	// accounting for its overhead.
	maxline := strings.Repeat("A", maximumBytesPerPut-(perEventBytes*numPayloads))
	// This will be split and batched up to the `maximumBytesPerPut'
	// (+/- `maximumBytesPerEvent'). This /should/ be aligned, but
	// should also tolerate an offset within that range.
	stream.Log(&logger.Message{
		Line:      []byte(maxline[:len(maxline)/2]),
		Timestamp: time.Time{},
	})
	stream.Log(&logger.Message{
		Line:      []byte(maxline[len(maxline)/2:]),
		Timestamp: time.Time{},
	})
	stream.Log(&logger.Message{
		Line:      []byte("B"),
		Timestamp: time.Time{},
	})

	// no ticks, guarantee batch by size (and chan close)
	stream.Close()

	for i := 0; i < expectedPuts; i++ {
		<-called
	}
	assert.Assert(t, len(calls) == expectedPuts)
	argument := calls[0]
	assert.Assert(t, argument != nil)

	// Should total to the maximum allowed bytes.
	eventBytes := 0
	for _, event := range argument.LogEvents {
		eventBytes += len(*event.Message)
	}
	eventsOverhead := len(argument.LogEvents) * perEventBytes
	payloadTotal := eventBytes + eventsOverhead
	// lowestMaxBatch allows the payload to be offset if the messages
	// don't lend themselves to align with the maximum event size.
	lowestMaxBatch := maximumBytesPerPut - maximumBytesPerEvent

	assert.Check(t, payloadTotal <= maximumBytesPerPut)
	assert.Check(t, payloadTotal >= lowestMaxBatch)

	argument = calls[1]
	assert.Assert(t, len(argument.LogEvents) == 1)
	message := *argument.LogEvents[len(argument.LogEvents)-1].Message
	assert.Equal(t, "B", message[len(message)-1:])
}

// TestCollectBatchMaxTotalBytesWithBinary is the binary-input variant
// of TestCollectBatchMaxTotalBytes; payload accounting uses
// effectiveLen because each 0xFF byte costs 3 effective bytes.
func TestCollectBatchMaxTotalBytesWithBinary(t *testing.T) {
	expectedPuts := 2
	mockClient := &mockClient{}
	stream := &logStream{
		client:        mockClient,
		logGroupName:  groupName,
		logStreamName: streamName,
		sequenceToken: aws.String(sequenceToken),
		messages:      make(chan *logger.Message),
	}
	calls := make([]*cloudwatchlogs.PutLogEventsInput, 0)
	called := make(chan struct{}, 50)
	mockClient.putLogEventsFunc = func(ctx context.Context, input *cloudwatchlogs.PutLogEventsInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) {
		calls = append(calls, input)
		called <- struct{}{}
		return &cloudwatchlogs.PutLogEventsOutput{
			NextSequenceToken: aws.String(nextSequenceToken),
		}, nil
	}

	ticks := make(chan time.Time)
	newTicker = func(_ time.Duration) *time.Ticker {
		return &time.Ticker{
			C: ticks,
		}
	}

	d := make(chan bool)
	close(d)
	go stream.collectBatch(d)

	// maxline is the maximum line that could be submitted after
	// accounting for its overhead.
	maxline := strings.Repeat("\xFF", (maximumBytesPerPut-perEventBytes)/3) // 0xFF is counted as the 3-byte utf8.RuneError
	// This will be split and batched up to the `maximumBytesPerPut'
	// (+/- `maximumBytesPerEvent'). This /should/ be aligned, but
	// should also tolerate an offset within that range.
	stream.Log(&logger.Message{
		Line:      []byte(maxline),
		Timestamp: time.Time{},
	})
	stream.Log(&logger.Message{
		Line:      []byte("B"),
		Timestamp: time.Time{},
	})

	// no ticks, guarantee batch by size (and chan close)
	stream.Close()

	for i := 0; i < expectedPuts; i++ {
		<-called
	}
	assert.Assert(t, len(calls) == expectedPuts)
	argument := calls[0]
	assert.Assert(t, argument != nil)

	// Should total to the maximum allowed bytes.
	eventBytes := 0
	for _, event := range argument.LogEvents {
		eventBytes += effectiveLen(*event.Message)
	}
	eventsOverhead := len(argument.LogEvents) * perEventBytes
	payloadTotal := eventBytes + eventsOverhead
	// lowestMaxBatch allows the payload to be offset if the messages
	// don't lend themselves to align with the maximum event size.
	lowestMaxBatch := maximumBytesPerPut - maximumBytesPerEvent

	assert.Check(t, payloadTotal <= maximumBytesPerPut)
	assert.Check(t, payloadTotal >= lowestMaxBatch)

	argument = calls[1]
	message := *argument.LogEvents[len(argument.LogEvents)-1].Message
	assert.Equal(t, "B", message[len(message)-1:])
}

// TestCollectBatchWithDuplicateTimestamps verifies that messages whose
// timestamps collide (pairs share one timestamp here) are all submitted,
// in order, with millisecond-precision timestamps.
func TestCollectBatchWithDuplicateTimestamps(t *testing.T) {
	mockClient := &mockClient{}
	stream := &logStream{
		client:        mockClient,
		logGroupName:  groupName,
		logStreamName: streamName,
		sequenceToken: aws.String(sequenceToken),
		messages:      make(chan *logger.Message),
	}
	calls := make([]*cloudwatchlogs.PutLogEventsInput, 0)
	called := make(chan struct{}, 50)
	mockClient.putLogEventsFunc = func(ctx context.Context, input *cloudwatchlogs.PutLogEventsInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) {
		calls = append(calls, input)
		called <- struct{}{}
		return &cloudwatchlogs.PutLogEventsOutput{
			NextSequenceToken: aws.String(nextSequenceToken),
		}, nil
	}
	ticks := make(chan time.Time)
	newTicker = func(_ time.Duration) *time.Ticker {
		return &time.Ticker{
			C: ticks,
		}
	}

	d := make(chan bool)
	close(d)
	go stream.collectBatch(d)

	var expectedEvents []types.InputLogEvent
	times := maximumLogEventsPerPut
	timestamp := time.Now()
	for i := 0; i < times; i++ {
		line := strconv.Itoa(i)
		// Advance the clock only on every other message so each pair of
		// consecutive messages shares a timestamp.
		if i%2 == 0 {
			timestamp = timestamp.Add(1 * time.Nanosecond)
		}
		stream.Log(&logger.Message{
			Line:      []byte(line),
			Timestamp: timestamp,
		})
		expectedEvents = append(expectedEvents, types.InputLogEvent{
			Message:   aws.String(line),
			Timestamp: aws.Int64(timestamp.UnixNano() / int64(time.Millisecond)),
		})
	}

	ticks <- time.Time{}
	stream.Close()

	<-called
	assert.Assert(t, len(calls) == 1)
	argument := calls[0]
	close(called)
	assert.Assert(t, argument != nil)
	assert.Assert(t, len(argument.LogEvents) == times)
	for i := 0; i < times; i++ {
		if !reflect.DeepEqual(argument.LogEvents[i], expectedEvents[i]) {
			t.Errorf("Expected event to be %v but was %v", expectedEvents[i], argument.LogEvents[i])
		}
	}
}

// TestParseLogOptionsMultilinePattern checks that the
// awslogs-multiline-pattern option is compiled into a matching regexp.
func TestParseLogOptionsMultilinePattern(t *testing.T) {
	info := logger.Info{
		Config: map[string]string{
			multilinePatternKey: "^xxxx",
		},
	}

	multilinePattern, err := parseMultilineOptions(info)
	assert.Check(t, err, "Received unexpected error")
	assert.Check(t, multilinePattern.MatchString("xxxx"), "No multiline pattern match found")
}

// TestParseLogOptionsDatetimeFormat checks that strftime-style
// awslogs-datetime-format values are translated into regexps that match
// the corresponding date/time strings.
func TestParseLogOptionsDatetimeFormat(t *testing.T) {
	datetimeFormatTests := []struct {
		format string
		match  string
	}{
		{"%d/%m/%y %a %H:%M:%S%L %Z", "31/12/10 Mon 08:42:44.345 NZDT"},
		{"%Y-%m-%d %A %I:%M:%S.%f%p%z", "2007-12-04 Monday 08:42:44.123456AM+1200"},
		{"%b|%b|%b|%b|%b|%b|%b|%b|%b|%b|%b|%b", "Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec"},
		{"%B|%B|%B|%B|%B|%B|%B|%B|%B|%B|%B|%B", "January|February|March|April|May|June|July|August|September|October|November|December"},
		{"%A|%A|%A|%A|%A|%A|%A", "Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday"},
		{"%a|%a|%a|%a|%a|%a|%a", "Mon|Tue|Wed|Thu|Fri|Sat|Sun"},
		{"Day of the week: %w, Day of the year: %j", "Day of the week: 4, Day of the year: 091"},
	}
	for _, dt := range datetimeFormatTests {
		t.Run(dt.match, func(t *testing.T) {
			info := logger.Info{
				Config: map[string]string{
					datetimeFormatKey: dt.format,
				},
			}
			multilinePattern, err := parseMultilineOptions(info)
			assert.Check(t, err, "Received unexpected error")
			assert.Check(t, multilinePattern.MatchString(dt.match), "No multiline pattern match found")
		})
	}
}

// TestValidateLogOptionsDatetimeFormatAndMultilinePattern checks that
// setting both mutually-exclusive options is rejected with the exact
// documented error message.
func TestValidateLogOptionsDatetimeFormatAndMultilinePattern(t *testing.T) {
	cfg := map[string]string{
		multilinePatternKey: "^xxxx",
		datetimeFormatKey:   "%Y-%m-%d",
		logGroupKey:         groupName,
	}
	conflictingLogOptionsError := "you cannot configure log opt 'awslogs-datetime-format' and 'awslogs-multiline-pattern' at the same time"

	err := ValidateLogOpt(cfg)
	assert.Check(t, err != nil, "Expected an error")
	assert.Check(t, is.Equal(err.Error(), conflictingLogOptionsError), "Received invalid error")
}

// TestValidateLogOptionsForceFlushIntervalSeconds checks that the flush
// interval option must be a positive integer.
func TestValidateLogOptionsForceFlushIntervalSeconds(t *testing.T) {
	tests := []struct {
		input     string
		shouldErr bool
	}{
		{"0", true},
		{"-1", true},
		{"a", true},
		{"10", false},
	}

	for _, tc := range tests {
		t.Run(tc.input, func(t *testing.T) {
			cfg := map[string]string{
				forceFlushIntervalKey: tc.input,
				logGroupKey:           groupName,
			}

			err := ValidateLogOpt(cfg)
			if tc.shouldErr {
				expectedErr := "must specify a positive integer for log opt 'awslogs-force-flush-interval-seconds': " + tc.input
				assert.Error(t, err, expectedErr)
			} else {
				assert.NilError(t, err)
			}
		})
	}
}

// TestValidateLogOptionsMaxBufferedEvents checks that the buffered-events
// option must be a positive integer.
func TestValidateLogOptionsMaxBufferedEvents(t *testing.T) {
	tests := []struct {
		input     string
		shouldErr bool
	}{
		{"0", true},
		{"-1", true},
		{"a", true},
		{"10", false},
	}

	for _, tc := range tests {
		t.Run(tc.input, func(t *testing.T) {
			cfg := map[string]string{
				maxBufferedEventsKey: tc.input,
				logGroupKey:          groupName,
			}

			err := ValidateLogOpt(cfg)
			if tc.shouldErr {
				expectedErr := "must specify a positive integer for log opt 'awslogs-max-buffered-events': " + tc.input
				assert.Error(t, err, expectedErr)
			} else {
				assert.NilError(t, err)
			}
		})
	}
}

// TestValidateLogOptionsFormat checks the awslogs-format option: only
// 'json/emf' (or empty) is accepted, and it conflicts with the datetime
// and multiline options.
func TestValidateLogOptionsFormat(t *testing.T) {
	tests := []struct {
		format           string
		multiLinePattern string
		datetimeFormat   string
		expErrMsg        string
	}{
		{"json/emf", "", "", ""},
		{"random", "", "", "unsupported log format 'random'"},
		{"", "", "", ""},
		{"json/emf", "---", "", "you cannot configure log opt 'awslogs-datetime-format' or 'awslogs-multiline-pattern' when log opt 'awslogs-format' is set to 'json/emf'"},
		{"json/emf", "", "yyyy-dd-mm", "you cannot configure log opt 'awslogs-datetime-format' or 'awslogs-multiline-pattern' when log opt 'awslogs-format' is set to 'json/emf'"},
	}

	for i, tc := range tests {
		t.Run(fmt.Sprintf("%d/%s", i, tc.format), func(t *testing.T) {
			cfg := map[string]string{
				logGroupKey:  groupName,
				logFormatKey: tc.format,
			}
			if tc.multiLinePattern != "" {
				cfg[multilinePatternKey] = tc.multiLinePattern
			}
			if tc.datetimeFormat != "" {
				cfg[datetimeFormatKey] = tc.datetimeFormat
			}

			err := ValidateLogOpt(cfg)
			if tc.expErrMsg != "" {
				assert.Error(t, err, tc.expErrMsg)
			} else {
				assert.NilError(t, err)
			}
		})
	}
}

// TestCreateTagSuccess verifies that the 'tag' template is expanded from
// the container info and used as the log stream name in CreateLogStream.
func TestCreateTagSuccess(t *testing.T) {
	mockClient := &mockClient{}
	info := logger.Info{
		ContainerName: "/test-container",
		ContainerID:   "container-abcdefghijklmnopqrstuvwxyz01234567890",
		Config:        map[string]string{"tag": "{{.Name}}/{{.FullID}}"},
	}
	logStreamName, e := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate)
	if e != nil {
		t.Errorf("Error generating tag: %q", e)
	}
	stream := &logStream{
		client:          mockClient,
		logGroupName:    groupName,
		logStreamName:   logStreamName,
		logCreateStream: true,
	}
	calls := make([]*cloudwatchlogs.CreateLogStreamInput, 0)
	mockClient.createLogStreamFunc = func(ctx context.Context, input *cloudwatchlogs.CreateLogStreamInput, opts ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.CreateLogStreamOutput, error) {
		calls = append(calls, input)
		return &cloudwatchlogs.CreateLogStreamOutput{}, nil
	}

	err := stream.create()

	assert.NilError(t, err)
	assert.Equal(t, 1, len(calls))
	argument := calls[0]

	assert.Equal(t, "test-container/container-abcdefghijklmnopqrstuvwxyz01234567890", aws.ToString(argument.LogStreamName))
}

// BenchmarkUnwrapEvents measures unwrapEvents over a maximum-size batch
// of maximum-size events.
func BenchmarkUnwrapEvents(b *testing.B) {
	events := make([]wrappedEvent, maximumLogEventsPerPut)
	for i := 0; i < maximumLogEventsPerPut; i++ {
		mes := strings.Repeat("0", maximumBytesPerEvent)
		events[i].inputLogEvent = types.InputLogEvent{
			Message: &mes,
		}
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		res := unwrapEvents(events)
		assert.Check(b, is.Len(res, maximumLogEventsPerPut))
	}
}

// TestNewAWSLogsClientCredentialEndpointDetect verifies that the driver
// fetches credentials from the awslogs-credentials-endpoint option and
// uses them to SigV4-sign subsequent API requests.
func TestNewAWSLogsClientCredentialEndpointDetect(t *testing.T) {
	// required for the cloudwatchlogs client
	t.Setenv("AWS_REGION", "us-west-2")

	credsResp := `{
		"AccessKeyId" : "test-access-key-id",
		"SecretAccessKey": "test-secret-access-key"
	}`

	credsRetrieved := false
	actualAuthHeader := ""

	// One test server plays both roles: /creds serves the credentials,
	// and / receives the (signed) CloudWatch Logs API call.
	testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/creds":
			credsRetrieved = true
			w.Header().Set("Content-Type", "application/json")
			fmt.Fprintln(w, credsResp)
		case "/":
			actualAuthHeader = r.Header.Get("Authorization")
			w.Header().Set("Content-Type", "application/json")
			fmt.Fprintln(w, "{}")
		}
	}))
	defer testServer.Close()

	// set the SDKEndpoint in the driver
	newSDKEndpoint = testServer.URL

	info := logger.Info{
		Config: map[string]string{
			endpointKey:            testServer.URL,
			credentialsEndpointKey: "/creds",
		},
	}

	client, err := newAWSLogsClient(info)
	assert.Check(t, err)

	_, err = client.CreateLogGroup(context.TODO(), &cloudwatchlogs.CreateLogGroupInput{LogGroupName: aws.String("foo")})
	assert.NilError(t, err)

	assert.Check(t, credsRetrieved)

	// sample header val:
	// AWS4-HMAC-SHA256 Credential=test-access-key-id/20220915/us-west-2/logs/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-length;content-type;host;x-amz-date;x-amz-target, Signature=9cc0f8347e379ec77884616bb4b5a9d4a9a11f63cdc4c765e2f0131f45fe06d3
	assert.Check(t, is.Contains(actualAuthHeader, "AWS4-HMAC-SHA256 Credential=test-access-key-id/"))
	assert.Check(t, is.Contains(actualAuthHeader, "us-west-2"))
	assert.Check(t, is.Contains(actualAuthHeader, "Signature="))
}