// github.com/rbisecke/kafka-go@v0.4.27/conn_test.go

package kafka

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"math/rand"
	"net"
	"os"
	"strconv"
	"testing"
	"time"

	ktesting "github.com/rbisecke/kafka-go/testing"
	"golang.org/x/net/nettest"
)

type timeout struct{}

func (*timeout) Error() string   { return "timeout" }
func (*timeout) Temporary() bool { return true }
func (*timeout) Timeout() bool   { return true }

// connPipe is an adapter that implements the net.Conn interface on top of
// two client kafka connections to pass the nettest.TestConn test suite.
type connPipe struct {
	rconn *Conn
	wconn *Conn
}

func (c *connPipe) Close() error {
	b := [1]byte{} // marker that the connection has been closed
	c.wconn.SetWriteDeadline(time.Time{})
	c.wconn.Write(b[:])
	c.wconn.Close()
	c.rconn.Close()
	return nil
}

func (c *connPipe) Read(b []byte) (int, error) {
	// See comments in Write.
	time.Sleep(time.Millisecond)
	if t := c.rconn.readDeadline(); !t.IsZero() {
		return 0, &timeout{}
	}
	n, err := c.rconn.Read(b)
	if n == 1 && b[0] == 0 {
		c.rconn.Close()
		n, err = 0, io.EOF
	}
	return n, err
}

func (c *connPipe) Write(b []byte) (int, error) {
	// The nettest/ConcurrentMethods test spawns a bunch of goroutines that do
	// random stuff on the connection; if a Read or Write is issued before a
	// deadline has been set, it could cancel an in-flight request to kafka,
	// resulting in the connection being closed.
	// To prevent this from happening we wait a little while to give the other
	// goroutines a chance to start and set the deadline.
	time.Sleep(time.Millisecond)

	// The nettest code only sets deadlines when it expects the write to time
	// out. The broker connection is alive and able to accept data, so we need
	// to simulate the timeout in order to get the tests to pass.
	if t := c.wconn.writeDeadline(); !t.IsZero() {
		return 0, &timeout{}
	}

	return c.wconn.Write(b)
}

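// A note on the address methods below: a connPipe has no single remote peer,
// so RemoteAddr reports the local address of the write-side connection. As
// far as I can tell the nettest suite only uses these addresses for error
// messages, so this loose interpretation appears to be sufficient.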
func (c *connPipe) LocalAddr() net.Addr {
	return c.rconn.LocalAddr()
}

func (c *connPipe) RemoteAddr() net.Addr {
	return c.wconn.LocalAddr()
}

func (c *connPipe) SetDeadline(t time.Time) error {
	c.rconn.SetDeadline(t)
	c.wconn.SetDeadline(t)
	return nil
}

func (c *connPipe) SetReadDeadline(t time.Time) error {
	return c.rconn.SetReadDeadline(t)
}

func (c *connPipe) SetWriteDeadline(t time.Time) error {
	return c.wconn.SetWriteDeadline(t)
}

func init() {
	rand.Seed(time.Now().UnixNano())
}

func makeTopic() string {
	return fmt.Sprintf("kafka-go-%016x", rand.Int63())
}

func makeGroupID() string {
	return fmt.Sprintf("kafka-go-group-%016x", rand.Int63())
}

func makeTransactionalID() string {
	return fmt.Sprintf("kafka-go-transactional-id-%016x", rand.Int63())
}

func TestConn(t *testing.T) {
	tests := []struct {
		scenario   string
		function   func(*testing.T, *Conn)
		minVersion string
	}{
		{
			scenario: "close right away",
			function: testConnClose,
		},

		{
			scenario: "ensure the initial offset of a connection is the first offset",
			function: testConnFirstOffset,
		},

		{
			scenario: "write a single message to kafka should succeed",
			function: testConnWrite,
		},

		{
			scenario: "writing a message to a closed kafka connection should fail",
			function: testConnCloseAndWrite,
		},

		{
			scenario: "ensure the connection can seek to the first offset",
			function: testConnSeekFirstOffset,
		},

		{
			scenario: "ensure the connection can seek to the last offset",
			function: testConnSeekLastOffset,
		},

		{
			scenario: "ensure the connection can seek relative to the current offset",
			function: testConnSeekCurrentOffset,
		},

		{
			scenario: "ensure the connection can seek to a random offset",
			function: testConnSeekRandomOffset,
		},

		{
			scenario: "unchecked seeks allow the connection to be positioned outside the boundaries of the partition",
			function: testConnSeekDontCheck,
		},

		{
			scenario: "writing and reading messages sequentially should preserve the order",
			function: testConnWriteReadSequentially,
		},

		{
			scenario: "writing a batch of messages and reading it sequentially should preserve the order",
			function: testConnWriteBatchReadSequentially,
		},

		{
			scenario: "writing and reading messages concurrently should preserve the order",
			function: testConnWriteReadConcurrently,
		},

		{
			scenario: "reading messages with a buffer that is too short should return io.ErrShortBuffer and keep the connection open",
			function: testConnReadShortBuffer,
		},

		{
			scenario: "reading messages from an empty partition should time out after reaching the deadline",
			function: testConnReadEmptyWithDeadline,
		},

		{
			scenario: "write a batch of messages and read the highest offset (watermark)",
			function: testConnReadWatermarkFromBatch,
		},

		{
			scenario:   "read a batch with no explicit min or max bytes",
			function:   testConnReadBatchWithNoMinMaxBytes,
			minVersion: "0.11.0",
		},

		{
			scenario: "read a batch using explicit max wait time",
			function: testConnReadBatchWithMaxWait,
		},

		{
			scenario: "find the group coordinator",
			function: testConnFindCoordinator,
		},

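		// Entries that set minVersion are skipped by the version check in the
		// run loop further down when the broker under test is older than the
		// listed version (see the ktesting.KafkaIsAtLeast call).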
		{
			scenario: "test join group with an invalid groupID",
			function: testConnJoinGroupInvalidGroupID,
		},

		{
			scenario: "test join group with an invalid sessionTimeout",
			function: testConnJoinGroupInvalidSessionTimeout,
		},

		{
			scenario: "test join group with an invalid refreshTimeout",
			function: testConnJoinGroupInvalidRefreshTimeout,
		},

		{
			scenario: "test heartbeat once group has been created",
			function: testConnHeartbeatErr,
		},

		{
			scenario: "test leave group returns error when called outside group",
			function: testConnLeaveGroupErr,
		},

		{
			scenario: "test sync group with bad memberID",
			function: testConnSyncGroupErr,
		},

		{
			scenario:   "test list groups",
			function:   testConnListGroupsReturnsGroups,
			minVersion: "0.11.0",
		},

		{
			scenario: "test fetch and commit offset",
			function: testConnFetchAndCommitOffsets,
		},

		{
			scenario: "test delete topics",
			function: testDeleteTopics,
		},

		{
			scenario: "test delete topics with an invalid topic",
			function: testDeleteTopicsInvalidTopic,
		},

		{
			scenario: "test retrieve controller",
			function: testController,
		},

		{
			scenario: "test list brokers",
			function: testBrokers,
		},

		{
			scenario: "the connection advertises the broker that it is connected to",
			function: testConnBroker,
		},
	}

	const (
		tcp   = "tcp"
		kafka = "localhost:9092"
	)

	for _, test := range tests {
		if !ktesting.KafkaIsAtLeast(test.minVersion) {
			t.Log("skipping " + test.scenario + " because broker is not at least version " + test.minVersion)
			continue
		}

		testFunc := test.function
		t.Run(test.scenario, func(t *testing.T) {
			t.Parallel()

			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
			defer cancel()

			topic := makeTopic()

			conn, err := (&Dialer{
				Resolver: &net.Resolver{},
			}).DialLeader(ctx, tcp, kafka, topic, 0)
			if err != nil {
				t.Fatal("failed to open a new kafka connection:", err)
			}
			defer conn.Close()
			testFunc(t, conn)
		})
	}

	t.Run("nettest", func(t *testing.T) {
		// Need ability to skip nettest on newer Kafka versions to avoid these kinds of errors:
		// --- FAIL: TestConn/nettest (17.56s)
		//     --- FAIL: TestConn/nettest/PingPong (7.40s)
		//         conntest.go:112: unexpected Read error: [7] Request Timed Out: the request exceeded the user-specified time limit in the request
		//         conntest.go:118: mismatching value: got 77, want 78
		//         conntest.go:118: mismatching value: got 78, want 79
		//         ...
		//
		// TODO: Figure out why these are happening and fix them (they don't appear to be new).
		if _, ok := os.LookupEnv("KAFKA_SKIP_NETTEST"); ok {
			t.Log("skipping nettest because KAFKA_SKIP_NETTEST is set")
			t.Skip()
		}

		t.Parallel()

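		// The factory below wires two half-duplex pipes out of four leader
		// connections: bytes written to c1 land in topic2 where c2 reads
		// them, and bytes written to c2 land in topic1 where c1 reads them.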
		nettest.TestConn(t, func() (c1 net.Conn, c2 net.Conn, stop func(), err error) {
			topic1 := makeTopic()
			topic2 := makeTopic()
			var t1Reader *Conn
			var t2Reader *Conn
			var t1Writer *Conn
			var t2Writer *Conn
			dialer := &Dialer{}

			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
			defer cancel()

			if t1Reader, err = dialer.DialLeader(ctx, tcp, kafka, topic1, 0); err != nil {
				return
			}
			if t2Reader, err = dialer.DialLeader(ctx, tcp, kafka, topic2, 0); err != nil {
				return
			}
			if t1Writer, err = dialer.DialLeader(ctx, tcp, kafka, topic1, 0); err != nil {
				return
			}
			if t2Writer, err = dialer.DialLeader(ctx, tcp, kafka, topic2, 0); err != nil {
				return
			}

			stop = func() {
				t1Reader.Close()
				t1Writer.Close()
				t2Reader.Close()
				t2Writer.Close()
			}
			c1 = &connPipe{rconn: t1Reader, wconn: t2Writer}
			c2 = &connPipe{rconn: t2Reader, wconn: t1Writer}
			return
		})
	})
}

func testConnClose(t *testing.T, conn *Conn) {
	if err := conn.Close(); err != nil {
		t.Error(err)
	}
}

func testConnFirstOffset(t *testing.T, conn *Conn) {
	offset, whence := conn.Offset()

	// a fresh connection should start at offset 0 with whence 0
	if offset != 0 || whence != 0 {
		t.Error("bad first offset:", offset, whence)
	}
}

func testConnWrite(t *testing.T, conn *Conn) {
	b := []byte("Hello World!")
	n, err := conn.Write(b)
	if err != nil {
		t.Error(err)
	}

	if n != len(b) {
		t.Error("bad length returned by (*Conn).Write:", n)
	}
}

func testConnCloseAndWrite(t *testing.T, conn *Conn) {
	conn.Close()

	switch _, err := conn.Write([]byte("Hello World!")); err.(type) {
	case *net.OpError:
	default:
		t.Error(err)
	}
}

func testConnSeekFirstOffset(t *testing.T, conn *Conn) {
	for i := 0; i != 10; i++ {
		if _, err := conn.Write([]byte(strconv.Itoa(i))); err != nil {
			t.Fatal(err)
		}
	}

	offset, err := conn.Seek(0, SeekStart)
	if err != nil {
		t.Error(err)
	}

	if offset != 0 {
		t.Error("bad offset:", offset)
	}
}

func testConnSeekLastOffset(t *testing.T, conn *Conn) {
	for i := 0; i != 10; i++ {
		if _, err := conn.Write([]byte(strconv.Itoa(i))); err != nil {
			t.Fatal(err)
		}
	}

	offset, err := conn.Seek(0, SeekEnd)
	if err != nil {
		t.Error(err)
	}

	if offset != 10 {
		t.Error("bad offset:", offset)
	}
}

func testConnSeekCurrentOffset(t *testing.T, conn *Conn) {
	for i := 0; i != 10; i++ {
		if _, err := conn.Write([]byte(strconv.Itoa(i))); err != nil {
			t.Fatal(err)
		}
	}

	offset, err := conn.Seek(5, SeekStart)
	if err != nil {
		t.Error(err)
	}

	if offset != 5 {
		t.Error("bad offset:", offset)
	}

	offset, err = conn.Seek(-2, SeekCurrent)
	if err != nil {
		t.Error(err)
	}

	if offset != 3 {
		t.Error("bad offset:", offset)
	}
}

func testConnSeekRandomOffset(t *testing.T, conn *Conn) {
	for i := 0; i != 10; i++ {
		if _, err := conn.Write([]byte(strconv.Itoa(i))); err != nil {
			t.Fatal(err)
		}
	}

	offset, err := conn.Seek(3, SeekAbsolute)
	if err != nil {
		t.Error(err)
	}

	if offset != 3 {
		t.Error("bad offset:", offset)
	}
}

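// conn.Seek accepts SeekStart, SeekEnd, SeekCurrent and SeekAbsolute as its
// whence value; SeekDontCheck can be OR'ed in (as below) to skip the
// server-side validation of the target offset, so an out-of-range position
// is only reported by the next read.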
func testConnSeekDontCheck(t *testing.T, conn *Conn) {
	for i := 0; i != 10; i++ {
		if _, err := conn.Write([]byte(strconv.Itoa(i))); err != nil {
			t.Fatal(err)
		}
	}

	offset, err := conn.Seek(42, SeekAbsolute|SeekDontCheck)
	if err != nil {
		t.Error(err)
	}

	if offset != 42 {
		t.Error("bad offset:", offset)
	}

	if _, err := conn.ReadMessage(1024); err != OffsetOutOfRange {
		t.Error("unexpected error:", err)
	}
}

func testConnWriteReadSequentially(t *testing.T, conn *Conn) {
	for i := 0; i != 10; i++ {
		if _, err := conn.Write([]byte(strconv.Itoa(i))); err != nil {
			t.Fatal(err)
		}
	}

	b := make([]byte, 128)

	for i := 0; i != 10; i++ {
		n, err := conn.Read(b)
		if err != nil {
			t.Error(err)
			continue
		}
		s := string(b[:n])
		if v, err := strconv.Atoi(s); err != nil {
			t.Error(err)
		} else if v != i {
			t.Errorf("bad message read at offset %d: %s", i, s)
		}
	}
}

func testConnWriteBatchReadSequentially(t *testing.T, conn *Conn) {
	msgs := makeTestSequence(10)

	if _, err := conn.WriteMessages(msgs...); err != nil {
		t.Fatal(err)
	}

	for i := 0; i != 10; i++ {
		msg, err := conn.ReadMessage(128)
		if err != nil {
			t.Error(err)
			continue
		}
		if !bytes.Equal(msg.Key, msgs[i].Key) {
			t.Errorf("bad message key at offset %d: %q != %q", i, msg.Key, msgs[i].Key)
		}
		if !bytes.Equal(msg.Value, msgs[i].Value) {
			t.Errorf("bad message value at offset %d: %q != %q", i, msg.Value, msgs[i].Value)
		}
		if !msg.Time.Equal(msgs[i].Time) {
			t.Errorf("bad message time at offset %d: %s != %s", i, msg.Time, msgs[i].Time)
		}
	}
}

func testConnReadWatermarkFromBatch(t *testing.T, conn *Conn) {
	if _, err := conn.WriteMessages(makeTestSequence(10)...); err != nil {
		t.Fatal(err)
	}

	const minBytes = 1
	const maxBytes = 10e6 // 10 MB

	value := make([]byte, 10e3) // 10 KB

	batch := conn.ReadBatch(minBytes, maxBytes)

	for i := 0; i < 10; i++ {
		_, err := batch.Read(value)
		if err != nil {
			if err = batch.Close(); err != nil {
				t.Fatalf("error trying to read batch message: %s", err)
			}
		}

		if batch.HighWaterMark() != 10 {
			t.Fatal("expected highest offset (watermark) to be 10")
		}
	}

	batch.Close()
}

func testConnReadBatchWithNoMinMaxBytes(t *testing.T, conn *Conn) {
	if _, err := conn.WriteMessages(makeTestSequence(10)...); err != nil {
		t.Fatal(err)
	}

	value := make([]byte, 10e3) // 10 KB

	batch := conn.ReadBatchWith(ReadBatchConfig{})

	for i := 0; i < 10; i++ {
		_, err := batch.Read(value)
		if err != nil {
			if err = batch.Close(); err != nil {
				t.Fatalf("error trying to read batch message: %s", err)
			}
		}

		if batch.HighWaterMark() != 10 {
			t.Fatal("expected highest offset (watermark) to be 10")
		}
	}

	if err := batch.Close(); err != nil {
		t.Fatalf("error trying to close batch: %s", err)
	}

	if err := batch.Err(); err != nil {
		t.Fatalf("broken batch: %s", err)
	}
}

func testConnReadBatchWithMaxWait(t *testing.T, conn *Conn) {
	if _, err := conn.WriteMessages(makeTestSequence(10)...); err != nil {
		t.Fatal(err)
	}

	const maxBytes = 10e6 // 10 MB

	value := make([]byte, 10e3) // 10 KB

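	// Setting MinBytes == MaxBytes asks the broker for more data than the
	// partition holds, so the fetch can never fill up and MaxWait (bounded
	// by the connection deadline set below) decides how long ReadBatchWith
	// blocks.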
	cfg := ReadBatchConfig{
		MinBytes: maxBytes, // use max for both so that we hit max wait time
		MaxBytes: maxBytes,
		MaxWait:  500 * time.Millisecond,
	}

	// set a read deadline so the batch will succeed.
	conn.SetDeadline(time.Now().Add(time.Second))
	batch := conn.ReadBatchWith(cfg)

	for i := 0; i < 10; i++ {
		_, err := batch.Read(value)
		if err != nil {
			if err = batch.Close(); err != nil {
				t.Fatalf("error trying to read batch message: %s", err)
			}
		}

		if batch.HighWaterMark() != 10 {
			t.Fatal("expected highest offset (watermark) to be 10")
		}
	}

	batch.Close()

	// reset the offset and ensure that the conn deadline takes precedence
	// over the max wait
	conn.Seek(0, SeekAbsolute)
	conn.SetDeadline(time.Now().Add(50 * time.Millisecond))
	batch = conn.ReadBatchWith(cfg)
	if err := batch.Err(); err == nil {
		t.Fatal("should have timed out, but got no error")
	} else if netErr, ok := err.(net.Error); !ok || !netErr.Timeout() {
		t.Fatalf("should have timed out, but got: %v", err)
	}
}

func waitForCoordinator(t *testing.T, conn *Conn, groupID string) {
	// Ensure that kafka has allocated a group coordinator. Oddly, the issue
	// doesn't appear to happen if kafka has been running for a while.
	const maxAttempts = 20
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		_, err := conn.findCoordinator(findCoordinatorRequestV0{
			CoordinatorKey: groupID,
		})
		switch err {
		case nil:
			return
		case GroupCoordinatorNotAvailable:
			time.Sleep(250 * time.Millisecond)
		default:
			t.Fatalf("unable to find coordinator for group: %v", err)
		}
	}

	t.Fatalf("unable to connect to coordinator after %v attempts", maxAttempts)
}

func createGroup(t *testing.T, conn *Conn, groupID string) (generationID int32, memberID string, stop func()) {
	waitForCoordinator(t, conn, groupID)

	join := func() (joinGroup joinGroupResponseV1) {
		var err error
		for attempt := 0; attempt < 10; attempt++ {
			joinGroup, err = conn.joinGroup(joinGroupRequestV1{
				GroupID:          groupID,
				SessionTimeout:   int32(time.Minute / time.Millisecond),
				RebalanceTimeout: int32(time.Second / time.Millisecond),
				ProtocolType:     "roundrobin",
				GroupProtocols: []joinGroupRequestGroupProtocolV1{
					{
						ProtocolName:     "roundrobin",
						ProtocolMetadata: []byte("blah"),
					},
				},
			})
			switch err {
			case nil:
				return
			case NotCoordinatorForGroup:
				time.Sleep(250 * time.Millisecond)
			default:
				t.Fatalf("bad joinGroup: %s", err)
			}
		}
		return
	}

	// join the group
	joinGroup := join()

	// sync the group
	_, err := conn.syncGroup(syncGroupRequestV0{
		GroupID:      groupID,
		GenerationID: joinGroup.GenerationID,
		MemberID:     joinGroup.MemberID,
		GroupAssignments: []syncGroupRequestGroupAssignmentV0{
			{
				MemberID:          joinGroup.MemberID,
				MemberAssignments: []byte("blah"),
			},
		},
	})
	if err != nil {
		t.Fatalf("bad syncGroup: %s", err)
	}

	generationID = joinGroup.GenerationID
	memberID = joinGroup.MemberID
	stop = func() {
		conn.leaveGroup(leaveGroupRequestV0{
			GroupID:  groupID,
			MemberID: joinGroup.MemberID,
		})
	}

	return
}

func testConnFindCoordinator(t *testing.T, conn *Conn) {
	groupID := makeGroupID()

	for attempt := 0; attempt < 10; attempt++ {
		if attempt != 0 {
			time.Sleep(time.Millisecond * 50)
		}
		response, err := conn.findCoordinator(findCoordinatorRequestV0{CoordinatorKey: groupID})
		if err != nil {
			switch err {
			case GroupCoordinatorNotAvailable:
				continue
			default:
				t.Fatalf("bad findCoordinator: %s", err)
			}
		}

		if response.Coordinator.NodeID == 0 {
			t.Errorf("bad NodeID")
		}
		if response.Coordinator.Host == "" {
			t.Errorf("bad Host")
		}
		if response.Coordinator.Port == 0 {
			t.Errorf("bad Port")
		}
		return
	}
}

func testConnJoinGroupInvalidGroupID(t *testing.T, conn *Conn) {
	_, err := conn.joinGroup(joinGroupRequestV1{})
	if err != InvalidGroupId && err != NotCoordinatorForGroup {
		t.Fatalf("expected %v or %v; got %v", InvalidGroupId, NotCoordinatorForGroup, err)
	}
}

func testConnJoinGroupInvalidSessionTimeout(t *testing.T, conn *Conn) {
	groupID := makeGroupID()
	waitForCoordinator(t, conn, groupID)

	_, err := conn.joinGroup(joinGroupRequestV1{
		GroupID: groupID,
	})
	if err != InvalidSessionTimeout && err != NotCoordinatorForGroup {
		t.Fatalf("expected %v or %v; got %v", InvalidSessionTimeout, NotCoordinatorForGroup, err)
	}
}

func testConnJoinGroupInvalidRefreshTimeout(t *testing.T, conn *Conn) {
	groupID := makeGroupID()
	waitForCoordinator(t, conn, groupID)

	_, err := conn.joinGroup(joinGroupRequestV1{
		GroupID:        groupID,
		SessionTimeout: int32(3 * time.Second / time.Millisecond),
	})
	if err != InvalidSessionTimeout && err != NotCoordinatorForGroup {
		t.Fatalf("expected %v or %v; got %v", InvalidSessionTimeout, NotCoordinatorForGroup, err)
	}
}

// NOTE: despite the scenario name, this exercises the same syncGroup call as
// testConnSyncGroupErr below, just after the group has been created; the
// request is still missing a member ID and should be rejected.
func testConnHeartbeatErr(t *testing.T, conn *Conn) {
	groupID := makeGroupID()
	createGroup(t, conn, groupID)

	_, err := conn.syncGroup(syncGroupRequestV0{
		GroupID: groupID,
	})
	if err != UnknownMemberId && err != NotCoordinatorForGroup {
		t.Fatalf("expected %v or %v; got %v", UnknownMemberId, NotCoordinatorForGroup, err)
	}
}

func testConnLeaveGroupErr(t *testing.T, conn *Conn) {
	groupID := makeGroupID()
	waitForCoordinator(t, conn, groupID)

	_, err := conn.leaveGroup(leaveGroupRequestV0{
		GroupID: groupID,
	})
	if err != UnknownMemberId && err != NotCoordinatorForGroup {
		t.Fatalf("expected %v or %v; got %v", UnknownMemberId, NotCoordinatorForGroup, err)
	}
}

func testConnSyncGroupErr(t *testing.T, conn *Conn) {
	groupID := makeGroupID()
	waitForCoordinator(t, conn, groupID)

	_, err := conn.syncGroup(syncGroupRequestV0{
		GroupID: groupID,
	})
	if err != UnknownMemberId && err != NotCoordinatorForGroup {
		t.Fatalf("expected %v or %v; got %v", UnknownMemberId, NotCoordinatorForGroup, err)
	}
}

func testConnListGroupsReturnsGroups(t *testing.T, conn *Conn) {
	group1 := makeGroupID()
	_, _, stop1 := createGroup(t, conn, group1)
	defer stop1()

	group2 := makeGroupID()
	_, _, stop2 := createGroup(t, conn, group2)
	defer stop2()

	out, err := conn.listGroups(listGroupsRequestV1{})
	if err != nil {
		t.Fatalf("bad err: %v", err)
	}

	containsGroup := func(groupID string) bool {
		for _, group := range out.Groups {
			if group.GroupID == groupID {
				return true
			}
		}
		return false
	}

	if !containsGroup(group1) {
		t.Errorf("expected groups to contain group1")
	}

	if !containsGroup(group2) {
		t.Errorf("expected groups to contain group2")
	}
}

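// An offset of -1 returned by offsetFetch is Kafka's convention for "no
// offset committed yet for this partition"; the test below relies on that
// before committing and re-fetching.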
func testConnFetchAndCommitOffsets(t *testing.T, conn *Conn) {
	const N = 10
	if _, err := conn.WriteMessages(makeTestSequence(N)...); err != nil {
		t.Fatal(err)
	}

	groupID := makeGroupID()
	generationID, memberID, stop := createGroup(t, conn, groupID)
	defer stop()

	request := offsetFetchRequestV1{
		GroupID: groupID,
		Topics: []offsetFetchRequestV1Topic{
			{
				Topic:      conn.topic,
				Partitions: []int32{0},
			},
		},
	}
	fetch, err := conn.offsetFetch(request)
	if err != nil {
		t.Fatalf("bad err: %v", err)
	}

	if v := len(fetch.Responses); v != 1 {
		t.Fatalf("expected 1 Response; got %v", v)
	}

	if v := len(fetch.Responses[0].PartitionResponses); v != 1 {
		t.Fatalf("expected 1 PartitionResponses; got %v", v)
	}

	if offset := fetch.Responses[0].PartitionResponses[0].Offset; offset != -1 {
		t.Fatalf("expected initial offset of -1; got %v", offset)
	}

	committedOffset := int64(N - 1)
	_, err = conn.offsetCommit(offsetCommitRequestV2{
		GroupID:       groupID,
		GenerationID:  generationID,
		MemberID:      memberID,
		RetentionTime: int64(time.Hour / time.Millisecond),
		Topics: []offsetCommitRequestV2Topic{
			{
				Topic: conn.topic,
				Partitions: []offsetCommitRequestV2Partition{
					{
						Partition: 0,
						Offset:    committedOffset,
					},
				},
			},
		},
	})
	if err != nil {
		t.Fatalf("bad error: %v", err)
	}

	fetch, err = conn.offsetFetch(request)
	if err != nil {
		t.Fatalf("bad error: %v", err)
	}

	fetchedOffset := fetch.Responses[0].PartitionResponses[0].Offset
	if committedOffset != fetchedOffset {
		t.Fatalf("bad offset. expected %v; got %v", committedOffset, fetchedOffset)
	}
}

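// The writer goroutine signals every successful produce on the written
// channel, and the reader waits for one signal per iteration; see the
// comment inside the read loop for why this synchronization is needed.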
func testConnWriteReadConcurrently(t *testing.T, conn *Conn) {
	const N = 1000
	msgs := make([]string, N)
	done := make(chan struct{})
	written := make(chan struct{}, N/10)

	for i := 0; i != N; i++ {
		msgs[i] = strconv.Itoa(i)
	}

	go func() {
		defer close(done)
		for _, msg := range msgs {
			if _, err := conn.Write([]byte(msg)); err != nil {
				t.Error(err)
			}
			written <- struct{}{}
		}
	}()

	b := make([]byte, 128)

	for i := 0; i != N; i++ {
		// Wait until at least one message has been written. The reason for
		// this synchronization is that we aren't using deadlines; as such,
		// if the read happens before a message is available, it will cause
		// a deadlock because the read request will never hit the one-byte
		// minimum required to return and release the lock on the conn. By
		// ensuring that there's at least one message produced, we don't hit
		// that condition.
		<-written
		n, err := conn.Read(b)
		if err != nil {
			t.Error(err)
		}
		if s := string(b[:n]); s != strconv.Itoa(i) {
			t.Errorf("bad message read at offset %d: %s", i, s)
		}
	}

	<-done
}

func testConnReadShortBuffer(t *testing.T, conn *Conn) {
	if _, err := conn.Write([]byte("Hello World!")); err != nil {
		t.Fatal(err)
	}

	b := make([]byte, 4)

	for i := 0; i != 10; i++ {
		b[0] = 0
		b[1] = 0
		b[2] = 0
		b[3] = 0

		n, err := conn.Read(b)
		if err != io.ErrShortBuffer {
			t.Error("bad error:", i, err)
		}
		if n != 4 {
			t.Error("bad byte count:", i, n)
		}
		if s := string(b); s != "Hell" {
			t.Error("bad content:", i, s)
		}
	}
}

func testConnReadEmptyWithDeadline(t *testing.T, conn *Conn) {
	b := make([]byte, 100)

	start := time.Now()
	deadline := start.Add(time.Second)

	conn.SetReadDeadline(deadline)
	n, err := conn.Read(b)

	if n != 0 {
		t.Error("bad byte count:", n)
	}

	if !isTimeout(err) {
		t.Error("expected timeout error but got", err)
	}
}

func testDeleteTopics(t *testing.T, conn *Conn) {
	topic1 := makeTopic()
	topic2 := makeTopic()
	err := conn.CreateTopics(
		TopicConfig{
			Topic:             topic1,
			NumPartitions:     1,
			ReplicationFactor: 1,
		},
		TopicConfig{
			Topic:             topic2,
			NumPartitions:     1,
			ReplicationFactor: 1,
		},
	)
	if err != nil {
		t.Fatalf("bad CreateTopics: %v", err)
	}
	conn.SetDeadline(time.Now().Add(time.Second))
	err = conn.DeleteTopics(topic1, topic2)
	if err != nil {
		t.Fatalf("bad DeleteTopics: %v", err)
	}
	partitions, err := conn.ReadPartitions(topic1, topic2)
	if err != nil {
		t.Fatalf("bad ReadPartitions: %v", err)
	}
	if len(partitions) != 0 {
		t.Fatal("expected partitions to be empty")
	}
}

func testDeleteTopicsInvalidTopic(t *testing.T, conn *Conn) {
	topic := makeTopic()
	err := conn.CreateTopics(
		TopicConfig{
			Topic:             topic,
			NumPartitions:     1,
			ReplicationFactor: 1,
		},
	)
	if err != nil {
		t.Fatalf("bad CreateTopics: %v", err)
	}
	conn.SetDeadline(time.Now().Add(5 * time.Second))
	err = conn.DeleteTopics("invalid-topic", topic)
	if err != UnknownTopicOrPartition {
		t.Fatalf("expected UnknownTopicOrPartition error, but got %v", err)
	}
	partitions, err := conn.ReadPartitions(topic)
	if err != nil {
		t.Fatalf("bad ReadPartitions: %v", err)
	}
	if len(partitions) != 0 {
		t.Fatal("expected partitions to be empty")
	}
}

func testController(t *testing.T, conn *Conn) {
	b, err := conn.Controller()
	if err != nil {
		t.Error(err)
	}

	if b.Host != "localhost" {
		t.Errorf("expected localhost received %s", b.Host)
	}
	if b.Port != 9092 {
		t.Errorf("expected 9092 received %d", b.Port)
	}
	if b.ID != 1 {
		t.Errorf("expected 1 received %d", b.ID)
	}
	if b.Rack != "" {
		t.Errorf("expected empty string for rack received %s", b.Rack)
	}
}

func testBrokers(t *testing.T, conn *Conn) {
	brokers, err := conn.Brokers()
	if err != nil {
		t.Error(err)
	}

	if len(brokers) != 1 {
		t.Errorf("expected 1 broker in %+v", brokers)
	}

	if brokers[0].ID != 1 {
		t.Errorf("expected ID 1 received %d", brokers[0].ID)
	}
}

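// Broker reports the endpoint this connection actually dialed, so unlike the
// metadata-derived Controller and Brokers results above, the host shows up
// as a literal loopback address rather than "localhost".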
func testConnBroker(t *testing.T, conn *Conn) {
	broker := conn.Broker()
	// Depending on the environment the test is run in, IPv4 or IPv6 may be used.
	if broker.Host != "::1" && broker.Host != "127.0.0.1" {
		t.Errorf("invalid broker address: %q", broker.Host)
	}
	if broker.Port != 9092 {
		t.Errorf("invalid broker port: %d", broker.Port)
	}
	if broker.ID != 1 {
		t.Errorf("invalid broker id: %d", broker.ID)
	}
	if broker.Rack != "" {
		t.Errorf("invalid broker rack: %q", broker.Rack)
	}
}

func TestReadPartitionsNoTopic(t *testing.T) {
	conn, err := Dial("tcp", "127.0.0.1:9092")
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()

	parts, err := conn.ReadPartitions()
	if err != nil {
		t.Error(err)
	}

	if len(parts) == 0 {
		t.Errorf("no partitions were returned")
	}
}

func TestUnsupportedSASLMechanism(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	conn, err := (&Dialer{
		Resolver: &net.Resolver{},
	}).DialContext(ctx, "tcp", "127.0.0.1:9093")
	if err != nil {
		t.Fatal("failed to open a new kafka connection:", err)
	}
	defer conn.Close()

	if err := conn.saslHandshake("FOO"); err != UnsupportedSASLMechanism {
		t.Errorf("Expected UnsupportedSASLMechanism but got %v", err)
	}
}

const benchmarkMessageCount = 100

func BenchmarkConn(b *testing.B) {
	benchmarks := []struct {
		scenario string
		function func(*testing.B, *Conn, []byte)
	}{
		{
			scenario: "Seek",
			function: benchmarkConnSeek,
		},

		{
			scenario: "Read",
			function: benchmarkConnRead,
		},

		{
			scenario: "ReadBatch",
			function: benchmarkConnReadBatch,
		},

		{
			scenario: "ReadOffsets",
			function: benchmarkConnReadOffsets,
		},

		{
			scenario: "Write",
			function: benchmarkConnWrite,
		},
	}

	topic := makeTopic()
	value := make([]byte, 10e3) // 10 KB
	msgs := make([]Message, benchmarkMessageCount)

	for i := range msgs {
		msgs[i].Value = value
	}

	conn, err := DialLeader(context.Background(), "tcp", "localhost:9092", topic, 0)
	if err != nil {
		b.Fatal(err)
	}
	defer conn.Close()

	if _, err := conn.WriteMessages(msgs...); err != nil {
		b.Fatal(err)
	}

	for _, benchmark := range benchmarks {
		b.Run(benchmark.scenario, func(b *testing.B) {
			if _, err := conn.Seek(0, SeekStart); err != nil {
				b.Error(err)
				return
			}
			benchmark.function(b, conn, value)
		})
	}
}

func benchmarkConnSeek(b *testing.B, conn *Conn, _ []byte) {
	for i := 0; i != b.N; i++ {
		if _, err := conn.Seek(int64(i%benchmarkMessageCount), SeekAbsolute); err != nil {
			b.Error(err)
			return
		}
	}
}

func benchmarkConnRead(b *testing.B, conn *Conn, a []byte) {
	n := 0
	i := 0

	for i != b.N {
		if (i % benchmarkMessageCount) == 0 {
			if _, err := conn.Seek(0, SeekStart); err != nil {
				b.Error(err)
				return
			}
		}

		c, err := conn.Read(a)
		if err != nil {
			b.Error(err)
			return
		}

		n += c
		i++
	}

	b.SetBytes(int64(n / i))
}

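// Like benchmarkConnRead above, the batch benchmark below re-seeks to the
// start whenever it exhausts the benchmarkMessageCount messages written
// during setup, and reports throughput via b.SetBytes (average bytes per
// iteration).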
func benchmarkConnReadBatch(b *testing.B, conn *Conn, a []byte) {
	const minBytes = 1
	const maxBytes = 10e6 // 10 MB

	batch := conn.ReadBatch(minBytes, maxBytes)
	i := 0
	n := 0

	for i != b.N {
		c, err := batch.Read(a)
		if err != nil {
			if err = batch.Close(); err != nil {
				b.Error(err)
				return
			}
			if _, err = conn.Seek(0, SeekStart); err != nil {
				b.Error(err)
				return
			}
			batch = conn.ReadBatch(minBytes, maxBytes)
		}
		n += c
		i++
	}

	batch.Close()
	b.SetBytes(int64(n / i))
}

func benchmarkConnReadOffsets(b *testing.B, conn *Conn, _ []byte) {
	for i := 0; i != b.N; i++ {
		_, _, err := conn.ReadOffsets()
		if err != nil {
			b.Error(err)
			return
		}
	}
}

func benchmarkConnWrite(b *testing.B, conn *Conn, _ []byte) {
	a := make([]byte, 10e3) // 10 KB
	n := 0
	i := 0

	for i != b.N {
		c, err := conn.Write(a)
		if err != nil {
			b.Error(err)
			return
		}
		n += c
		i++
	}

	b.SetBytes(int64(n / i))
}

func TestEmptyToNullableReturnsNil(t *testing.T) {
	if emptyToNullable("") != nil {
		t.Error("Empty string is not converted to nil")
	}
}

func TestEmptyToNullableLeavesStringsIntact(t *testing.T) {
	const s = "abc"
	r := emptyToNullable(s)
	if *r != s {
		t.Error("Non-empty string is not equal to the original string")
	}
}

func TestMakeBrokersAllPresent(t *testing.T) {
	brokers := make(map[int32]Broker)
	brokers[1] = Broker{ID: 1, Host: "203.0.113.101", Port: 9092}
	brokers[2] = Broker{ID: 2, Host: "203.0.113.102", Port: 9092}
	brokers[3] = Broker{ID: 3, Host: "203.0.113.103", Port: 9092}

	b := makeBrokers(brokers, 1, 2, 3)
	if len(b) != 3 {
		t.Errorf("Expected 3 brokers, got %d", len(b))
	}
	for _, i := range []int32{1, 2, 3} {
		if b[i-1] != brokers[i] {
			t.Errorf("Expected broker %d at index %d, got %d", i, i-1, b[i-1].ID)
		}
	}
}

func TestMakeBrokersOneMissing(t *testing.T) {
	brokers := make(map[int32]Broker)
	brokers[1] = Broker{ID: 1, Host: "203.0.113.101", Port: 9092}
	brokers[3] = Broker{ID: 3, Host: "203.0.113.103", Port: 9092}

	b := makeBrokers(brokers, 1, 2, 3)
	if len(b) != 2 {
		t.Errorf("Expected 2 brokers, got %d", len(b))
	}
	if b[0] != brokers[1] {
		t.Errorf("Expected broker 1 at index 0, got %d", b[0].ID)
	}
	if b[1] != brokers[3] {
		t.Errorf("Expected broker 3 at index 1, got %d", b[1].ID)
	}
}
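
// isTimeout is defined elsewhere in this package. A minimal sketch of the
// behavior the deadline tests above rely on, assuming the standard net.Error
// convention, would be:
//
//	func isTimeout(err error) bool {
//		var netErr net.Error
//		return errors.As(err, &netErr) && netErr.Timeout()
//	}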