github.com/deanMdreon/kafka-go@v0.4.32/reader_test.go (about)

     1  package kafka
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"fmt"
     7  	"io"
     8  	"math/rand"
     9  	"net"
    10  	"os"
    11  	"reflect"
    12  	"strconv"
    13  	"sync"
    14  	"testing"
    15  	"time"
    16  
    17  	"github.com/stretchr/testify/require"
    18  )
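
        // The tests in this file run against a live Kafka broker expected at
        // localhost:9092; they create their own throwaway topics via makeTopic and
        // createTopic. As a rough sketch (with a hypothetical topic name), the reader
        // configuration exercised throughout these tests looks like:
        //
        //	r := NewReader(ReaderConfig{
        //		Brokers:  []string{"localhost:9092"},
        //		Topic:    "my-topic", // hypothetical
        //		MinBytes: 1,
        //		MaxBytes: 10e6,
        //		MaxWait:  100 * time.Millisecond,
        //	})
        //	defer r.Close()
        //	msg, err := r.ReadMessage(context.Background())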
    19  
    20  func TestReader(t *testing.T) {
    21  	tests := []struct {
    22  		scenario string
    23  		function func(*testing.T, context.Context, *Reader)
    24  	}{
    25  		{
    26  			scenario: "calling Read with a context that has been canceled returns an error",
    27  			function: testReaderReadCanceled,
    28  		},
    29  
    30  		{
    31  			scenario: "all messages of the stream are returned when calling ReadMessage repeatedly",
    32  			function: testReaderReadMessages,
    33  		},
    34  
    35  		{
    36  			scenario: "test special offsets -1 and -2",
    37  			function: testReaderSetSpecialOffsets,
    38  		},
    39  
    40  		{
    41  			scenario: "setting the offset to random values returns the expected messages when Read is called",
    42  			function: testReaderSetRandomOffset,
    43  		},
    44  
    45  		{
    46  			scenario: "setting the offset by TimeStamp",
    47  			function: testReaderSetOffsetAt,
    48  		},
    49  
    50  		{
    51  			scenario: "calling Lag returns the lag of the last message read from kafka",
    52  			function: testReaderLag,
    53  		},
    54  
    55  		{
    56  			scenario: "calling ReadLag returns the current lag of a reader",
    57  			function: testReaderReadLag,
    58  		},
    59  
    60  		{ // https://github.com/deanMdreon/kafka-go/issues/30
    61  			scenario: "reading from an out-of-range offset waits until the context is cancelled",
    62  			function: testReaderOutOfRangeGetsCanceled,
    63  		},
    64  	}
    65  
    66  	for _, test := range tests {
    67  		testFunc := test.function
    68  		t.Run(test.scenario, func(t *testing.T) {
    69  			t.Parallel()
    70  
    71  			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    72  			defer cancel()
    73  
    74  			r := NewReader(ReaderConfig{
    75  				Brokers:  []string{"localhost:9092"},
    76  				Topic:    makeTopic(),
    77  				MinBytes: 1,
    78  				MaxBytes: 10e6,
    79  				MaxWait:  100 * time.Millisecond,
    80  			})
    81  			defer r.Close()
    82  			testFunc(t, ctx, r)
    83  		})
    84  	}
    85  }
    86  
    87  func testReaderReadCanceled(t *testing.T, ctx context.Context, r *Reader) {
    88  	ctx, cancel := context.WithCancel(ctx)
    89  	cancel()
    90  
    91  	if _, err := r.ReadMessage(ctx); err != context.Canceled {
    92  		t.Error(err)
    93  	}
    94  }
    95  
    96  func testReaderReadMessages(t *testing.T, ctx context.Context, r *Reader) {
    97  	const N = 1000
    98  	prepareReader(t, ctx, r, makeTestSequence(N)...)
    99  
   100  	var offset int64
   101  
   102  	for i := 0; i != N; i++ {
   103  		m, err := r.ReadMessage(ctx)
   104  		if err != nil {
   105  			t.Error("reading message at offset", offset, "failed:", err)
   106  			return
   107  		}
   108  		offset = m.Offset + 1
   109  		v, _ := strconv.Atoi(string(m.Value))
   110  		if v != i {
   111  			t.Error("message at index", i, "has wrong value:", v)
   112  			return
   113  		}
   114  	}
   115  }
   116  
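        // testReaderSetSpecialOffsets verifies the special offsets: FirstOffset (-2)
        // seeks to the beginning of the partition, while LastOffset (-1) seeks to the
        // end and picks up the "last" message produced afterwards by a delayed goroutine.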
   117  func testReaderSetSpecialOffsets(t *testing.T, ctx context.Context, r *Reader) {
   118  	prepareReader(t, ctx, r, Message{Value: []byte("first")})
   119  	prepareReader(t, ctx, r, makeTestSequence(3)...)
   120  
   121  	go func() {
   122  		time.Sleep(1 * time.Second)
   123  		prepareReader(t, ctx, r, Message{Value: []byte("last")})
   124  	}()
   125  
   126  	for _, test := range []struct {
   127  		off, final int64
   128  		want       string
   129  	}{
   130  		{FirstOffset, 1, "first"},
   131  		{LastOffset, 5, "last"},
   132  	} {
   133  		offset := test.off
   134  		if err := r.SetOffset(offset); err != nil {
   135  			t.Error("setting offset", offset, "failed:", err)
   136  		}
   137  		m, err := r.ReadMessage(ctx)
   138  		if err != nil {
   139  			t.Error("reading at offset", offset, "failed:", err)
   140  		}
   141  		if string(m.Value) != test.want {
   142  			t.Error("message at offset", offset, "has wrong value:", string(m.Value))
   143  		}
   144  		if off := r.Offset(); off != test.final {
   145  			t.Errorf("bad final offset: got %d, want %d", off, test.final)
   146  		}
   147  	}
   148  }
   149  
   150  func testReaderSetRandomOffset(t *testing.T, ctx context.Context, r *Reader) {
   151  	const N = 10
   152  	prepareReader(t, ctx, r, makeTestSequence(N)...)
   153  
   154  	for i := 0; i != 2*N; i++ {
   155  		offset := rand.Intn(N)
   156  		r.SetOffset(int64(offset))
   157  		m, err := r.ReadMessage(ctx)
   158  		if err != nil {
   159  			t.Error("seeking to offset", offset, "failed:", err)
   160  			return
   161  		}
   162  		v, _ := strconv.Atoi(string(m.Value))
   163  		if v != offset {
   164  			t.Error("message at offset", offset, "has wrong value:", v)
   165  			return
   166  		}
   167  	}
   168  }
   169  
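        // testReaderSetOffsetAt exercises Reader.SetOffsetAt, which resolves a timestamp
        // to the earliest offset written at or after that time. A caller would use it
        // roughly like this (sketch, assuming an existing reader r and a time ts of interest):
        //
        //	if err := r.SetOffsetAt(ctx, ts); err != nil {
        //		// handle error
        //	}
        //	m, err := r.ReadMessage(ctx) // first message written at or after ts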
   170  func testReaderSetOffsetAt(t *testing.T, ctx context.Context, r *Reader) {
   171  	// We write two batches of messages here with a brief 2-second pause to
   172  	// ensure messages 0...9 land a few seconds before messages 10...19. We then
   173  	// fetch the timestamp of the message at offset 10 and use it to position
   174  	// the reader.
   175  	const N = 10
   176  	prepareReader(t, ctx, r, makeTestSequence(N)...)
   177  	time.Sleep(time.Second * 2)
   178  	prepareReader(t, ctx, r, makeTestSequence(N)...)
   179  
   180  	var ts time.Time
   181  	for i := 0; i < N*2; i++ {
   182  		m, err := r.ReadMessage(ctx)
   183  		if err != nil {
   184  			t.Error("error reading message", err)
   185  		}
   186  		// grab the time of the message at offset 10 (the first message of the second batch)
   187  		if i == 10 {
   188  			ts = m.Time
   189  		}
   190  	}
   191  
   192  	err := r.SetOffsetAt(ctx, ts)
   193  	if err != nil {
   194  		t.Fatal("error setting offset by timestamp", err)
   195  	}
   196  
   197  	m, err := r.ReadMessage(context.Background())
   198  	if err != nil {
   199  		t.Fatal("error reading message", err)
   200  	}
   201  
   202  	if m.Offset != 10 {
   203  		t.Errorf("expected offset of 10, received offset %d", m.Offset)
   204  	}
   205  }
   206  
   207  func testReaderLag(t *testing.T, ctx context.Context, r *Reader) {
   208  	const N = 5
   209  	prepareReader(t, ctx, r, makeTestSequence(N)...)
   210  
   211  	if lag := r.Lag(); lag != 0 {
   212  		t.Errorf("the initial lag value is %d but was expected to be 0", lag)
   213  	}
   214  
   215  	for i := 0; i != N; i++ {
   216  		r.ReadMessage(ctx)
   217  		expect := int64(N - (i + 1))
   218  
   219  		if lag := r.Lag(); lag != expect {
   220  			t.Errorf("the lag value at offset %d is %d but was expected to be %d", i, lag, expect)
   221  		}
   222  	}
   223  }
   224  
   225  func testReaderReadLag(t *testing.T, ctx context.Context, r *Reader) {
   226  	const N = 5
   227  	prepareReader(t, ctx, r, makeTestSequence(N)...)
   228  
   229  	if lag, err := r.ReadLag(ctx); err != nil {
   230  		t.Error(err)
   231  	} else if lag != N {
   232  		t.Errorf("the initial lag value is %d but was expected to be %d", lag, N)
   233  	}
   234  
   235  	for i := 0; i != N; i++ {
   236  		r.ReadMessage(ctx)
   237  		expect := int64(N - (i + 1))
   238  
   239  		if lag, err := r.ReadLag(ctx); err != nil {
   240  			t.Error(err)
   241  		} else if lag != expect {
   242  			t.Errorf("the lag value at offset %d is %d but was expected to be %d", i, lag, expect)
   243  		}
   244  	}
   245  }
   246  
   247  func testReaderOutOfRangeGetsCanceled(t *testing.T, ctx context.Context, r *Reader) {
   248  	prepareReader(t, ctx, r, makeTestSequence(10)...)
   249  
   250  	const D = 100 * time.Millisecond
   251  	t0 := time.Now()
   252  
   253  	ctx, cancel := context.WithTimeout(ctx, D)
   254  	defer cancel()
   255  
   256  	if err := r.SetOffset(42); err != nil {
   257  		t.Error(err)
   258  	}
   259  
   260  	_, err := r.ReadMessage(ctx)
   261  	if err != context.DeadlineExceeded {
   262  		t.Error("bad error:", err)
   263  	}
   264  
   265  	t1 := time.Now()
   266  
   267  	if d := t1.Sub(t0); d < D {
   268  		t.Error("ReadMessage returned too early after", d)
   269  	}
   270  }
   271  
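        // createTopic creates the topic with the requested partition count (replication
        // factor 1) against the cluster controller, tolerating TopicAlreadyExists, and
        // then blocks until the topic shows up in metadata.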
   272  func createTopic(t *testing.T, topic string, partitions int) {
   273  	t.Helper()
   274  
   275  	t.Logf("createTopic(%s, %d)", topic, partitions)
   276  
   277  	conn, err := Dial("tcp", "localhost:9092")
   278  	if err != nil {
   279  		err = fmt.Errorf("createTopic, Dial: %w", err)
   280  		t.Fatal(err)
   281  	}
   282  	defer conn.Close()
   283  
   284  	controller, err := conn.Controller()
   285  	if err != nil {
   286  		err = fmt.Errorf("createTopic, conn.Controller: %w", err)
   287  		t.Fatal(err)
   288  	}
   289  
   290  	conn, err = Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
   291  	if err != nil {
   292  		t.Fatal(err)
   293  	}
   294  
   295  	conn.SetDeadline(time.Now().Add(10 * time.Second))
   296  
   297  	_, err = conn.createTopics(createTopicsRequestV0{
   298  		Topics: []createTopicsRequestV0Topic{
   299  			{
   300  				Topic:             topic,
   301  				NumPartitions:     int32(partitions),
   302  				ReplicationFactor: 1,
   303  			},
   304  		},
   305  		Timeout: milliseconds(time.Second),
   306  	})
   307  	switch err {
   308  	case nil:
   309  		// ok
   310  	case TopicAlreadyExists:
   311  		// ok
   312  	default:
   313  		err = fmt.Errorf("createTopic, conn.createTopics: %w", err)
   314  		t.Error(err)
   315  		t.FailNow()
   316  	}
   317  
   318  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
   319  	defer cancel()
   320  
   321  	waitForTopic(ctx, t, topic)
   322  }
   323  
   324  // waitForTopic blocks until the topic appears in cluster metadata with at least one partition, failing the test if ctx expires first.
   325  func waitForTopic(ctx context.Context, t *testing.T, topic string) {
   326  	t.Helper()
   327  
   328  	for {
   329  		select {
   330  		case <-ctx.Done():
   331  			t.Fatalf("reached deadline before verifying topic existence")
   332  		default:
   333  		}
   334  
   335  		cli := &Client{
   336  			Addr:    TCP("localhost:9092"),
   337  			Timeout: 5 * time.Second,
   338  		}
   339  
   340  		response, err := cli.Metadata(ctx, &MetadataRequest{
   341  			Addr:   cli.Addr,
   342  			Topics: []string{topic},
   343  		})
   344  		if err != nil {
   345  			t.Fatalf("waitForTopic: error listing topics: %s", err.Error())
   346  		}
   347  
   348  		// Find a topic which has at least 1 partition in the metadata response
   349  		for _, top := range response.Topics {
   350  			if top.Name != topic {
   351  				continue
   352  			}
   353  
   354  			numPartitions := len(top.Partitions)
   355  			t.Logf("waitForTopic: found topic %q with %d partitions",
   356  				topic, numPartitions)
   357  
   358  			if numPartitions > 0 {
   359  				return
   360  			}
   361  		}
   362  
   363  		t.Logf("retrying after 1s")
   364  		time.Sleep(time.Second)
   365  		continue
   366  	}
   367  }
   368  
   369  func deleteTopic(t *testing.T, topic ...string) {
   370  	t.Helper()
   371  	conn, err := Dial("tcp", "localhost:9092")
   372  	if err != nil {
   373  		t.Fatal(err)
   374  	}
   375  	defer conn.Close()
   376  
   377  	controller, err := conn.Controller()
   378  	if err != nil {
   379  		t.Fatal(err)
   380  	}
   381  
   382  	conn, err = Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
   383  	if err != nil {
   384  		t.Fatal(err)
   385  	}
   386  
   387  	conn.SetDeadline(time.Now().Add(10 * time.Second))
   388  
   389  	if err := conn.DeleteTopics(topic...); err != nil {
   390  		t.Fatal(err)
   391  	}
   392  }
   393  
   394  func TestReaderOnNonZeroPartition(t *testing.T) {
   395  	tests := []struct {
   396  		scenario string
   397  		function func(*testing.T, context.Context, *Reader)
   398  	}{
   399  		{
   400  			scenario: "topic and partition should now be included in header",
   401  			function: testReaderSetsTopicAndPartition,
   402  		},
   403  	}
   404  
   405  	for _, test := range tests {
   406  		testFunc := test.function
   407  		t.Run(test.scenario, func(t *testing.T) {
   408  			t.Parallel()
   409  
   410  			topic := makeTopic()
   411  			createTopic(t, topic, 2)
   412  			defer deleteTopic(t, topic)
   413  
   414  			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
   415  			defer cancel()
   416  
   417  			r := NewReader(ReaderConfig{
   418  				Brokers:   []string{"localhost:9092"},
   419  				Topic:     topic,
   420  				Partition: 1,
   421  				MinBytes:  1,
   422  				MaxBytes:  10e6,
   423  				MaxWait:   100 * time.Millisecond,
   424  			})
   425  			defer r.Close()
   426  			testFunc(t, ctx, r)
   427  		})
   428  	}
   429  }
   430  
   431  func testReaderSetsTopicAndPartition(t *testing.T, ctx context.Context, r *Reader) {
   432  	const N = 3
   433  	prepareReader(t, ctx, r, makeTestSequence(N)...)
   434  
   435  	for i := 0; i != N; i++ {
   436  		m, err := r.ReadMessage(ctx)
   437  		if err != nil {
   438  			t.Error("reading message failed:", err)
   439  			return
   440  		}
   441  
   442  		if m.Topic == "" {
   443  			t.Error("expected topic to be set")
   444  			return
   445  		}
   446  		if m.Topic != r.config.Topic {
   447  			t.Errorf("expected message to contain topic, %v; got %v", r.config.Topic, m.Topic)
   448  			return
   449  		}
   450  		if m.Partition != r.config.Partition {
   451  			t.Errorf("expected partition to be set; expected 1, got %v", m.Partition)
   452  			return
   453  		}
   454  	}
   455  }
   456  
   457  // TestReadTruncatedMessages uses a configuration designed to get the Broker to
   458  // return truncated messages.  It exercises the case where an earlier bug caused
   459  // reading to time out by attempting to read beyond the current response.  This
   460  // test is not perfect, but it is pretty reliable about reproducing the issue.
   461  //
   462  // NOTE: it currently only succeeds against kafka 0.10.1.0, so it will be
   463  // skipped.  It's here so that it can be manually run.
   464  func TestReadTruncatedMessages(t *testing.T) {
   465  	// TODO: it would be great to get it to work against 0.11.0.0 so we could
   466  	//       include it in CI unit tests.
   467  	t.Skip()
   468  
   469  	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
   470  	defer cancel()
   471  	r := NewReader(ReaderConfig{
   472  		Brokers:  []string{"localhost:9092"},
   473  		Topic:    makeTopic(),
   474  		MinBytes: 1,
   475  		MaxBytes: 100,
   476  		MaxWait:  100 * time.Millisecond,
   477  	})
   478  	defer r.Close()
   479  	n := 500
   480  	prepareReader(t, ctx, r, makeTestSequence(n)...)
   481  	for i := 0; i < n; i++ {
   482  		if _, err := r.ReadMessage(ctx); err != nil {
   483  			t.Fatal(err)
   484  		}
   485  	}
   486  }
   487  
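        // makeTestSequence builds n messages whose values are their indices ("0".."n-1"),
        // with millisecond-spaced timestamps so the tests can assert on ordering and on
        // timestamp-based offset lookups.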
   488  func makeTestSequence(n int) []Message {
   489  	base := time.Now()
   490  	msgs := make([]Message, n)
   491  	for i := 0; i != n; i++ {
   492  		msgs[i] = Message{
   493  			Time:  base.Add(time.Duration(i) * time.Millisecond).Truncate(time.Millisecond),
   494  			Value: []byte(strconv.Itoa(i)),
   495  		}
   496  	}
   497  	return msgs
   498  }
   499  
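        // prepareReader seeds the reader's topic/partition with msgs, retrying the leader
        // dial once per second until the topic becomes available or ctx expires.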
   500  func prepareReader(t *testing.T, ctx context.Context, r *Reader, msgs ...Message) {
   501  	config := r.Config()
   502  	var conn *Conn
   503  	var err error
   504  
   505  	for {
   506  		if conn, err = DialLeader(ctx, "tcp", "localhost:9092", config.Topic, config.Partition); err == nil {
   507  			break
   508  		}
   509  		select {
   510  		case <-time.After(time.Second):
   511  		case <-ctx.Done():
   512  			t.Fatal(ctx.Err())
   513  		}
   514  	}
   515  
   516  	defer conn.Close()
   517  
   518  	if _, err := conn.WriteMessages(msgs...); err != nil {
   519  		t.Fatal(err)
   520  	}
   521  }
   522  
   523  var (
   524  	benchmarkReaderOnce    sync.Once
   525  	benchmarkReaderTopic   = makeTopic()
   526  	benchmarkReaderPayload = make([]byte, 2*1024)
   527  )
   528  
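        // BenchmarkReader seeds the benchmark topic once with 10,000 messages carrying a
        // 2 KiB payload, then measures how fast ReadMessage can consume them, re-seeking
        // the reader (SetOffset(-1)) every 10,000 iterations.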
   529  func BenchmarkReader(b *testing.B) {
   530  	const broker = "localhost:9092"
   531  	ctx := context.Background()
   532  
   533  	benchmarkReaderOnce.Do(func() {
   534  		conn, err := DialLeader(ctx, "tcp", broker, benchmarkReaderTopic, 0)
   535  		if err != nil {
   536  			b.Fatal(err)
   537  		}
   538  		defer conn.Close()
   539  
   540  		msgs := make([]Message, 1000)
   541  		for i := range msgs {
   542  			msgs[i].Value = benchmarkReaderPayload
   543  		}
   544  
   545  		for i := 0; i != 10; i++ { // put 10K messages
   546  			if _, err := conn.WriteMessages(msgs...); err != nil {
   547  				b.Fatal(err)
   548  			}
   549  		}
   550  
   551  		b.ResetTimer()
   552  	})
   553  
   554  	r := NewReader(ReaderConfig{
   555  		Brokers:   []string{broker},
   556  		Topic:     benchmarkReaderTopic,
   557  		Partition: 0,
   558  		MinBytes:  1e3,
   559  		MaxBytes:  1e6,
   560  		MaxWait:   100 * time.Millisecond,
   561  	})
   562  
   563  	for i := 0; i < b.N; i++ {
   564  		if (i % 10000) == 0 {
   565  			r.SetOffset(-1)
   566  		}
   567  		_, err := r.ReadMessage(ctx)
   568  		if err != nil {
   569  			b.Fatal(err)
   570  		}
   571  	}
   572  
   573  	r.Close()
   574  	b.SetBytes(int64(len(benchmarkReaderPayload)))
   575  }
   576  
   577  func TestCloseLeavesGroup(t *testing.T) {
   578  	if os.Getenv("KAFKA_VERSION") == "2.3.1" {
   579  		// There's a bug in 2.3.1 that causes the MemberMetadata to be in the wrong format and thus
   580  		// leads to an error when decoding the DescribeGroupsResponse.
   581  		//
   582  		// See https://issues.apache.org/jira/browse/KAFKA-9150 for details.
   583  		t.Skip("Skipping because kafka version is 2.3.1")
   584  	}
   585  
   586  	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
   587  	defer cancel()
   588  
   589  	topic := makeTopic()
   590  	createTopic(t, topic, 1)
   591  	defer deleteTopic(t, topic)
   592  
   593  	groupID := makeGroupID()
   594  	r := NewReader(ReaderConfig{
   595  		Brokers:          []string{"localhost:9092"},
   596  		Topic:            topic,
   597  		GroupID:          groupID,
   598  		MinBytes:         1,
   599  		MaxBytes:         10e6,
   600  		MaxWait:          100 * time.Millisecond,
   601  		RebalanceTimeout: time.Second,
   602  	})
   603  	prepareReader(t, ctx, r, Message{Value: []byte("test")})
   604  
   605  	conn, err := Dial("tcp", r.config.Brokers[0])
   606  	if err != nil {
   607  		t.Fatalf("error dialing: %v", err)
   608  	}
   609  	defer conn.Close()
   610  
   611  	client, shutdown := newLocalClient()
   612  	defer shutdown()
   613  
   614  	descGroups := func() DescribeGroupsResponse {
   615  		resp, err := client.DescribeGroups(
   616  			ctx,
   617  			&DescribeGroupsRequest{
   618  				GroupIDs: []string{groupID},
   619  			},
   620  		)
   621  		if err != nil {
   622  			t.Fatalf("error from describeGroups %v", err)
   623  		}
   624  		return *resp
   625  	}
   626  
   627  	_, err = r.ReadMessage(ctx)
   628  	if err != nil {
   629  		t.Fatalf("our reader never joind its group or couldn't read a message: %v", err)
   630  	}
   631  	resp := descGroups()
   632  	if len(resp.Groups) != 1 {
   633  		t.Fatalf("expected 1 group. got: %d", len(resp.Groups))
   634  	}
   635  	if len(resp.Groups[0].Members) != 1 {
   636  		t.Fatalf("expected group membership size of %d, but got %d", 1, len(resp.Groups[0].Members))
   637  	}
   638  
   639  	err = r.Close()
   640  	if err != nil {
   641  		t.Fatalf("unexpected error closing reader: %s", err.Error())
   642  	}
   643  	resp = descGroups()
   644  	if len(resp.Groups) != 1 {
   645  		t.Fatalf("expected 1 group. got: %d", len(resp.Groups))
   646  	}
   647  	if len(resp.Groups[0].Members) != 0 {
   648  		t.Fatalf("expected group membership size of %d, but got %d", 0, len(resp.Groups[0].Members))
   649  	}
   650  }
   651  
   652  func testConsumerGroupImmediateClose(t *testing.T, ctx context.Context, r *Reader) {
   653  	if err := r.Close(); err != nil {
   654  		t.Fatalf("bad err: %v", err)
   655  	}
   656  }
   657  
   658  func testConsumerGroupSimple(t *testing.T, ctx context.Context, r *Reader) {
   659  	if err := r.Close(); err != nil {
   660  		t.Fatalf("bad err: %v", err)
   661  	}
   662  }
   663  
   664  func TestReaderSetOffsetWhenConsumerGroupsEnabled(t *testing.T) {
   665  	r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
   666  	if err := r.SetOffset(LastOffset); err != errNotAvailableWithGroup {
   667  		t.Fatalf("expected %v; got %v", errNotAvailableWithGroup, err)
   668  	}
   669  }
   670  
   671  func TestReaderOffsetWhenConsumerGroupsEnabled(t *testing.T) {
   672  	r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
   673  	if offset := r.Offset(); offset != -1 {
   674  		t.Fatalf("expected -1; got %v", offset)
   675  	}
   676  }
   677  
   678  func TestReaderLagWhenConsumerGroupsEnabled(t *testing.T) {
   679  	r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
   680  	if offset := r.Lag(); offset != -1 {
   681  		t.Fatalf("expected -1; got %v", offset)
   682  	}
   683  }
   684  
   685  func TestReaderReadLagReturnsZeroLagWhenConsumerGroupsEnabled(t *testing.T) {
   686  	r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
   687  	lag, err := r.ReadLag(context.Background())
   688  
   689  	if err != errNotAvailableWithGroup {
   690  		t.Fatalf("expected %v; got %v", errNotAvailableWithGroup, err)
   691  	}
   692  
   693  	if lag != 0 {
   694  		t.Fatalf("expected 0; got %d", lag)
   695  	}
   696  }
   697  
   698  func TestReaderPartitionWhenConsumerGroupsEnabled(t *testing.T) {
   699  	invoke := func() (boom bool) {
   700  		defer func() {
   701  			if r := recover(); r != nil {
   702  				boom = true
   703  			}
   704  		}()
   705  
   706  		NewReader(ReaderConfig{
   707  			GroupID:   "set",
   708  			Partition: 1,
   709  		})
   710  		return false
   711  	}
   712  
   713  	if !invoke() {
   714  		t.Fatalf("expected panic; but NewReader worked?!")
   715  	}
   716  }
   717  
   718  func TestExtractTopics(t *testing.T) {
   719  	testCases := map[string]struct {
   720  		Members []GroupMember
   721  		Topics  []string
   722  	}{
   723  		"nil": {},
   724  		"single member, single topic": {
   725  			Members: []GroupMember{
   726  				{
   727  					ID:     "a",
   728  					Topics: []string{"topic"},
   729  				},
   730  			},
   731  			Topics: []string{"topic"},
   732  		},
   733  		"two members, single topic": {
   734  			Members: []GroupMember{
   735  				{
   736  					ID:     "a",
   737  					Topics: []string{"topic"},
   738  				},
   739  				{
   740  					ID:     "b",
   741  					Topics: []string{"topic"},
   742  				},
   743  			},
   744  			Topics: []string{"topic"},
   745  		},
   746  		"two members, two topics": {
   747  			Members: []GroupMember{
   748  				{
   749  					ID:     "a",
   750  					Topics: []string{"topic-1"},
   751  				},
   752  				{
   753  					ID:     "b",
   754  					Topics: []string{"topic-2"},
   755  				},
   756  			},
   757  			Topics: []string{"topic-1", "topic-2"},
   758  		},
   759  		"three members, three shared topics": {
   760  			Members: []GroupMember{
   761  				{
   762  					ID:     "a",
   763  					Topics: []string{"topic-1", "topic-2"},
   764  				},
   765  				{
   766  					ID:     "b",
   767  					Topics: []string{"topic-2", "topic-3"},
   768  				},
   769  				{
   770  					ID:     "c",
   771  					Topics: []string{"topic-3", "topic-1"},
   772  				},
   773  			},
   774  			Topics: []string{"topic-1", "topic-2", "topic-3"},
   775  		},
   776  	}
   777  
   778  	for label, tc := range testCases {
   779  		t.Run(label, func(t *testing.T) {
   780  			topics := extractTopics(tc.Members)
   781  			if !reflect.DeepEqual(tc.Topics, topics) {
   782  				t.Errorf("expected %v; got %v", tc.Topics, topics)
   783  			}
   784  		})
   785  	}
   786  }
   787  
   788  func TestReaderConsumerGroup(t *testing.T) {
   789  	tests := []struct {
   790  		scenario       string
   791  		partitions     int
   792  		commitInterval time.Duration
   793  		function       func(*testing.T, context.Context, *Reader)
   794  	}{
   795  		{
   796  			scenario:   "basic handshake",
   797  			partitions: 1,
   798  			function:   testReaderConsumerGroupHandshake,
   799  		},
   800  		{
   801  			scenario:   "verify offset committed",
   802  			partitions: 1,
   803  			function:   testReaderConsumerGroupVerifyOffsetCommitted,
   804  		},
   805  
   806  		{
   807  			scenario:       "verify offset committed when using interval committer",
   808  			partitions:     1,
   809  			commitInterval: 400 * time.Millisecond,
   810  			function:       testReaderConsumerGroupVerifyPeriodicOffsetCommitter,
   811  		},
   812  
   813  		{
   814  			scenario:   "rebalance across many partitions and consumers",
   815  			partitions: 8,
   816  			function:   testReaderConsumerGroupRebalanceAcrossManyPartitionsAndConsumers,
   817  		},
   818  
   819  		{
   820  			scenario:   "consumer group commits on close",
   821  			partitions: 3,
   822  			function:   testReaderConsumerGroupVerifyCommitsOnClose,
   823  		},
   824  
   825  		{
   826  			scenario:   "consumer group rebalance",
   827  			partitions: 3,
   828  			function:   testReaderConsumerGroupRebalance,
   829  		},
   830  
   831  		{
   832  			scenario:   "consumer group rebalance across topics",
   833  			partitions: 3,
   834  			function:   testReaderConsumerGroupRebalanceAcrossTopics,
   835  		},
   836  
   837  		{
   838  			scenario:   "consumer group reads content across partitions",
   839  			partitions: 3,
   840  			function:   testReaderConsumerGroupReadContentAcrossPartitions,
   841  		},
   842  
   843  		{
   844  			scenario:   "Close immediately after NewReader",
   845  			partitions: 1,
   846  			function:   testConsumerGroupImmediateClose,
   847  		},
   848  
   849  		{
   850  			scenario:   "Close immediately after NewReader",
   851  			partitions: 1,
   852  			function:   testConsumerGroupSimple,
   853  		},
   854  	}
   855  
   856  	for _, test := range tests {
   857  		t.Run(test.scenario, func(t *testing.T) {
   858  			// It appears that some of the tests depend on all these tests being
   859  			// run concurrently to pass... this is brittle and should be fixed
   860  			// at some point.
   861  			t.Parallel()
   862  
   863  			topic := makeTopic()
   864  			createTopic(t, topic, test.partitions)
   865  			defer deleteTopic(t, topic)
   866  
   867  			groupID := makeGroupID()
   868  			r := NewReader(ReaderConfig{
   869  				Brokers:           []string{"localhost:9092"},
   870  				Topic:             topic,
   871  				GroupID:           groupID,
   872  				HeartbeatInterval: 2 * time.Second,
   873  				CommitInterval:    test.commitInterval,
   874  				RebalanceTimeout:  2 * time.Second,
   875  				RetentionTime:     time.Hour,
   876  				MinBytes:          1,
   877  				MaxBytes:          1e6,
   878  			})
   879  			defer r.Close()
   880  
   881  			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
   882  			defer cancel()
   883  
   884  			test.function(t, ctx, r)
   885  		})
   886  	}
   887  }
   888  
   889  func testReaderConsumerGroupHandshake(t *testing.T, ctx context.Context, r *Reader) {
   890  	prepareReader(t, context.Background(), r, makeTestSequence(5)...)
   891  
   892  	m, err := r.ReadMessage(ctx)
   893  	if err != nil {
   894  		t.Errorf("bad err: %v", err)
   895  	}
   896  	if m.Topic != r.config.Topic {
   897  		t.Errorf("topic not set")
   898  	}
   899  	if m.Offset != 0 {
   900  		t.Errorf("offset not set")
   901  	}
   902  
   903  	m, err = r.ReadMessage(ctx)
   904  	if err != nil {
   905  		t.Errorf("bad err: %v", err)
   906  	}
   907  	if m.Topic != r.config.Topic {
   908  		t.Errorf("topic not set")
   909  	}
   910  	if m.Offset != 1 {
   911  		t.Errorf("offset not set")
   912  	}
   913  }
   914  
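        // testReaderConsumerGroupVerifyOffsetCommitted checks the explicit commit path:
        // FetchMessage alone does not move the group's committed offset; only after
        // CommitMessages should the coordinator report offset m.Offset+1 for partition 0.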
   915  func testReaderConsumerGroupVerifyOffsetCommitted(t *testing.T, ctx context.Context, r *Reader) {
   916  	prepareReader(t, context.Background(), r, makeTestSequence(3)...)
   917  
   918  	if _, err := r.FetchMessage(ctx); err != nil {
   919  		t.Errorf("bad err: %v", err) // skip the first message
   920  	}
   921  
   922  	m, err := r.FetchMessage(ctx)
   923  	if err != nil {
   924  		t.Errorf("bad err: %v", err)
   925  	}
   926  
   927  	if err := r.CommitMessages(ctx, m); err != nil {
   928  		t.Errorf("bad commit message: %v", err)
   929  	}
   930  
   931  	offsets := getOffsets(t, r.config)
   932  	if expected := map[int]int64{0: m.Offset + 1}; !reflect.DeepEqual(expected, offsets) {
   933  		t.Errorf("expected %v; got %v", expected, offsets)
   934  	}
   935  }
   936  
   937  func testReaderConsumerGroupVerifyPeriodicOffsetCommitter(t *testing.T, ctx context.Context, r *Reader) {
   938  	prepareReader(t, context.Background(), r, makeTestSequence(3)...)
   939  
   940  	if _, err := r.FetchMessage(ctx); err != nil {
   941  		t.Errorf("bad err: %v", err) // skip the first message
   942  	}
   943  
   944  	m, err := r.FetchMessage(ctx)
   945  	if err != nil {
   946  		t.Errorf("bad err: %v", err)
   947  	}
   948  
   949  	started := time.Now()
   950  	if err := r.CommitMessages(ctx, m); err != nil {
   951  		t.Errorf("bad commit message: %v", err)
   952  	}
   953  	if elapsed := time.Since(started); elapsed > 10*time.Millisecond {
   954  		t.Errorf("background commits should happen nearly instantly")
   955  	}
   956  
   957  	// wait for committer to pick up the commits
   958  	time.Sleep(r.config.CommitInterval * 3)
   959  
   960  	offsets := getOffsets(t, r.config)
   961  	if expected := map[int]int64{0: m.Offset + 1}; !reflect.DeepEqual(expected, offsets) {
   962  		t.Errorf("expected %v; got %v", expected, offsets)
   963  	}
   964  }
   965  
   966  func testReaderConsumerGroupVerifyCommitsOnClose(t *testing.T, ctx context.Context, r *Reader) {
   967  	prepareReader(t, context.Background(), r, makeTestSequence(3)...)
   968  
   969  	if _, err := r.FetchMessage(ctx); err != nil {
   970  		t.Errorf("bad err: %v", err) // skip the first message
   971  	}
   972  
   973  	m, err := r.FetchMessage(ctx)
   974  	if err != nil {
   975  		t.Errorf("bad err: %v", err)
   976  	}
   977  
   978  	if err := r.CommitMessages(ctx, m); err != nil {
   979  		t.Errorf("bad commit message: %v", err)
   980  	}
   981  
   982  	if err := r.Close(); err != nil {
   983  		t.Errorf("bad Close: %v", err)
   984  	}
   985  
   986  	r2 := NewReader(r.config)
   987  	defer r2.Close()
   988  
   989  	offsets := getOffsets(t, r2.config)
   990  	if expected := map[int]int64{0: m.Offset + 1}; !reflect.DeepEqual(expected, offsets) {
   991  		t.Errorf("expected %v; got %v", expected, offsets)
   992  	}
   993  }
   994  
   995  func testReaderConsumerGroupReadContentAcrossPartitions(t *testing.T, ctx context.Context, r *Reader) {
   996  	const N = 12
   997  
   998  	client, shutdown := newLocalClient()
   999  	defer shutdown()
  1000  
  1001  	writer := &Writer{
  1002  		Addr:      TCP(r.config.Brokers...),
  1003  		Topic:     r.config.Topic,
  1004  		Balancer:  &RoundRobin{},
  1005  		BatchSize: 1,
  1006  		Transport: client.Transport,
  1007  	}
  1008  	if err := writer.WriteMessages(ctx, makeTestSequence(N)...); err != nil {
  1009  		t.Fatalf("bad write messages: %v", err)
  1010  	}
  1011  	if err := writer.Close(); err != nil {
  1012  		t.Fatalf("bad write err: %v", err)
  1013  	}
  1014  
  1015  	partitions := map[int]struct{}{}
  1016  	for i := 0; i < N; i++ {
  1017  		m, err := r.FetchMessage(ctx)
  1018  		if err != nil {
  1019  			t.Errorf("bad error: %s", err)
  1020  		}
  1021  		partitions[m.Partition] = struct{}{}
  1022  	}
  1023  
  1024  	if v := len(partitions); v != 3 {
  1025  		t.Errorf("expected messages across 3 partitions; got messages across %v partitions", v)
  1026  	}
  1027  }
  1028  
  1029  func testReaderConsumerGroupRebalance(t *testing.T, ctx context.Context, r *Reader) {
  1030  	r2 := NewReader(r.config)
  1031  	defer r.Close()
  1032  
  1033  	const (
  1034  		N          = 12
  1035  		partitions = 2
  1036  	)
  1037  
  1038  	client, shutdown := newLocalClient()
  1039  	defer shutdown()
  1040  
  1041  	// rebalance should result in 12 messages in each of the partitions
  1042  	writer := &Writer{
  1043  		Addr:      TCP(r.config.Brokers...),
  1044  		Topic:     r.config.Topic,
  1045  		Balancer:  &RoundRobin{},
  1046  		BatchSize: 1,
  1047  		Transport: client.Transport,
  1048  	}
  1049  	if err := writer.WriteMessages(ctx, makeTestSequence(N*partitions)...); err != nil {
  1050  		t.Fatalf("bad write messages: %v", err)
  1051  	}
  1052  	if err := writer.Close(); err != nil {
  1053  		t.Fatalf("bad write err: %v", err)
  1054  	}
  1055  
  1056  	// after rebalance, each reader should have a partition to itself
  1057  	for i := 0; i < N; i++ {
  1058  		if _, err := r2.FetchMessage(ctx); err != nil {
  1059  			t.Errorf("expect to read from reader 2")
  1060  		}
  1061  		if _, err := r.FetchMessage(ctx); err != nil {
  1062  			t.Errorf("expect to read from reader 1")
  1063  		}
  1064  	}
  1065  }
  1066  
  1067  func testReaderConsumerGroupRebalanceAcrossTopics(t *testing.T, ctx context.Context, r *Reader) {
  1068  	// create a second reader that shares the groupID, but reads from a different topic
  1069  	client, topic2, shutdown := newLocalClientAndTopic()
  1070  	defer shutdown()
  1071  
  1072  	r2 := NewReader(ReaderConfig{
  1073  		Brokers:           r.config.Brokers,
  1074  		Topic:             topic2,
  1075  		GroupID:           r.config.GroupID,
  1076  		HeartbeatInterval: r.config.HeartbeatInterval,
  1077  		SessionTimeout:    r.config.SessionTimeout,
  1078  		RetentionTime:     r.config.RetentionTime,
  1079  		MinBytes:          r.config.MinBytes,
  1080  		MaxBytes:          r.config.MaxBytes,
  1081  		Logger:            r.config.Logger,
  1082  	})
  1083  	defer r.Close()
  1084  	prepareReader(t, ctx, r2, makeTestSequence(1)...)
  1085  
  1086  	const (
  1087  		N = 12
  1088  	)
  1089  
  1090  	// write messages across the topic's partitions
  1091  	writer := &Writer{
  1092  		Addr:      TCP(r.config.Brokers...),
  1093  		Topic:     r.config.Topic,
  1094  		Balancer:  &RoundRobin{},
  1095  		BatchSize: 1,
  1096  		Transport: client.Transport,
  1097  	}
  1098  	if err := writer.WriteMessages(ctx, makeTestSequence(N)...); err != nil {
  1099  		t.Fatalf("bad write messages: %v", err)
  1100  	}
  1101  	if err := writer.Close(); err != nil {
  1102  		t.Fatalf("bad write err: %v", err)
  1103  	}
  1104  
  1105  	// after rebalance, r2 should read topic2 and r1 should read ALL of the original topic
  1106  	if _, err := r2.FetchMessage(ctx); err != nil {
  1107  		t.Errorf("expect to read from reader 2")
  1108  	}
  1109  
  1110  	// all N messages on the original topic should be read by the original reader
  1111  	for i := 0; i < N; i++ {
  1112  		if _, err := r.FetchMessage(ctx); err != nil {
  1113  			t.Errorf("expect to read from reader 1")
  1114  		}
  1115  	}
  1116  }
  1117  
  1118  func testReaderConsumerGroupRebalanceAcrossManyPartitionsAndConsumers(t *testing.T, ctx context.Context, r *Reader) {
  1119  	// I've rebalanced up to 100 servers, but the rebalance can take upwards
  1120  	// of a minute and that seems too long for unit tests.  Also, setting this
  1121  	// to a larger number seems to make the kafka broker unresponsive.
  1122  	// TODO research if there's a way to reduce rebalance time across many partitions
  1123  	// svls: the described behavior is due to the thundering herd of readers
  1124  	//       hitting the rebalance timeout.  introducing the 100ms sleep in the
  1125  	//       loop below in order to give time for the sync group to finish has
  1126  	//       greatly helped, though we still hit the timeout from time to time.
  1127  	const N = 8
  1128  
  1129  	var readers []*Reader
  1130  
  1131  	for i := 0; i < N-1; i++ {
  1132  		reader := NewReader(r.config)
  1133  		readers = append(readers, reader)
  1134  		time.Sleep(100 * time.Millisecond)
  1135  	}
  1136  	defer func() {
  1137  		for _, r := range readers {
  1138  			r.Close()
  1139  			time.Sleep(100 * time.Millisecond)
  1140  		}
  1141  	}()
  1142  
  1143  	client, shutdown := newLocalClient()
  1144  	defer shutdown()
  1145  
  1146  	// write messages across the topic's partitions
  1147  	writer := &Writer{
  1148  		Addr:      TCP(r.config.Brokers...),
  1149  		Topic:     r.config.Topic,
  1150  		Balancer:  &RoundRobin{},
  1151  		BatchSize: 1,
  1152  		Transport: client.Transport,
  1153  	}
  1154  	if err := writer.WriteMessages(ctx, makeTestSequence(N*3)...); err != nil {
  1155  		t.Fatalf("bad write messages: %v", err)
  1156  	}
  1157  	if err := writer.Close(); err != nil {
  1158  		t.Fatalf("bad write err: %v", err)
  1159  	}
  1160  
  1161  	// after the rebalance, each reader should be able to fetch at least one message
  1162  	for i := 0; i < N-1; i++ {
  1163  		if _, err := readers[i].FetchMessage(ctx); err != nil {
  1164  			t.Errorf("reader %v expected to read 1 message", i)
  1165  		}
  1166  	}
  1167  
  1168  	if _, err := r.FetchMessage(ctx); err != nil {
  1169  		t.Errorf("expect to read from original reader")
  1170  	}
  1171  }
  1172  
  1173  func TestOffsetStash(t *testing.T) {
  1174  	const topic = "topic"
  1175  
  1176  	newMessage := func(partition int, offset int64) Message {
  1177  		return Message{
  1178  			Topic:     topic,
  1179  			Partition: partition,
  1180  			Offset:    offset,
  1181  		}
  1182  	}
  1183  
  1184  	tests := map[string]struct {
  1185  		Given    offsetStash
  1186  		Messages []Message
  1187  		Expected offsetStash
  1188  	}{
  1189  		"nil": {},
  1190  		"empty given, single message": {
  1191  			Given:    offsetStash{},
  1192  			Messages: []Message{newMessage(0, 0)},
  1193  			Expected: offsetStash{
  1194  				topic: {0: 1},
  1195  			},
  1196  		},
  1197  		"ignores earlier offsets": {
  1198  			Given: offsetStash{
  1199  				topic: {0: 2},
  1200  			},
  1201  			Messages: []Message{newMessage(0, 0)},
  1202  			Expected: offsetStash{
  1203  				topic: {0: 2},
  1204  			},
  1205  		},
  1206  		"uses latest offset": {
  1207  			Given: offsetStash{},
  1208  			Messages: []Message{
  1209  				newMessage(0, 2),
  1210  				newMessage(0, 3),
  1211  				newMessage(0, 1),
  1212  			},
  1213  			Expected: offsetStash{
  1214  				topic: {0: 4},
  1215  			},
  1216  		},
  1217  		"uses latest offset, across multiple topics": {
  1218  			Given: offsetStash{},
  1219  			Messages: []Message{
  1220  				newMessage(0, 2),
  1221  				newMessage(0, 3),
  1222  				newMessage(0, 1),
  1223  				newMessage(1, 5),
  1224  				newMessage(1, 6),
  1225  			},
  1226  			Expected: offsetStash{
  1227  				topic: {
  1228  					0: 4,
  1229  					1: 7,
  1230  				},
  1231  			},
  1232  		},
  1233  	}
  1234  
  1235  	for label, test := range tests {
  1236  		t.Run(label, func(t *testing.T) {
  1237  			test.Given.merge(makeCommits(test.Messages...))
  1238  			if !reflect.DeepEqual(test.Expected, test.Given) {
  1239  				t.Errorf("expected %v; got %v", test.Expected, test.Given)
  1240  			}
  1241  		})
  1242  	}
  1243  }
  1244  
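        // mockOffsetCommitter counts offsetCommit invocations and fails the first
        // failCount calls with io.EOF before succeeding.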
  1245  type mockOffsetCommitter struct {
  1246  	invocations int
  1247  	failCount   int
  1248  	err         error
  1249  }
  1250  
  1251  func (m *mockOffsetCommitter) offsetCommit(request offsetCommitRequestV2) (offsetCommitResponseV2, error) {
  1252  	m.invocations++
  1253  
  1254  	if m.failCount > 0 {
  1255  		m.failCount--
  1256  		return offsetCommitResponseV2{}, io.EOF
  1257  	}
  1258  
  1259  	return offsetCommitResponseV2{}, nil
  1260  }
  1261  
  1262  func TestValidateReader(t *testing.T) {
  1263  	tests := []struct {
  1264  		config        ReaderConfig
  1265  		errorOccurred bool
  1266  	}{
  1267  		{config: ReaderConfig{}, errorOccurred: true},
  1268  		{config: ReaderConfig{Brokers: []string{"broker1"}}, errorOccurred: true},
  1269  		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1"}, errorOccurred: false},
  1270  		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: -1}, errorOccurred: true},
  1271  		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: 1, MinBytes: -1}, errorOccurred: true},
  1272  		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: 1, MinBytes: 5, MaxBytes: -1}, errorOccurred: true},
  1273  		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: 1, MinBytes: 5, MaxBytes: 6}, errorOccurred: false},
  1274  	}
  1275  	for _, test := range tests {
  1276  		err := test.config.Validate()
  1277  		if test.errorOccurred && err == nil {
  1278  			t.Fail()
  1279  		}
  1280  		if !test.errorOccurred && err != nil {
  1281  			t.Fail()
  1282  		}
  1283  	}
  1284  }
  1285  
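        // TestCommitLoopImmediateFlushOnGenerationEnd verifies that commit requests still
        // queued when a generation ends are flushed as one final batched commit (ending at
        // offset 100) rather than being committed one request at a time.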
  1286  func TestCommitLoopImmediateFlushOnGenerationEnd(t *testing.T) {
  1287  	t.Parallel()
  1288  	var committedOffset int64
  1289  	var commitCount int
  1290  	gen := &Generation{
  1291  		conn: mockCoordinator{
  1292  			offsetCommitFunc: func(r offsetCommitRequestV2) (offsetCommitResponseV2, error) {
  1293  				commitCount++
  1294  				committedOffset = r.Topics[0].Partitions[0].Offset
  1295  				return offsetCommitResponseV2{}, nil
  1296  			},
  1297  		},
  1298  		done:     make(chan struct{}),
  1299  		log:      func(func(Logger)) {},
  1300  		logError: func(func(Logger)) {},
  1301  		joined:   make(chan struct{}),
  1302  	}
  1303  
  1304  	// initialize commits so that the commitLoopImmediate select statement blocks
  1305  	r := &Reader{stctx: context.Background(), commits: make(chan commitRequest, 100)}
  1306  
  1307  	for i := 0; i < 100; i++ {
  1308  		cr := commitRequest{
  1309  			commits: []commit{{
  1310  				topic:     "topic",
  1311  				partition: 0,
  1312  				offset:    int64(i) + 1,
  1313  			}},
  1314  			errch: make(chan<- error, 1),
  1315  		}
  1316  		r.commits <- cr
  1317  	}
  1318  
  1319  	gen.Start(func(ctx context.Context) {
  1320  		r.commitLoopImmediate(ctx, gen)
  1321  	})
  1322  
  1323  	gen.close()
  1324  
  1325  	if committedOffset != 100 {
  1326  		t.Fatalf("expected commited offset to be 100 but got %d", committedOffset)
  1327  	}
  1328  
  1329  	if commitCount >= 100 {
  1330  		t.Fatalf("expected a single final commit on generation end got %d", commitCount)
  1331  	}
  1332  }
  1333  
  1334  func TestCommitOffsetsWithRetry(t *testing.T) {
  1335  	offsets := offsetStash{"topic": {0: 0}}
  1336  
  1337  	tests := map[string]struct {
  1338  		Fails       int
  1339  		Invocations int
  1340  		HasError    bool
  1341  	}{
  1342  		"happy path": {
  1343  			Invocations: 1,
  1344  		},
  1345  		"1 retry": {
  1346  			Fails:       1,
  1347  			Invocations: 2,
  1348  		},
  1349  		"out of retries": {
  1350  			Fails:       defaultCommitRetries + 1,
  1351  			Invocations: defaultCommitRetries,
  1352  			HasError:    true,
  1353  		},
  1354  	}
  1355  
  1356  	for label, test := range tests {
  1357  		t.Run(label, func(t *testing.T) {
  1358  			count := 0
  1359  			gen := &Generation{
  1360  				conn: mockCoordinator{
  1361  					offsetCommitFunc: func(offsetCommitRequestV2) (offsetCommitResponseV2, error) {
  1362  						count++
  1363  						if count <= test.Fails {
  1364  							return offsetCommitResponseV2{}, io.EOF
  1365  						}
  1366  						return offsetCommitResponseV2{}, nil
  1367  					},
  1368  				},
  1369  				done:     make(chan struct{}),
  1370  				log:      func(func(Logger)) {},
  1371  				logError: func(func(Logger)) {},
  1372  			}
  1373  
  1374  			r := &Reader{stctx: context.Background()}
  1375  			err := r.commitOffsetsWithRetry(gen, offsets, defaultCommitRetries)
  1376  			switch {
  1377  			case test.HasError && err == nil:
  1378  				t.Error("bad err: expected not nil; got nil")
  1379  			case !test.HasError && err != nil:
  1380  				t.Errorf("bad err: expected nil; got %v", err)
  1381  			}
  1382  		})
  1383  	}
  1384  }
  1385  
  1386  // Test that a reader won't continually rebalance when there are more consumers
  1387  // than partitions in a group.
  1388  // https://github.com/deanMdreon/kafka-go/issues/200
  1389  func TestRebalanceTooManyConsumers(t *testing.T) {
  1390  	ctx := context.Background()
  1391  	conf := ReaderConfig{
  1392  		Brokers: []string{"localhost:9092"},
  1393  		GroupID: makeGroupID(),
  1394  		Topic:   makeTopic(),
  1395  		MaxWait: time.Second,
  1396  	}
  1397  
  1398  	// Create the first reader and wait for it to become the leader.
  1399  	r1 := NewReader(conf)
  1400  	prepareReader(t, ctx, r1, makeTestSequence(1)...)
  1401  	r1.ReadMessage(ctx)
  1402  	// Clear the stats from the first rebalance.
  1403  	r1.Stats()
  1404  
  1405  	// Second reader should cause one rebalance for each r1 and r2.
  1406  	r2 := NewReader(conf)
  1407  
  1408  	// Wait for rebalances.
  1409  	time.Sleep(5 * time.Second)
  1410  
  1411  	// Before the fix, r2 would cause continuous rebalances,
  1412  	// as it tried to handshake() repeatedly.
  1413  	rebalances := r1.Stats().Rebalances + r2.Stats().Rebalances
  1414  	if rebalances > 2 {
  1415  		t.Errorf("unexpected rebalances to first reader, got %d", rebalances)
  1416  	}
  1417  }
  1418  
  1419  func TestConsumerGroupWithMissingTopic(t *testing.T) {
  1420  	t.Skip("this test doesn't work when the cluster is configured to auto-create topics")
  1421  
  1422  	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
  1423  	defer cancel()
  1424  
  1425  	conf := ReaderConfig{
  1426  		Brokers:                []string{"localhost:9092"},
  1427  		GroupID:                makeGroupID(),
  1428  		Topic:                  makeTopic(),
  1429  		MaxWait:                time.Second,
  1430  		PartitionWatchInterval: 100 * time.Millisecond,
  1431  		WatchPartitionChanges:  true,
  1432  	}
  1433  
  1434  	r := NewReader(conf)
  1435  	defer r.Close()
  1436  
  1437  	recvErr := make(chan error, 1)
  1438  	go func() {
  1439  		_, err := r.ReadMessage(ctx)
  1440  		recvErr <- err
  1441  	}()
  1442  
  1443  	time.Sleep(time.Second)
  1444  	client, shutdown := newLocalClientWithTopic(conf.Topic, 1)
  1445  	defer shutdown()
  1446  
  1447  	w := &Writer{
  1448  		Addr:         TCP(r.config.Brokers...),
  1449  		Topic:        r.config.Topic,
  1450  		BatchTimeout: 10 * time.Millisecond,
  1451  		BatchSize:    1,
  1452  		Transport:    client.Transport,
  1453  	}
  1454  	defer w.Close()
  1455  	if err := w.WriteMessages(ctx, Message{}); err != nil {
  1456  		t.Fatalf("write error: %+v", err)
  1457  	}
  1458  
  1459  	if err := <-recvErr; err != nil {
  1460  		t.Fatalf("read error: %+v", err)
  1461  	}
  1462  
  1463  	nMsgs := r.Stats().Messages
  1464  	if nMsgs != 1 {
  1465  		t.Fatalf("expected to receive one message, but got %d", nMsgs)
  1466  	}
  1467  }
  1468  
  1469  func TestConsumerGroupWithTopic(t *testing.T) {
  1470  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  1471  	defer cancel()
  1472  
  1473  	conf := ReaderConfig{
  1474  		Brokers:                []string{"localhost:9092"},
  1475  		GroupID:                makeGroupID(),
  1476  		Topic:                  makeTopic(),
  1477  		MaxWait:                time.Second,
  1478  		PartitionWatchInterval: 100 * time.Millisecond,
  1479  		WatchPartitionChanges:  true,
  1480  		Logger:                 newTestKafkaLogger(t, "Reader:"),
  1481  	}
  1482  
  1483  	r := NewReader(conf)
  1484  	defer r.Close()
  1485  
  1486  	recvErr := make(chan error, 1)
  1487  	go func() {
  1488  		msg, err := r.ReadMessage(ctx)
  1489  		t.Log(msg)
  1490  		recvErr <- err
  1491  	}()
  1492  
  1493  	time.Sleep(conf.MaxWait)
  1494  
  1495  	client, shutdown := newLocalClientWithTopic(conf.Topic, 1)
  1496  	defer shutdown()
  1497  
  1498  	w := &Writer{
  1499  		Addr:         TCP(r.config.Brokers...),
  1500  		Topic:        conf.Topic,
  1501  		BatchTimeout: 10 * time.Millisecond,
  1502  		BatchSize:    1,
  1503  		Transport:    client.Transport,
  1504  		Logger:       newTestKafkaLogger(t, "Writer:"),
  1505  	}
  1506  	defer w.Close()
  1507  	if err := w.WriteMessages(ctx, Message{Value: []byte(conf.Topic)}); err != nil {
  1508  		t.Fatalf("write error: %+v", err)
  1509  	}
  1510  
  1511  	if err := <-recvErr; err != nil {
  1512  		t.Fatalf("read error: %+v", err)
  1513  	}
  1514  
  1515  	nMsgs := r.Stats().Messages
  1516  	if nMsgs != 1 {
  1517  		t.Fatalf("expected to receive 1 message, but got %d", nMsgs)
  1518  	}
  1519  }
  1520  
  1521  func TestConsumerGroupWithGroupTopicsSingle(t *testing.T) {
  1522  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  1523  	defer cancel()
  1524  
  1525  	conf := ReaderConfig{
  1526  		Brokers:                []string{"localhost:9092"},
  1527  		GroupID:                makeGroupID(),
  1528  		GroupTopics:            []string{makeTopic()},
  1529  		MaxWait:                time.Second,
  1530  		PartitionWatchInterval: 100 * time.Millisecond,
  1531  		WatchPartitionChanges:  true,
  1532  		Logger:                 newTestKafkaLogger(t, "Reader:"),
  1533  	}
  1534  
  1535  	r := NewReader(conf)
  1536  	defer r.Close()
  1537  
  1538  	recvErr := make(chan error, len(conf.GroupTopics))
  1539  	go func() {
  1540  		msg, err := r.ReadMessage(ctx)
  1541  		t.Log(msg)
  1542  		recvErr <- err
  1543  	}()
  1544  
  1545  	time.Sleep(conf.MaxWait)
  1546  
  1547  	for i, topic := range conf.GroupTopics {
  1548  		client, shutdown := newLocalClientWithTopic(topic, 1)
  1549  		defer shutdown()
  1550  
  1551  		w := &Writer{
  1552  			Addr:         TCP(r.config.Brokers...),
  1553  			Topic:        topic,
  1554  			BatchTimeout: 10 * time.Millisecond,
  1555  			BatchSize:    1,
  1556  			Transport:    client.Transport,
  1557  			Logger:       newTestKafkaLogger(t, fmt.Sprintf("Writer(%d):", i)),
  1558  		}
  1559  		defer w.Close()
  1560  		if err := w.WriteMessages(ctx, Message{Value: []byte(topic)}); err != nil {
  1561  			t.Fatalf("write error: %+v", err)
  1562  		}
  1563  	}
  1564  
  1565  	if err := <-recvErr; err != nil {
  1566  		t.Fatalf("read error: %+v", err)
  1567  	}
  1568  
  1569  	nMsgs := r.Stats().Messages
  1570  	if nMsgs != int64(len(conf.GroupTopics)) {
  1571  		t.Fatalf("expected to receive %d messages, but got %d", len(conf.GroupTopics), nMsgs)
  1572  	}
  1573  }
  1574  
  1575  func TestConsumerGroupWithGroupTopicsMultiple(t *testing.T) {
  1576  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  1577  	defer cancel()
  1578  
  1579  	client, shutdown := newLocalClient()
  1580  	defer shutdown()
  1581  
  1582  	conf := ReaderConfig{
  1583  		Brokers:                []string{"localhost:9092"},
  1584  		GroupID:                makeGroupID(),
  1585  		GroupTopics:            []string{makeTopic(), makeTopic()},
  1586  		MaxWait:                time.Second,
  1587  		PartitionWatchInterval: 100 * time.Millisecond,
  1588  		WatchPartitionChanges:  true,
  1589  		Logger:                 newTestKafkaLogger(t, "Reader:"),
  1590  	}
  1591  
  1592  	r := NewReader(conf)
  1593  
  1594  	w := &Writer{
  1595  		Addr:         TCP(r.config.Brokers...),
  1596  		BatchTimeout: 10 * time.Millisecond,
  1597  		BatchSize:    1,
  1598  		Transport:    client.Transport,
  1599  		Logger:       newTestKafkaLogger(t, "Writer:"),
  1600  	}
  1601  	defer w.Close()
  1602  
  1603  	time.Sleep(time.Second)
  1604  
  1605  	msgs := make([]Message, 0, len(conf.GroupTopics))
  1606  	for _, topic := range conf.GroupTopics {
  1607  		msgs = append(msgs, Message{Topic: topic})
  1608  	}
  1609  	if err := w.WriteMessages(ctx, msgs...); err != nil {
  1610  		t.Logf("write error: %+v", err)
  1611  	}
  1612  
  1613  	wg := new(sync.WaitGroup)
  1614  	wg.Add(len(msgs))
  1615  
  1616  	go func() {
  1617  		wg.Wait()
  1618  		t.Log("closing reader")
  1619  		r.Close()
  1620  	}()
  1621  
  1622  	for {
  1623  		msg, err := r.ReadMessage(ctx)
  1624  		if err != nil {
  1625  			if err == io.EOF {
  1626  				t.Log("reader closed")
  1627  				break
  1628  			}
  1629  
  1630  			t.Fatalf("read error: %+v", err)
  1631  		} else {
  1632  			t.Logf("message read: %+v", msg)
  1633  			wg.Done()
  1634  		}
  1635  	}
  1636  
  1637  	nMsgs := r.Stats().Messages
  1638  	if nMsgs != int64(len(conf.GroupTopics)) {
  1639  		t.Fatalf("expected to receive %d messages, but got %d", len(conf.GroupTopics), nMsgs)
  1640  	}
  1641  }
  1642  
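        // getOffsets returns the committed offsets of partition 0 of config.Topic for the
        // reader's consumer group, keyed by partition, by querying the group coordinator
        // directly.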
  1643  func getOffsets(t *testing.T, config ReaderConfig) map[int]int64 {
  1644  	// minimal config required to lookup coordinator
  1645  	cg := ConsumerGroup{
  1646  		config: ConsumerGroupConfig{
  1647  			ID:      config.GroupID,
  1648  			Brokers: config.Brokers,
  1649  			Dialer:  config.Dialer,
  1650  		},
  1651  	}
  1652  
  1653  	conn, err := cg.coordinator()
  1654  	if err != nil {
  1655  		t.Errorf("unable to connect to coordinator: %v", err)
  1656  	}
  1657  	defer conn.Close()
  1658  
  1659  	offsets, err := conn.offsetFetch(offsetFetchRequestV1{
  1660  		GroupID: config.GroupID,
  1661  		Topics: []offsetFetchRequestV1Topic{{
  1662  			Topic:      config.Topic,
  1663  			Partitions: []int32{0},
  1664  		}},
  1665  	})
  1666  	if err != nil {
  1667  		t.Errorf("bad fetchOffsets: %v", err)
  1668  	}
  1669  
  1670  	m := map[int]int64{}
  1671  
  1672  	for _, r := range offsets.Responses {
  1673  		if r.Topic == config.Topic {
  1674  			for _, p := range r.PartitionResponses {
  1675  				m[int(p.Partition)] = p.Offset
  1676  			}
  1677  		}
  1678  	}
  1679  
  1680  	return m
  1681  }
  1682  
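        // connTO bounds the dialer timeout used by the cannot-connect tests; connTestTO
        // gives the surrounding context twice that budget so the dial failure, not the
        // context deadline, is what ends the fetch.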
  1683  const (
  1684  	connTO     = 1 * time.Second
  1685  	connTestTO = 2 * connTO
  1686  )
  1687  
  1688  func TestErrorCannotConnect(t *testing.T) {
  1689  	r := NewReader(ReaderConfig{
  1690  		Brokers:     []string{"localhost:9093"},
  1691  		Dialer:      &Dialer{Timeout: connTO},
  1692  		MaxAttempts: 1,
  1693  		Topic:       makeTopic(),
  1694  	})
  1695  	ctx, cancel := context.WithTimeout(context.Background(), connTestTO)
  1696  	defer cancel()
  1697  
  1698  	_, err := r.FetchMessage(ctx)
  1699  	if err == nil || ctx.Err() != nil {
  1700  		t.Errorf("Reader.FetchMessage must fail when it cannot " +
  1701  			"connect")
  1702  	}
  1703  }
  1704  
  1705  func TestErrorCannotConnectGroupSubscription(t *testing.T) {
  1706  	r := NewReader(ReaderConfig{
  1707  		Brokers:     []string{"localhost:9093"},
  1708  		Dialer:      &Dialer{Timeout: 1 * time.Second},
  1709  		GroupID:     "foobar",
  1710  		MaxAttempts: 1,
  1711  		Topic:       makeTopic(),
  1712  	})
  1713  	ctx, cancel := context.WithTimeout(context.Background(), connTestTO)
  1714  	defer cancel()
  1715  
  1716  	_, err := r.FetchMessage(ctx)
  1717  	if err == nil || ctx.Err() != nil {
  1718  		t.Errorf("Reader.FetchMessage with a group subscription " +
  1719  			"must fail when it cannot connect")
  1720  	}
  1721  }
  1722  
  1723  // Tests that the reader can handle messages where the response is truncated
  1724  // due to reaching MaxBytes.
  1725  //
  1726  // If MaxBytes is too small to fit 1 record then it will never truncate, so
  1727  // we start from a small message size and increase it until we are sure
  1728  // truncation has happened at some point.
  1729  func TestReaderTruncatedResponse(t *testing.T) {
  1730  	topic := makeTopic()
  1731  	createTopic(t, topic, 1)
  1732  	defer deleteTopic(t, topic)
  1733  
  1734  	readerMaxBytes := 100
  1735  	batchSize := 4
  1736  	maxMsgPadding := 5
  1737  	readContextTimeout := 10 * time.Second
  1738  
  1739  	var msgs []Message
  1740  	// `n` is the monotonically increasing key of each message.
  1741  	n := 0
  1742  	// `i` is the number of padding bytes in each message value.
  1743  	for i := 0; i < maxMsgPadding; i++ {
  1744  		bb := bytes.Buffer{}
  1745  		for x := 0; x < i; x++ {
  1746  			_, err := bb.WriteRune('0')
  1747  			require.NoError(t, err)
  1748  		}
  1749  		padding := bb.Bytes()
  1750  		// `j` is the number of times the message repeats
  1751  		for j := 0; j < batchSize*4; j++ {
  1752  			msgs = append(msgs, Message{
  1753  				Key:   []byte(fmt.Sprintf("%05d", n)),
  1754  				Value: padding,
  1755  			})
  1756  			n++
  1757  		}
  1758  	}
  1759  
  1760  	wr := NewWriter(WriterConfig{
  1761  		Brokers:   []string{"localhost:9092"},
  1762  		BatchSize: batchSize,
  1763  		Async:     false,
  1764  		Topic:     topic,
  1765  		Balancer:  &LeastBytes{},
  1766  	})
  1767  	err := wr.WriteMessages(context.Background(), msgs...)
  1768  	require.NoError(t, err)
  1769  
  1770  	ctx, cancel := context.WithTimeout(context.Background(), readContextTimeout)
  1771  	defer cancel()
  1772  	r := NewReader(ReaderConfig{
  1773  		Brokers:  []string{"localhost:9092"},
  1774  		Topic:    topic,
  1775  		MinBytes: 1,
  1776  		MaxBytes: readerMaxBytes,
  1777  		// Speed up testing
  1778  		MaxWait: 100 * time.Millisecond,
  1779  	})
  1780  	defer r.Close()
  1781  
  1782  	expectedKeys := map[string]struct{}{}
  1783  	for _, k := range msgs {
  1784  		expectedKeys[string(k.Key)] = struct{}{}
  1785  	}
  1786  	keys := map[string]struct{}{}
  1787  	for {
  1788  		m, err := r.FetchMessage(ctx)
  1789  		require.NoError(t, err)
  1790  		keys[string(m.Key)] = struct{}{}
  1791  
  1792  		t.Logf("got key %s have %d keys expect %d\n", string(m.Key), len(keys), len(expectedKeys))
  1793  		if len(keys) == len(expectedKeys) {
  1794  			require.Equal(t, expectedKeys, keys)
  1795  			return
  1796  		}
  1797  	}
  1798  }
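
// readUntilCaughtUp is a hedged sketch (not part of the test above; the name is
// ours) of observing that a reader with a small MaxBytes eventually delivers
// everything: for a reader without a GroupID, ReadLag reports how far behind the
// last returned message is, so the loop stops once the lag reaches zero.
func readUntilCaughtUp(ctx context.Context, r *Reader) ([]Message, error) {
	var msgs []Message
	for {
		lag, err := r.ReadLag(ctx)
		if err != nil {
			return msgs, err
		}
		if lag == 0 {
			return msgs, nil
		}
		m, err := r.ReadMessage(ctx)
		if err != nil {
			return msgs, err
		}
		msgs = append(msgs, m)
	}
}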
  1799  
  1800  // Tests that the reader can read record batches from log compacted topics
  1801  // where the batch ends with compacted records.
  1802  //
  1803  // This test forces varying-sized runs of duplicated messages and configures
  1804  // the topic with a minimal `segment.bytes` in order to guarantee that at least
  1805  // one batch is compacted down to 0 "unread" messages while still containing at
  1806  // least one "old" message; otherwise the batch would be skipped entirely.
  1807  func TestReaderReadCompactedMessage(t *testing.T) {
  1808  	topic := makeTopic()
  1809  	createTopicWithCompaction(t, topic, 1)
  1810  	defer deleteTopic(t, topic)
  1811  
  1812  	msgs := makeTestDuplicateSequence()
  1813  
  1814  	writeMessagesForCompactionCheck(t, topic, msgs)
  1815  
  1816  	expectedKeys := map[string]int{}
  1817  	for _, msg := range msgs {
  1818  		expectedKeys[string(msg.Key)] = 1
  1819  	}
  1820  
  1821  	// kafka 2.0.1 is extra slow
  1822  	ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
  1823  	defer cancel()
  1824  	for {
  1825  		success := func() bool {
  1826  			r := NewReader(ReaderConfig{
  1827  				Brokers:  []string{"localhost:9092"},
  1828  				Topic:    topic,
  1829  				MinBytes: 200,
  1830  				MaxBytes: 200,
  1831  				// Speed up testing
  1832  				MaxWait: 100 * time.Millisecond,
  1833  			})
  1834  			defer r.Close()
  1835  
  1836  			keys := map[string]int{}
  1837  			for {
  1838  				m, err := r.FetchMessage(ctx)
  1839  				if err != nil {
  1840  					t.Logf("can't get message from compacted log: %v", err)
  1841  					return false
  1842  				}
  1843  				keys[string(m.Key)]++
  1844  
  1845  				if len(keys) == countKeys(msgs) {
  1846  					t.Logf("got keys: %+v", keys)
  1847  					return reflect.DeepEqual(keys, expectedKeys)
  1848  				}
  1849  			}
  1850  		}()
  1851  		if success {
  1852  			return
  1853  		}
  1854  		select {
  1855  		case <-ctx.Done():
  1856  			t.Fatal(ctx.Err())
  1857  		default:
  1858  		}
  1859  	}
  1860  }
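
// Log compaction removes records but never renumbers offsets, so after
// compaction the partition still spans the full offset range of everything that
// was written, even though far fewer records survive. logOffsetRange is a
// hedged, illustrative sketch of inspecting that range; it is not used by the
// test above and the helper name is ours.
func logOffsetRange(t *testing.T, topic string) {
	t.Helper()

	conn, err := DialLeader(context.Background(), "tcp", "localhost:9092", topic, 0)
	if err != nil {
		t.Fatalf("dial leader: %v", err)
	}
	defer conn.Close()

	first, err := conn.ReadFirstOffset()
	if err != nil {
		t.Fatalf("read first offset: %v", err)
	}
	last, err := conn.ReadLastOffset()
	if err != nil {
		t.Fatalf("read last offset: %v", err)
	}
	t.Logf("topic %s partition 0 spans offsets [%d, %d)", topic, first, last)
}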
  1861  
  1862  // writeMessagesForCompactionCheck writes msgs using a writer configuration suited to compaction testing.
  1863  func writeMessagesForCompactionCheck(t *testing.T, topic string, msgs []Message) {
  1864  	t.Helper()
  1865  
  1866  	wr := NewWriter(WriterConfig{
  1867  		Brokers: []string{"localhost:9092"},
  1868  		// Batch size must be large enough to have multiple compacted records
  1869  		// for testing more edge cases.
  1870  		BatchSize: 3,
  1871  		Async:     false,
  1872  		Topic:     topic,
  1873  		Balancer:  &LeastBytes{},
  1874  	})
  1875  	err := wr.WriteMessages(context.Background(), msgs...)
  1876  	require.NoError(t, err)
  1877  }
  1878  
  1879  // makeTestDuplicateSequence creates messages for compacted log testing
  1880  //
  1881  // All keys and values are 4 characters long to tightly control how many
  1882  // messages fit in each log segment.
  1883  func makeTestDuplicateSequence() []Message {
  1884  	var msgs []Message
  1885  	// `n` is an increasing counter, so keys derived from it are unique and never compacted.
  1886  	n := 0
  1887  	// `i` determines how many compactable duplicate records follow each unique message.
  1888  	for i := 0; i < 5; i++ {
  1889  		// `j` is how many times the current pattern repeats. Because each pattern
  1890  		// is slightly larger or smaller than the log segment size, repeating it
  1891  		// enough times eventually exercises every alignment of records against
  1892  		// segment boundaries.
  1893  		for j := 0; j < 30; j++ {
  1894  			msgs = append(msgs, Message{
  1895  				Key:   []byte(fmt.Sprintf("%04d", n)),
  1896  				Value: []byte(fmt.Sprintf("%04d", n)),
  1897  			})
  1898  			n++
  1899  
  1900  			// This produces the duplicated messages to compact.
  1901  			for k := 0; k < i; k++ {
  1902  				msgs = append(msgs, Message{
  1903  					Key:   []byte("dup_"),
  1904  					Value: []byte("dup_"),
  1905  				})
  1906  			}
  1907  		}
  1908  	}
  1909  
  1910  	// "end markers" push the duplicated messages out of the last (active) segment
  1911  	// of the log so that they can all be compacted.
  1912  	for i := 0; i < 10; i++ {
  1913  		msgs = append(msgs, Message{
  1914  			Key:   []byte(fmt.Sprintf("e-%02d", i)),
  1915  			Value: []byte(fmt.Sprintf("e-%02d", i)),
  1916  		})
  1917  	}
  1918  	return msgs
  1919  }
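
// For reference, the loops above produce 5*30 = 150 unique counter keys,
// 30*(0+1+2+3+4) = 300 "dup_" records, and 10 end markers: 460 records with
// 161 distinct keys. summarizeSequence is a tiny illustrative helper (the name
// is ours, not used by the tests) for double-checking that arithmetic.
func summarizeSequence(msgs []Message) (records, distinctKeys int) {
	return len(msgs), countKeys(msgs)
}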
  1920  
  1921  // countKeys counts the unique keys in the given Message slice.
  1922  func countKeys(msgs []Message) int {
  1923  	m := make(map[string]struct{})
  1924  	for _, msg := range msgs {
  1925  		m[string(msg.Key)] = struct{}{}
  1926  	}
  1927  	return len(m)
  1928  }
  1929  
  1930  func createTopicWithCompaction(t *testing.T, topic string, partitions int) {
  1931  	t.Helper()
  1932  
  1933  	t.Logf("createTopicWithCompaction(%s, %d)", topic, partitions)
  1934  
  1935  	conn, err := Dial("tcp", "localhost:9092")
  1936  	require.NoError(t, err)
  1937  	defer conn.Close()
  1938  
  1939  	controller, err := conn.Controller()
  1940  	require.NoError(t, err)
  1941  
  1942  	conn, err = Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
  1943  	require.NoError(t, err)
	// The defer above captured the first connection, so close the controller connection explicitly too.
	defer conn.Close()
  1944  
  1945  	conn.SetDeadline(time.Now().Add(10 * time.Second))
  1946  
  1947  	err = conn.CreateTopics(TopicConfig{
  1948  		Topic:             topic,
  1949  		NumPartitions:     partitions,
  1950  		ReplicationFactor: 1,
  1951  		ConfigEntries: []ConfigEntry{
  1952  			{
  1953  				ConfigName:  "cleanup.policy",
  1954  				ConfigValue: "compact",
  1955  			},
  1956  			{
  1957  				ConfigName:  "segment.bytes",
  1958  				ConfigValue: "200",
  1959  			},
  1960  		},
  1961  	})
  1962  	switch err {
  1963  	case nil:
  1964  		// ok
  1965  	case TopicAlreadyExists:
  1966  		// ok
  1967  	default:
  1968  		require.NoError(t, err)
  1969  	}
  1970  
  1971  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
  1972  	defer cancel()
  1973  	waitForTopic(ctx, t, topic)
  1974  }
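
// createTopicWithCompaction drives the controller connection directly. The same
// topic could also be created through the package's public Client API; the
// sketch below is illustrative only (the helper name is ours and it is not used
// by the tests), assuming the CreateTopicsRequest types shipped with this
// version.
func createCompactedTopicViaClient(t *testing.T, topic string, partitions int) {
	t.Helper()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	client := &Client{}
	res, err := client.CreateTopics(ctx, &CreateTopicsRequest{
		Addr: TCP("localhost:9092"),
		Topics: []TopicConfig{{
			Topic:             topic,
			NumPartitions:     partitions,
			ReplicationFactor: 1,
			ConfigEntries: []ConfigEntry{
				{ConfigName: "cleanup.policy", ConfigValue: "compact"},
				{ConfigName: "segment.bytes", ConfigValue: "200"},
			},
		}},
	})
	require.NoError(t, err)

	// A per-topic "already exists" error is tolerated here, mirroring the
	// TopicAlreadyExists case handled by createTopicWithCompaction.
	if topicErr := res.Errors[topic]; topicErr != nil {
		t.Logf("create topic %s: %v", topic, topicErr)
	}
}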