github.com/segmentio/kafka-go@v0.4.48-0.20240318174348-3f6244eb34fd/reader_test.go

     1  package kafka
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"errors"
     7  	"fmt"
     8  	"io"
     9  	"math/rand"
    10  	"net"
    11  	"os"
    12  	"reflect"
    13  	"strconv"
    14  	"sync"
    15  	"testing"
    16  	"time"
    17  
    18  	"github.com/stretchr/testify/require"
    19  )
    20  
    21  func TestReader(t *testing.T) {
    22  	tests := []struct {
    23  		scenario string
    24  		function func(*testing.T, context.Context, *Reader)
    25  	}{
    26  		{
    27  			scenario: "calling Read with a context that has been canceled returns an error",
    28  			function: testReaderReadCanceled,
    29  		},
    30  
    31  		{
    32  			scenario: "all messages of the stream are returned when calling ReadMessage repeatedly",
    33  			function: testReaderReadMessages,
    34  		},
    35  
    36  		{
    37  			scenario: "test special offsets -1 and -2",
    38  			function: testReaderSetSpecialOffsets,
    39  		},
    40  
    41  		{
    42  			scenario: "setting the offset to random values returns the expected messages when Read is called",
    43  			function: testReaderSetRandomOffset,
    44  		},
    45  
    46  		{
     47  			scenario: "setting the offset by timestamp",
    48  			function: testReaderSetOffsetAt,
    49  		},
    50  
    51  		{
    52  			scenario: "calling Lag returns the lag of the last message read from kafka",
    53  			function: testReaderLag,
    54  		},
    55  
    56  		{
    57  			scenario: "calling ReadLag returns the current lag of a reader",
    58  			function: testReaderReadLag,
    59  		},
    60  
    61  		{ // https://github.com/segmentio/kafka-go/issues/30
     62  			scenario: "reading from an out-of-range offset waits until the context is canceled",
    63  			function: testReaderOutOfRangeGetsCanceled,
    64  		},
    65  
    66  		{
    67  			scenario: "topic being recreated will return an error",
    68  			function: testReaderTopicRecreated,
    69  		},
    70  	}
    71  
    72  	for _, test := range tests {
    73  		testFunc := test.function
    74  		t.Run(test.scenario, func(t *testing.T) {
    75  			t.Parallel()
    76  
    77  			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    78  			defer cancel()
    79  
    80  			r := NewReader(ReaderConfig{
    81  				Brokers:  []string{"localhost:9092"},
    82  				Topic:    makeTopic(),
    83  				MinBytes: 1,
    84  				MaxBytes: 10e6,
    85  				MaxWait:  100 * time.Millisecond,
    86  				Logger:   newTestKafkaLogger(t, ""),
    87  			})
    88  			defer r.Close()
    89  			testFunc(t, ctx, r)
    90  		})
    91  	}
    92  }
    93  
    94  func testReaderReadCanceled(t *testing.T, ctx context.Context, r *Reader) {
    95  	ctx, cancel := context.WithCancel(ctx)
    96  	cancel()
    97  
    98  	if _, err := r.ReadMessage(ctx); !errors.Is(err, context.Canceled) {
    99  		t.Error(err)
   100  	}
   101  }
   102  
   103  func testReaderReadMessages(t *testing.T, ctx context.Context, r *Reader) {
   104  	const N = 1000
   105  	prepareReader(t, ctx, r, makeTestSequence(N)...)
   106  
   107  	var offset int64
   108  
   109  	for i := 0; i != N; i++ {
   110  		m, err := r.ReadMessage(ctx)
   111  		if err != nil {
   112  			t.Error("reading message at offset", offset, "failed:", err)
   113  			return
   114  		}
   115  		offset = m.Offset + 1
   116  		v, _ := strconv.Atoi(string(m.Value))
   117  		if v != i {
   118  			t.Error("message at index", i, "has wrong value:", v)
   119  			return
   120  		}
   121  	}
   122  }
   123  
   124  func testReaderSetSpecialOffsets(t *testing.T, ctx context.Context, r *Reader) {
   125  	prepareReader(t, ctx, r, Message{Value: []byte("first")})
   126  	prepareReader(t, ctx, r, makeTestSequence(3)...)
   127  
   128  	go func() {
   129  		time.Sleep(1 * time.Second)
   130  		prepareReader(t, ctx, r, Message{Value: []byte("last")})
   131  	}()
   132  
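         	// FirstOffset (-2) seeks to the beginning of the partition, while LastOffset (-1)
         	// seeks to the end and waits for the next produced message (the "last" value
         	// written by the goroutine above).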
   133  	for _, test := range []struct {
   134  		off, final int64
   135  		want       string
   136  	}{
   137  		{FirstOffset, 1, "first"},
   138  		{LastOffset, 5, "last"},
   139  	} {
   140  		offset := test.off
   141  		if err := r.SetOffset(offset); err != nil {
   142  			t.Error("setting offset", offset, "failed:", err)
   143  		}
   144  		m, err := r.ReadMessage(ctx)
   145  		if err != nil {
   146  			t.Error("reading at offset", offset, "failed:", err)
   147  		}
   148  		if string(m.Value) != test.want {
   149  			t.Error("message at offset", offset, "has wrong value:", string(m.Value))
   150  		}
   151  		if off := r.Offset(); off != test.final {
   152  			t.Errorf("bad final offset: got %d, want %d", off, test.final)
   153  		}
   154  	}
   155  }
   156  
   157  func testReaderSetRandomOffset(t *testing.T, ctx context.Context, r *Reader) {
   158  	const N = 10
   159  	prepareReader(t, ctx, r, makeTestSequence(N)...)
   160  
   161  	for i := 0; i != 2*N; i++ {
   162  		offset := rand.Intn(N)
   163  		r.SetOffset(int64(offset))
   164  		m, err := r.ReadMessage(ctx)
   165  		if err != nil {
   166  			t.Error("seeking to offset", offset, "failed:", err)
   167  			return
   168  		}
   169  		v, _ := strconv.Atoi(string(m.Value))
   170  		if v != offset {
   171  			t.Error("message at offset", offset, "has wrong value:", v)
   172  			return
   173  		}
   174  	}
   175  }
   176  
   177  func testReaderSetOffsetAt(t *testing.T, ctx context.Context, r *Reader) {
    178  	// We make 2 batches of messages here with a brief 2-second pause
    179  	// to ensure messages 0...9 are written a few seconds before messages 10...19.
    180  	// We'll then fetch the timestamp of the message at offset 10 and use that
    181  	// timestamp to set our reader.
   182  	const N = 10
   183  	prepareReader(t, ctx, r, makeTestSequence(N)...)
   184  	time.Sleep(time.Second * 2)
   185  	prepareReader(t, ctx, r, makeTestSequence(N)...)
   186  
   187  	var ts time.Time
   188  	for i := 0; i < N*2; i++ {
   189  		m, err := r.ReadMessage(ctx)
   190  		if err != nil {
   191  			t.Error("error reading message", err)
   192  		}
    193  		// grab the time of the message at offset 10 (the first message of the second batch)
   194  		if i == 10 {
   195  			ts = m.Time
   196  		}
   197  	}
   198  
   199  	err := r.SetOffsetAt(ctx, ts)
   200  	if err != nil {
   201  		t.Fatal("error setting offset by timestamp", err)
   202  	}
   203  
   204  	m, err := r.ReadMessage(context.Background())
   205  	if err != nil {
   206  		t.Fatal("error reading message", err)
   207  	}
   208  
   209  	if m.Offset != 10 {
   210  		t.Errorf("expected offset of 10, received offset %d", m.Offset)
   211  	}
   212  }
   213  
   214  func testReaderLag(t *testing.T, ctx context.Context, r *Reader) {
   215  	const N = 5
   216  	prepareReader(t, ctx, r, makeTestSequence(N)...)
   217  
   218  	if lag := r.Lag(); lag != 0 {
   219  		t.Errorf("the initial lag value is %d but was expected to be 0", lag)
   220  	}
   221  
   222  	for i := 0; i != N; i++ {
   223  		r.ReadMessage(ctx)
   224  		expect := int64(N - (i + 1))
   225  
   226  		if lag := r.Lag(); lag != expect {
   227  			t.Errorf("the lag value at offset %d is %d but was expected to be %d", i, lag, expect)
   228  		}
   229  	}
   230  }
   231  
   232  func testReaderReadLag(t *testing.T, ctx context.Context, r *Reader) {
   233  	const N = 5
   234  	prepareReader(t, ctx, r, makeTestSequence(N)...)
   235  
   236  	if lag, err := r.ReadLag(ctx); err != nil {
   237  		t.Error(err)
   238  	} else if lag != N {
   239  		t.Errorf("the initial lag value is %d but was expected to be %d", lag, N)
   240  	}
   241  
   242  	for i := 0; i != N; i++ {
   243  		r.ReadMessage(ctx)
   244  		expect := int64(N - (i + 1))
   245  
   246  		if lag, err := r.ReadLag(ctx); err != nil {
   247  			t.Error(err)
   248  		} else if lag != expect {
   249  			t.Errorf("the lag value at offset %d is %d but was expected to be %d", i, lag, expect)
   250  		}
   251  	}
   252  }
   253  
   254  func testReaderOutOfRangeGetsCanceled(t *testing.T, ctx context.Context, r *Reader) {
   255  	prepareReader(t, ctx, r, makeTestSequence(10)...)
   256  
   257  	const D = 100 * time.Millisecond
   258  	t0 := time.Now()
   259  
   260  	ctx, cancel := context.WithTimeout(ctx, D)
   261  	defer cancel()
   262  
   263  	if err := r.SetOffset(42); err != nil {
   264  		t.Error(err)
   265  	}
   266  
   267  	_, err := r.ReadMessage(ctx)
   268  	if !errors.Is(err, context.DeadlineExceeded) {
   269  		t.Error("bad error:", err)
   270  	}
   271  
   272  	t1 := time.Now()
   273  
   274  	if d := t1.Sub(t0); d < D {
   275  		t.Error("ReadMessage returned too early after", d)
   276  	}
   277  }
   278  
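         // createTopic creates the topic with the requested number of partitions (replication
         // factor 1) on the cluster controller, tolerating TopicAlreadyExists, and blocks
         // until the topic shows up in the cluster metadata.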
   279  func createTopic(t *testing.T, topic string, partitions int) {
   280  	t.Helper()
   281  
   282  	t.Logf("createTopic(%s, %d)", topic, partitions)
   283  
   284  	conn, err := Dial("tcp", "localhost:9092")
   285  	if err != nil {
   286  		err = fmt.Errorf("createTopic, Dial: %w", err)
   287  		t.Fatal(err)
   288  	}
   289  	defer conn.Close()
   290  
   291  	controller, err := conn.Controller()
   292  	if err != nil {
   293  		err = fmt.Errorf("createTopic, conn.Controller: %w", err)
   294  		t.Fatal(err)
   295  	}
   296  
   297  	conn, err = Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
   298  	if err != nil {
   299  		t.Fatal(err)
   300  	}
   301  
   302  	conn.SetDeadline(time.Now().Add(10 * time.Second))
   303  
   304  	_, err = conn.createTopics(createTopicsRequestV0{
   305  		Topics: []createTopicsRequestV0Topic{
   306  			{
   307  				Topic:             topic,
   308  				NumPartitions:     int32(partitions),
   309  				ReplicationFactor: 1,
   310  			},
   311  		},
   312  		Timeout: milliseconds(time.Second),
   313  	})
   314  	if err != nil {
   315  		if !errors.Is(err, TopicAlreadyExists) {
   316  			err = fmt.Errorf("createTopic, conn.createTopics: %w", err)
   317  			t.Error(err)
   318  			t.FailNow()
   319  		}
   320  	}
   321  
   322  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
   323  	defer cancel()
   324  
   325  	waitForTopic(ctx, t, topic)
   326  }
   327  
    328  // waitForTopic blocks until the topic exists.
   329  func waitForTopic(ctx context.Context, t *testing.T, topic string) {
   330  	t.Helper()
   331  
   332  	for {
   333  		select {
   334  		case <-ctx.Done():
   335  			t.Fatalf("reached deadline before verifying topic existence")
   336  		default:
   337  		}
   338  
   339  		cli := &Client{
   340  			Addr:    TCP("localhost:9092"),
   341  			Timeout: 5 * time.Second,
   342  		}
   343  
   344  		response, err := cli.Metadata(ctx, &MetadataRequest{
   345  			Addr:   cli.Addr,
   346  			Topics: []string{topic},
   347  		})
   348  		if err != nil {
   349  			t.Fatalf("waitForTopic: error listing topics: %s", err.Error())
   350  		}
   351  
   352  		// Find a topic which has at least 1 partition in the metadata response
   353  		for _, top := range response.Topics {
   354  			if top.Name != topic {
   355  				continue
   356  			}
   357  
   358  			numPartitions := len(top.Partitions)
   359  			t.Logf("waitForTopic: found topic %q with %d partitions",
   360  				topic, numPartitions)
   361  
   362  			if numPartitions > 0 {
   363  				return
   364  			}
   365  		}
   366  
   367  		t.Logf("retrying after 1s")
   368  		time.Sleep(time.Second)
   369  		continue
   370  	}
   371  }
   372  
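         // deleteTopic deletes the given topics through the cluster controller.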
   373  func deleteTopic(t *testing.T, topic ...string) {
   374  	t.Helper()
   375  	conn, err := Dial("tcp", "localhost:9092")
   376  	if err != nil {
   377  		t.Fatal(err)
   378  	}
   379  	defer conn.Close()
   380  
   381  	controller, err := conn.Controller()
   382  	if err != nil {
   383  		t.Fatal(err)
   384  	}
   385  
   386  	conn, err = Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
   387  	if err != nil {
   388  		t.Fatal(err)
   389  	}
   390  
   391  	conn.SetDeadline(time.Now().Add(10 * time.Second))
   392  
   393  	if err := conn.DeleteTopics(topic...); err != nil {
   394  		t.Fatal(err)
   395  	}
   396  }
   397  
   398  func TestReaderOnNonZeroPartition(t *testing.T) {
   399  	tests := []struct {
   400  		scenario string
   401  		function func(*testing.T, context.Context, *Reader)
   402  	}{
   403  		{
    404  			scenario: "topic and partition should be included in the returned message",
   405  			function: testReaderSetsTopicAndPartition,
   406  		},
   407  	}
   408  
   409  	for _, test := range tests {
   410  		testFunc := test.function
   411  		t.Run(test.scenario, func(t *testing.T) {
   412  			t.Parallel()
   413  
   414  			topic := makeTopic()
   415  			createTopic(t, topic, 2)
   416  			defer deleteTopic(t, topic)
   417  
   418  			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
   419  			defer cancel()
   420  
   421  			r := NewReader(ReaderConfig{
   422  				Brokers:   []string{"localhost:9092"},
   423  				Topic:     topic,
   424  				Partition: 1,
   425  				MinBytes:  1,
   426  				MaxBytes:  10e6,
   427  				MaxWait:   100 * time.Millisecond,
   428  			})
   429  			defer r.Close()
   430  			testFunc(t, ctx, r)
   431  		})
   432  	}
   433  }
   434  
   435  func testReaderSetsTopicAndPartition(t *testing.T, ctx context.Context, r *Reader) {
   436  	const N = 3
   437  	prepareReader(t, ctx, r, makeTestSequence(N)...)
   438  
   439  	for i := 0; i != N; i++ {
   440  		m, err := r.ReadMessage(ctx)
   441  		if err != nil {
   442  			t.Error("reading message failed:", err)
   443  			return
   444  		}
   445  
   446  		if m.Topic == "" {
   447  			t.Error("expected topic to be set")
   448  			return
   449  		}
   450  		if m.Topic != r.config.Topic {
   451  			t.Errorf("expected message to contain topic, %v; got %v", r.config.Topic, m.Topic)
   452  			return
   453  		}
   454  		if m.Partition != r.config.Partition {
   455  			t.Errorf("expected partition to be set; expected 1, got %v", m.Partition)
   456  			return
   457  		}
   458  	}
   459  }
   460  
   461  // TestReadTruncatedMessages uses a configuration designed to get the Broker to
   462  // return truncated messages.  It exercises the case where an earlier bug caused
   463  // reading to time out by attempting to read beyond the current response.  This
   464  // test is not perfect, but it is pretty reliable about reproducing the issue.
   465  //
   466  // NOTE : it currently only succeeds against kafka 0.10.1.0, so it will be
   467  // skipped.  It's here so that it can be manually run.
   468  func TestReadTruncatedMessages(t *testing.T) {
   469  	// todo : it would be great to get it to work against 0.11.0.0 so we could
   470  	//        include it in CI unit tests.
   471  	t.Skip()
   472  
   473  	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
   474  	defer cancel()
   475  	r := NewReader(ReaderConfig{
   476  		Brokers:  []string{"localhost:9092"},
   477  		Topic:    makeTopic(),
   478  		MinBytes: 1,
   479  		MaxBytes: 100,
   480  		MaxWait:  100 * time.Millisecond,
   481  	})
   482  	defer r.Close()
   483  	n := 500
   484  	prepareReader(t, ctx, r, makeTestSequence(n)...)
   485  	for i := 0; i < n; i++ {
   486  		if _, err := r.ReadMessage(ctx); err != nil {
   487  			t.Fatal(err)
   488  		}
   489  	}
   490  }
   491  
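         // makeTestSequence builds n messages whose values are their indexes ("0" .. "n-1"),
         // spaced one millisecond apart in time.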
   492  func makeTestSequence(n int) []Message {
   493  	base := time.Now()
   494  	msgs := make([]Message, n)
   495  	for i := 0; i != n; i++ {
   496  		msgs[i] = Message{
   497  			Time:  base.Add(time.Duration(i) * time.Millisecond).Truncate(time.Millisecond),
   498  			Value: []byte(strconv.Itoa(i)),
   499  		}
   500  	}
   501  	return msgs
   502  }
   503  
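         // prepareReader seeds the reader's topic/partition with msgs by dialing the partition
         // leader directly, retrying once per second until the leader is reachable or the
         // context expires.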
   504  func prepareReader(t *testing.T, ctx context.Context, r *Reader, msgs ...Message) {
   505  	config := r.Config()
   506  	var conn *Conn
   507  	var err error
   508  
   509  	for {
   510  		if conn, err = DialLeader(ctx, "tcp", "localhost:9092", config.Topic, config.Partition); err == nil {
   511  			break
   512  		}
   513  		select {
   514  		case <-time.After(time.Second):
   515  		case <-ctx.Done():
   516  			t.Fatal(ctx.Err())
   517  		}
   518  	}
   519  
   520  	defer conn.Close()
   521  
   522  	if _, err := conn.WriteMessages(msgs...); err != nil {
   523  		t.Fatal(err)
   524  	}
   525  }
   526  
   527  var (
   528  	benchmarkReaderOnce    sync.Once
   529  	benchmarkReaderTopic   = makeTopic()
   530  	benchmarkReaderPayload = make([]byte, 2*1024)
   531  )
   532  
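         // BenchmarkReader seeds the benchmark topic once with 10,000 2KiB messages and then
         // measures ReadMessage throughput.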
   533  func BenchmarkReader(b *testing.B) {
   534  	const broker = "localhost:9092"
   535  	ctx := context.Background()
   536  
   537  	benchmarkReaderOnce.Do(func() {
   538  		conn, err := DialLeader(ctx, "tcp", broker, benchmarkReaderTopic, 0)
   539  		if err != nil {
   540  			b.Fatal(err)
   541  		}
   542  		defer conn.Close()
   543  
   544  		msgs := make([]Message, 1000)
   545  		for i := range msgs {
   546  			msgs[i].Value = benchmarkReaderPayload
   547  		}
   548  
   549  		for i := 0; i != 10; i++ { // put 10K messages
   550  			if _, err := conn.WriteMessages(msgs...); err != nil {
   551  				b.Fatal(err)
   552  			}
   553  		}
   554  
   555  		b.ResetTimer()
   556  	})
   557  
   558  	r := NewReader(ReaderConfig{
   559  		Brokers:   []string{broker},
   560  		Topic:     benchmarkReaderTopic,
   561  		Partition: 0,
   562  		MinBytes:  1e3,
   563  		MaxBytes:  1e6,
   564  		MaxWait:   100 * time.Millisecond,
   565  	})
   566  
   567  	for i := 0; i < b.N; i++ {
   568  		if (i % 10000) == 0 {
   569  			r.SetOffset(-1)
   570  		}
   571  		_, err := r.ReadMessage(ctx)
   572  		if err != nil {
   573  			b.Fatal(err)
   574  		}
   575  	}
   576  
   577  	r.Close()
   578  	b.SetBytes(int64(len(benchmarkReaderPayload)))
   579  }
   580  
   581  func TestCloseLeavesGroup(t *testing.T) {
   582  	if os.Getenv("KAFKA_VERSION") == "2.3.1" {
   583  		// There's a bug in 2.3.1 that causes the MemberMetadata to be in the wrong format and thus
   584  		// leads to an error when decoding the DescribeGroupsResponse.
   585  		//
   586  		// See https://issues.apache.org/jira/browse/KAFKA-9150 for details.
   587  		t.Skip("Skipping because kafka version is 2.3.1")
   588  	}
   589  
   590  	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
   591  	defer cancel()
   592  
   593  	topic := makeTopic()
   594  	createTopic(t, topic, 1)
   595  	defer deleteTopic(t, topic)
   596  
   597  	groupID := makeGroupID()
   598  	r := NewReader(ReaderConfig{
   599  		Brokers:          []string{"localhost:9092"},
   600  		Topic:            topic,
   601  		GroupID:          groupID,
   602  		MinBytes:         1,
   603  		MaxBytes:         10e6,
   604  		MaxWait:          100 * time.Millisecond,
   605  		RebalanceTimeout: time.Second,
   606  	})
   607  	prepareReader(t, ctx, r, Message{Value: []byte("test")})
   608  
   609  	conn, err := Dial("tcp", r.config.Brokers[0])
   610  	if err != nil {
   611  		t.Fatalf("error dialing: %v", err)
   612  	}
   613  	defer conn.Close()
   614  
   615  	client, shutdown := newLocalClient()
   616  	defer shutdown()
   617  
   618  	descGroups := func() DescribeGroupsResponse {
   619  		resp, err := client.DescribeGroups(
   620  			ctx,
   621  			&DescribeGroupsRequest{
   622  				GroupIDs: []string{groupID},
   623  			},
   624  		)
   625  		if err != nil {
   626  			t.Fatalf("error from describeGroups %v", err)
   627  		}
   628  		return *resp
   629  	}
   630  
   631  	_, err = r.ReadMessage(ctx)
   632  	if err != nil {
    633  		t.Fatalf("our reader never joined its group or couldn't read a message: %v", err)
   634  	}
   635  	resp := descGroups()
   636  	if len(resp.Groups) != 1 {
   637  		t.Fatalf("expected 1 group. got: %d", len(resp.Groups))
   638  	}
   639  	if len(resp.Groups[0].Members) != 1 {
   640  		t.Fatalf("expected group membership size of %d, but got %d", 1, len(resp.Groups[0].Members))
   641  	}
   642  
   643  	err = r.Close()
   644  	if err != nil {
   645  		t.Fatalf("unexpected error closing reader: %s", err.Error())
   646  	}
   647  	resp = descGroups()
   648  	if len(resp.Groups) != 1 {
   649  		t.Fatalf("expected 1 group. got: %d", len(resp.Groups))
   650  	}
   651  	if len(resp.Groups[0].Members) != 0 {
   652  		t.Fatalf("expected group membership size of %d, but got %d", 0, len(resp.Groups[0].Members))
   653  	}
   654  }
   655  
   656  func testConsumerGroupImmediateClose(t *testing.T, ctx context.Context, r *Reader) {
   657  	if err := r.Close(); err != nil {
   658  		t.Fatalf("bad err: %v", err)
   659  	}
   660  }
   661  
   662  func testConsumerGroupSimple(t *testing.T, ctx context.Context, r *Reader) {
   663  	if err := r.Close(); err != nil {
   664  		t.Fatalf("bad err: %v", err)
   665  	}
   666  }
   667  
   668  func TestReaderSetOffsetWhenConsumerGroupsEnabled(t *testing.T) {
   669  	r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
   670  	if err := r.SetOffset(LastOffset); !errors.Is(err, errNotAvailableWithGroup) {
   671  		t.Fatalf("expected %v; got %v", errNotAvailableWithGroup, err)
   672  	}
   673  }
   674  
   675  func TestReaderOffsetWhenConsumerGroupsEnabled(t *testing.T) {
   676  	r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
   677  	if offset := r.Offset(); offset != -1 {
   678  		t.Fatalf("expected -1; got %v", offset)
   679  	}
   680  }
   681  
   682  func TestReaderLagWhenConsumerGroupsEnabled(t *testing.T) {
   683  	r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
   684  	if offset := r.Lag(); offset != -1 {
   685  		t.Fatalf("expected -1; got %v", offset)
   686  	}
   687  }
   688  
   689  func TestReaderReadLagReturnsZeroLagWhenConsumerGroupsEnabled(t *testing.T) {
   690  	r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
   691  	lag, err := r.ReadLag(context.Background())
   692  
   693  	if !errors.Is(err, errNotAvailableWithGroup) {
   694  		t.Fatalf("expected %v; got %v", errNotAvailableWithGroup, err)
   695  	}
   696  
   697  	if lag != 0 {
   698  		t.Fatalf("expected 0; got %d", lag)
   699  	}
   700  }
   701  
   702  func TestReaderPartitionWhenConsumerGroupsEnabled(t *testing.T) {
   703  	invoke := func() (boom bool) {
   704  		defer func() {
   705  			if r := recover(); r != nil {
   706  				boom = true
   707  			}
   708  		}()
   709  
   710  		NewReader(ReaderConfig{
   711  			GroupID:   "set",
   712  			Partition: 1,
   713  		})
   714  		return false
   715  	}
   716  
   717  	if !invoke() {
   718  		t.Fatalf("expected panic; but NewReader worked?!")
   719  	}
   720  }
   721  
   722  func TestExtractTopics(t *testing.T) {
   723  	testCases := map[string]struct {
   724  		Members []GroupMember
   725  		Topics  []string
   726  	}{
   727  		"nil": {},
   728  		"single member, single topic": {
   729  			Members: []GroupMember{
   730  				{
   731  					ID:     "a",
   732  					Topics: []string{"topic"},
   733  				},
   734  			},
   735  			Topics: []string{"topic"},
   736  		},
   737  		"two members, single topic": {
   738  			Members: []GroupMember{
   739  				{
   740  					ID:     "a",
   741  					Topics: []string{"topic"},
   742  				},
   743  				{
   744  					ID:     "b",
   745  					Topics: []string{"topic"},
   746  				},
   747  			},
   748  			Topics: []string{"topic"},
   749  		},
   750  		"two members, two topics": {
   751  			Members: []GroupMember{
   752  				{
   753  					ID:     "a",
   754  					Topics: []string{"topic-1"},
   755  				},
   756  				{
   757  					ID:     "b",
   758  					Topics: []string{"topic-2"},
   759  				},
   760  			},
   761  			Topics: []string{"topic-1", "topic-2"},
   762  		},
   763  		"three members, three shared topics": {
   764  			Members: []GroupMember{
   765  				{
   766  					ID:     "a",
   767  					Topics: []string{"topic-1", "topic-2"},
   768  				},
   769  				{
   770  					ID:     "b",
   771  					Topics: []string{"topic-2", "topic-3"},
   772  				},
   773  				{
   774  					ID:     "c",
   775  					Topics: []string{"topic-3", "topic-1"},
   776  				},
   777  			},
   778  			Topics: []string{"topic-1", "topic-2", "topic-3"},
   779  		},
   780  	}
   781  
   782  	for label, tc := range testCases {
   783  		t.Run(label, func(t *testing.T) {
   784  			topics := extractTopics(tc.Members)
   785  			if !reflect.DeepEqual(tc.Topics, topics) {
   786  				t.Errorf("expected %v; got %v", tc.Topics, topics)
   787  			}
   788  		})
   789  	}
   790  }
   791  
   792  func TestReaderConsumerGroup(t *testing.T) {
   793  	tests := []struct {
   794  		scenario       string
   795  		partitions     int
   796  		commitInterval time.Duration
   797  		function       func(*testing.T, context.Context, *Reader)
   798  	}{
   799  		{
   800  			scenario:   "basic handshake",
   801  			partitions: 1,
   802  			function:   testReaderConsumerGroupHandshake,
   803  		},
   804  		{
   805  			scenario:   "verify offset committed",
   806  			partitions: 1,
   807  			function:   testReaderConsumerGroupVerifyOffsetCommitted,
   808  		},
   809  
   810  		{
   811  			scenario:       "verify offset committed when using interval committer",
   812  			partitions:     1,
   813  			commitInterval: 400 * time.Millisecond,
   814  			function:       testReaderConsumerGroupVerifyPeriodicOffsetCommitter,
   815  		},
   816  
   817  		{
   818  			scenario:   "rebalance across many partitions and consumers",
   819  			partitions: 8,
   820  			function:   testReaderConsumerGroupRebalanceAcrossManyPartitionsAndConsumers,
   821  		},
   822  
   823  		{
   824  			scenario:   "consumer group commits on close",
   825  			partitions: 3,
   826  			function:   testReaderConsumerGroupVerifyCommitsOnClose,
   827  		},
   828  
   829  		{
   830  			scenario:   "consumer group rebalance",
   831  			partitions: 3,
   832  			function:   testReaderConsumerGroupRebalance,
   833  		},
   834  
   835  		{
   836  			scenario:   "consumer group rebalance across topics",
   837  			partitions: 3,
   838  			function:   testReaderConsumerGroupRebalanceAcrossTopics,
   839  		},
   840  
   841  		{
   842  			scenario:   "consumer group reads content across partitions",
   843  			partitions: 3,
   844  			function:   testReaderConsumerGroupReadContentAcrossPartitions,
   845  		},
   846  
   847  		{
   848  			scenario:   "Close immediately after NewReader",
   849  			partitions: 1,
   850  			function:   testConsumerGroupImmediateClose,
   851  		},
   852  
   853  		{
    854  			scenario:   "Close immediately after NewReader (simple)",
   855  			partitions: 1,
   856  			function:   testConsumerGroupSimple,
   857  		},
   858  	}
   859  
   860  	for _, test := range tests {
   861  		t.Run(test.scenario, func(t *testing.T) {
   862  			// It appears that some of the tests depend on all these tests being
   863  			// run concurrently to pass... this is brittle and should be fixed
   864  			// at some point.
   865  			t.Parallel()
   866  
   867  			topic := makeTopic()
   868  			createTopic(t, topic, test.partitions)
   869  			defer deleteTopic(t, topic)
   870  
   871  			groupID := makeGroupID()
   872  			r := NewReader(ReaderConfig{
   873  				Brokers:           []string{"localhost:9092"},
   874  				Topic:             topic,
   875  				GroupID:           groupID,
   876  				HeartbeatInterval: 2 * time.Second,
   877  				CommitInterval:    test.commitInterval,
   878  				RebalanceTimeout:  2 * time.Second,
   879  				RetentionTime:     time.Hour,
   880  				MinBytes:          1,
   881  				MaxBytes:          1e6,
   882  			})
   883  			defer r.Close()
   884  
   885  			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
   886  			defer cancel()
   887  
   888  			test.function(t, ctx, r)
   889  		})
   890  	}
   891  }
   892  
   893  func testReaderConsumerGroupHandshake(t *testing.T, ctx context.Context, r *Reader) {
   894  	prepareReader(t, context.Background(), r, makeTestSequence(5)...)
   895  
   896  	m, err := r.ReadMessage(ctx)
   897  	if err != nil {
   898  		t.Errorf("bad err: %v", err)
   899  	}
   900  	if m.Topic != r.config.Topic {
   901  		t.Errorf("topic not set")
   902  	}
   903  	if m.Offset != 0 {
   904  		t.Errorf("offset not set")
   905  	}
   906  
   907  	m, err = r.ReadMessage(ctx)
   908  	if err != nil {
   909  		t.Errorf("bad err: %v", err)
   910  	}
   911  	if m.Topic != r.config.Topic {
   912  		t.Errorf("topic not set")
   913  	}
   914  	if m.Offset != 1 {
   915  		t.Errorf("offset not set")
   916  	}
   917  }
   918  
   919  func testReaderConsumerGroupVerifyOffsetCommitted(t *testing.T, ctx context.Context, r *Reader) {
   920  	prepareReader(t, context.Background(), r, makeTestSequence(3)...)
   921  
   922  	if _, err := r.FetchMessage(ctx); err != nil {
   923  		t.Errorf("bad err: %v", err) // skip the first message
   924  	}
   925  
   926  	m, err := r.FetchMessage(ctx)
   927  	if err != nil {
   928  		t.Errorf("bad err: %v", err)
   929  	}
   930  
   931  	if err := r.CommitMessages(ctx, m); err != nil {
   932  		t.Errorf("bad commit message: %v", err)
   933  	}
   934  
   935  	offsets := getOffsets(t, r.config)
   936  	if expected := map[int]int64{0: m.Offset + 1}; !reflect.DeepEqual(expected, offsets) {
   937  		t.Errorf("expected %v; got %v", expected, offsets)
   938  	}
   939  }
   940  
   941  func testReaderConsumerGroupVerifyPeriodicOffsetCommitter(t *testing.T, ctx context.Context, r *Reader) {
   942  	prepareReader(t, context.Background(), r, makeTestSequence(3)...)
   943  
   944  	if _, err := r.FetchMessage(ctx); err != nil {
   945  		t.Errorf("bad err: %v", err) // skip the first message
   946  	}
   947  
   948  	m, err := r.FetchMessage(ctx)
   949  	if err != nil {
   950  		t.Errorf("bad err: %v", err)
   951  	}
   952  
   953  	started := time.Now()
   954  	if err := r.CommitMessages(ctx, m); err != nil {
   955  		t.Errorf("bad commit message: %v", err)
   956  	}
   957  	if elapsed := time.Since(started); elapsed > 10*time.Millisecond {
   958  		t.Errorf("background commits should happen nearly instantly")
   959  	}
   960  
   961  	// wait for committer to pick up the commits
   962  	time.Sleep(r.config.CommitInterval * 3)
   963  
   964  	offsets := getOffsets(t, r.config)
   965  	if expected := map[int]int64{0: m.Offset + 1}; !reflect.DeepEqual(expected, offsets) {
   966  		t.Errorf("expected %v; got %v", expected, offsets)
   967  	}
   968  }
   969  
   970  func testReaderConsumerGroupVerifyCommitsOnClose(t *testing.T, ctx context.Context, r *Reader) {
   971  	prepareReader(t, context.Background(), r, makeTestSequence(3)...)
   972  
   973  	if _, err := r.FetchMessage(ctx); err != nil {
   974  		t.Errorf("bad err: %v", err) // skip the first message
   975  	}
   976  
   977  	m, err := r.FetchMessage(ctx)
   978  	if err != nil {
   979  		t.Errorf("bad err: %v", err)
   980  	}
   981  
   982  	if err := r.CommitMessages(ctx, m); err != nil {
   983  		t.Errorf("bad commit message: %v", err)
   984  	}
   985  
   986  	if err := r.Close(); err != nil {
   987  		t.Errorf("bad Close: %v", err)
   988  	}
   989  
   990  	r2 := NewReader(r.config)
   991  	defer r2.Close()
   992  
   993  	offsets := getOffsets(t, r2.config)
   994  	if expected := map[int]int64{0: m.Offset + 1}; !reflect.DeepEqual(expected, offsets) {
   995  		t.Errorf("expected %v; got %v", expected, offsets)
   996  	}
   997  }
   998  
   999  func testReaderConsumerGroupReadContentAcrossPartitions(t *testing.T, ctx context.Context, r *Reader) {
  1000  	const N = 12
  1001  
  1002  	client, shutdown := newLocalClient()
  1003  	defer shutdown()
  1004  
  1005  	writer := &Writer{
  1006  		Addr:      TCP(r.config.Brokers...),
  1007  		Topic:     r.config.Topic,
  1008  		Balancer:  &RoundRobin{},
  1009  		BatchSize: 1,
  1010  		Transport: client.Transport,
  1011  	}
  1012  	if err := writer.WriteMessages(ctx, makeTestSequence(N)...); err != nil {
  1013  		t.Fatalf("bad write messages: %v", err)
  1014  	}
  1015  	if err := writer.Close(); err != nil {
  1016  		t.Fatalf("bad write err: %v", err)
  1017  	}
  1018  
  1019  	partitions := map[int]struct{}{}
  1020  	for i := 0; i < N; i++ {
  1021  		m, err := r.FetchMessage(ctx)
  1022  		if err != nil {
  1023  			t.Errorf("bad error: %s", err)
  1024  		}
  1025  		partitions[m.Partition] = struct{}{}
  1026  	}
  1027  
  1028  	if v := len(partitions); v != 3 {
  1029  		t.Errorf("expected messages across 3 partitions; got messages across %v partitions", v)
  1030  	}
  1031  }
  1032  
  1033  func testReaderConsumerGroupRebalance(t *testing.T, ctx context.Context, r *Reader) {
  1034  	r2 := NewReader(r.config)
  1035  	defer r.Close()
  1036  
  1037  	const (
  1038  		N          = 12
  1039  		partitions = 2
  1040  	)
  1041  
  1042  	client, shutdown := newLocalClient()
  1043  	defer shutdown()
  1044  
   1045  	// rebalance should result in 12 messages in each of the partitions
  1046  	writer := &Writer{
  1047  		Addr:      TCP(r.config.Brokers...),
  1048  		Topic:     r.config.Topic,
  1049  		Balancer:  &RoundRobin{},
  1050  		BatchSize: 1,
  1051  		Transport: client.Transport,
  1052  	}
  1053  	if err := writer.WriteMessages(ctx, makeTestSequence(N*partitions)...); err != nil {
  1054  		t.Fatalf("bad write messages: %v", err)
  1055  	}
  1056  	if err := writer.Close(); err != nil {
  1057  		t.Fatalf("bad write err: %v", err)
  1058  	}
  1059  
  1060  	// after rebalance, each reader should have a partition to itself
  1061  	for i := 0; i < N; i++ {
  1062  		if _, err := r2.FetchMessage(ctx); err != nil {
  1063  			t.Errorf("expect to read from reader 2")
  1064  		}
  1065  		if _, err := r.FetchMessage(ctx); err != nil {
  1066  			t.Errorf("expect to read from reader 1")
  1067  		}
  1068  	}
  1069  }
  1070  
  1071  func testReaderConsumerGroupRebalanceAcrossTopics(t *testing.T, ctx context.Context, r *Reader) {
  1072  	// create a second reader that shares the groupID, but reads from a different topic
  1073  	client, topic2, shutdown := newLocalClientAndTopic()
  1074  	defer shutdown()
  1075  
  1076  	r2 := NewReader(ReaderConfig{
  1077  		Brokers:           r.config.Brokers,
  1078  		Topic:             topic2,
  1079  		GroupID:           r.config.GroupID,
  1080  		HeartbeatInterval: r.config.HeartbeatInterval,
  1081  		SessionTimeout:    r.config.SessionTimeout,
  1082  		RetentionTime:     r.config.RetentionTime,
  1083  		MinBytes:          r.config.MinBytes,
  1084  		MaxBytes:          r.config.MaxBytes,
  1085  		Logger:            r.config.Logger,
  1086  	})
  1087  	defer r.Close()
  1088  	prepareReader(t, ctx, r2, makeTestSequence(1)...)
  1089  
  1090  	const (
  1091  		N = 12
  1092  	)
  1093  
   1094  	// write messages to the original topic, spread across its partitions
  1095  	writer := &Writer{
  1096  		Addr:      TCP(r.config.Brokers...),
  1097  		Topic:     r.config.Topic,
  1098  		Balancer:  &RoundRobin{},
  1099  		BatchSize: 1,
  1100  		Transport: client.Transport,
  1101  	}
  1102  	if err := writer.WriteMessages(ctx, makeTestSequence(N)...); err != nil {
  1103  		t.Fatalf("bad write messages: %v", err)
  1104  	}
  1105  	if err := writer.Close(); err != nil {
  1106  		t.Fatalf("bad write err: %v", err)
  1107  	}
  1108  
  1109  	// after rebalance, r2 should read topic2 and r1 should read ALL of the original topic
  1110  	if _, err := r2.FetchMessage(ctx); err != nil {
  1111  		t.Errorf("expect to read from reader 2")
  1112  	}
  1113  
  1114  	// all N messages on the original topic should be read by the original reader
  1115  	for i := 0; i < N; i++ {
  1116  		if _, err := r.FetchMessage(ctx); err != nil {
  1117  			t.Errorf("expect to read from reader 1")
  1118  		}
  1119  	}
  1120  }
  1121  
  1122  func testReaderConsumerGroupRebalanceAcrossManyPartitionsAndConsumers(t *testing.T, ctx context.Context, r *Reader) {
  1123  	// I've rebalanced up to 100 servers, but the rebalance can take upwards
  1124  	// of a minute and that seems too long for unit tests.  Also, setting this
  1125  	// to a larger number seems to make the kafka broker unresponsive.
  1126  	// TODO research if there's a way to reduce rebalance time across many partitions
  1127  	// svls: the described behavior is due to the thundering herd of readers
  1128  	//       hitting the rebalance timeout.  introducing the 100ms sleep in the
  1129  	//       loop below in order to give time for the sync group to finish has
  1130  	//       greatly helped, though we still hit the timeout from time to time.
  1131  	const N = 8
  1132  
  1133  	var readers []*Reader
  1134  
  1135  	for i := 0; i < N-1; i++ {
  1136  		reader := NewReader(r.config)
  1137  		readers = append(readers, reader)
  1138  		time.Sleep(100 * time.Millisecond)
  1139  	}
  1140  	defer func() {
  1141  		for _, r := range readers {
  1142  			r.Close()
  1143  			time.Sleep(100 * time.Millisecond)
  1144  		}
  1145  	}()
  1146  
  1147  	client, shutdown := newLocalClient()
  1148  	defer shutdown()
  1149  
   1150  	// write messages across all partitions
  1151  	writer := &Writer{
  1152  		Addr:      TCP(r.config.Brokers...),
  1153  		Topic:     r.config.Topic,
  1154  		Balancer:  &RoundRobin{},
  1155  		BatchSize: 1,
  1156  		Transport: client.Transport,
  1157  	}
  1158  	if err := writer.WriteMessages(ctx, makeTestSequence(N*3)...); err != nil {
  1159  		t.Fatalf("bad write messages: %v", err)
  1160  	}
  1161  	if err := writer.Close(); err != nil {
  1162  		t.Fatalf("bad write err: %v", err)
  1163  	}
  1164  
   1165  	// each of the additional readers should have been assigned a partition and read at least one message
  1166  	for i := 0; i < N-1; i++ {
  1167  		if _, err := readers[i].FetchMessage(ctx); err != nil {
  1168  			t.Errorf("reader %v expected to read 1 message", i)
  1169  		}
  1170  	}
  1171  
  1172  	if _, err := r.FetchMessage(ctx); err != nil {
  1173  		t.Errorf("expect to read from original reader")
  1174  	}
  1175  }
  1176  
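         // TestOffsetStash verifies that merging message commits records the next offset to
         // consume (message offset + 1) per partition and never moves an offset backwards.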
  1177  func TestOffsetStash(t *testing.T) {
  1178  	const topic = "topic"
  1179  
  1180  	newMessage := func(partition int, offset int64) Message {
  1181  		return Message{
  1182  			Topic:     topic,
  1183  			Partition: partition,
  1184  			Offset:    offset,
  1185  		}
  1186  	}
  1187  
  1188  	tests := map[string]struct {
  1189  		Given    offsetStash
  1190  		Messages []Message
  1191  		Expected offsetStash
  1192  	}{
  1193  		"nil": {},
  1194  		"empty given, single message": {
  1195  			Given:    offsetStash{},
  1196  			Messages: []Message{newMessage(0, 0)},
  1197  			Expected: offsetStash{
  1198  				topic: {0: 1},
  1199  			},
  1200  		},
  1201  		"ignores earlier offsets": {
  1202  			Given: offsetStash{
  1203  				topic: {0: 2},
  1204  			},
  1205  			Messages: []Message{newMessage(0, 0)},
  1206  			Expected: offsetStash{
  1207  				topic: {0: 2},
  1208  			},
  1209  		},
  1210  		"uses latest offset": {
  1211  			Given: offsetStash{},
  1212  			Messages: []Message{
  1213  				newMessage(0, 2),
  1214  				newMessage(0, 3),
  1215  				newMessage(0, 1),
  1216  			},
  1217  			Expected: offsetStash{
  1218  				topic: {0: 4},
  1219  			},
  1220  		},
   1221  		"uses latest offset, across multiple partitions": {
  1222  			Given: offsetStash{},
  1223  			Messages: []Message{
  1224  				newMessage(0, 2),
  1225  				newMessage(0, 3),
  1226  				newMessage(0, 1),
  1227  				newMessage(1, 5),
  1228  				newMessage(1, 6),
  1229  			},
  1230  			Expected: offsetStash{
  1231  				topic: {
  1232  					0: 4,
  1233  					1: 7,
  1234  				},
  1235  			},
  1236  		},
  1237  	}
  1238  
  1239  	for label, test := range tests {
  1240  		t.Run(label, func(t *testing.T) {
  1241  			test.Given.merge(makeCommits(test.Messages...))
  1242  			if !reflect.DeepEqual(test.Expected, test.Given) {
  1243  				t.Errorf("expected %v; got %v", test.Expected, test.Given)
  1244  			}
  1245  		})
  1246  	}
  1247  }
  1248  
  1249  func TestValidateReader(t *testing.T) {
  1250  	tests := []struct {
  1251  		config       ReaderConfig
  1252  		errorOccured bool
  1253  	}{
  1254  		{config: ReaderConfig{}, errorOccured: true},
  1255  		{config: ReaderConfig{Brokers: []string{"broker1"}}, errorOccured: true},
  1256  		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1"}, errorOccured: false},
  1257  		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: -1}, errorOccured: true},
  1258  		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: 1, MinBytes: -1}, errorOccured: true},
  1259  		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: 1, MinBytes: 5, MaxBytes: -1}, errorOccured: true},
  1260  		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: 1, MinBytes: 5, MaxBytes: 6}, errorOccured: false},
  1261  	}
  1262  	for _, test := range tests {
  1263  		err := test.config.Validate()
  1264  		if test.errorOccured && err == nil {
  1265  			t.Fail()
  1266  		}
  1267  		if !test.errorOccured && err != nil {
  1268  			t.Fail()
  1269  		}
  1270  	}
  1271  }
  1272  
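         // TestCommitLoopImmediateFlushOnGenerationEnd queues up a backlog of commit requests
         // and then closes the generation, expecting the backlog to be flushed in a single
         // final commit (offset 100) rather than one request per queued commit.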
  1273  func TestCommitLoopImmediateFlushOnGenerationEnd(t *testing.T) {
  1274  	t.Parallel()
  1275  	var committedOffset int64
  1276  	var commitCount int
  1277  	gen := &Generation{
  1278  		conn: mockCoordinator{
  1279  			offsetCommitFunc: func(r offsetCommitRequestV2) (offsetCommitResponseV2, error) {
  1280  				commitCount++
  1281  				committedOffset = r.Topics[0].Partitions[0].Offset
  1282  				return offsetCommitResponseV2{}, nil
  1283  			},
  1284  		},
  1285  		done:     make(chan struct{}),
  1286  		log:      func(func(Logger)) {},
  1287  		logError: func(func(Logger)) {},
  1288  		joined:   make(chan struct{}),
  1289  	}
  1290  
   1291  	// buffer a backlog of commits so commitLoopImmediate still has pending work to flush when the generation ends
  1292  	r := &Reader{stctx: context.Background(), commits: make(chan commitRequest, 100)}
  1293  
  1294  	for i := 0; i < 100; i++ {
  1295  		cr := commitRequest{
  1296  			commits: []commit{{
  1297  				topic:     "topic",
  1298  				partition: 0,
  1299  				offset:    int64(i) + 1,
  1300  			}},
  1301  			errch: make(chan<- error, 1),
  1302  		}
  1303  		r.commits <- cr
  1304  	}
  1305  
  1306  	gen.Start(func(ctx context.Context) {
  1307  		r.commitLoopImmediate(ctx, gen)
  1308  	})
  1309  
  1310  	gen.close()
  1311  
  1312  	if committedOffset != 100 {
   1313  		t.Fatalf("expected committed offset to be 100 but got %d", committedOffset)
  1314  	}
  1315  
  1316  	if commitCount >= 100 {
  1317  		t.Fatalf("expected a single final commit on generation end got %d", commitCount)
  1318  	}
  1319  }
  1320  
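         // TestCommitOffsetsWithRetry verifies that transient commit errors are retried and
         // that an error is only surfaced once the retry budget is exhausted.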
  1321  func TestCommitOffsetsWithRetry(t *testing.T) {
  1322  	offsets := offsetStash{"topic": {0: 0}}
  1323  
  1324  	tests := map[string]struct {
  1325  		Fails       int
  1326  		Invocations int
  1327  		HasError    bool
  1328  	}{
  1329  		"happy path": {
  1330  			Invocations: 1,
  1331  		},
  1332  		"1 retry": {
  1333  			Fails:       1,
  1334  			Invocations: 2,
  1335  		},
  1336  		"out of retries": {
  1337  			Fails:       defaultCommitRetries + 1,
  1338  			Invocations: defaultCommitRetries,
  1339  			HasError:    true,
  1340  		},
  1341  	}
  1342  
  1343  	for label, test := range tests {
  1344  		t.Run(label, func(t *testing.T) {
  1345  			count := 0
  1346  			gen := &Generation{
  1347  				conn: mockCoordinator{
  1348  					offsetCommitFunc: func(offsetCommitRequestV2) (offsetCommitResponseV2, error) {
  1349  						count++
  1350  						if count <= test.Fails {
  1351  							return offsetCommitResponseV2{}, io.EOF
  1352  						}
  1353  						return offsetCommitResponseV2{}, nil
  1354  					},
  1355  				},
  1356  				done:     make(chan struct{}),
  1357  				log:      func(func(Logger)) {},
  1358  				logError: func(func(Logger)) {},
  1359  			}
  1360  
  1361  			r := &Reader{stctx: context.Background()}
  1362  			err := r.commitOffsetsWithRetry(gen, offsets, defaultCommitRetries)
  1363  			switch {
  1364  			case test.HasError && err == nil:
  1365  				t.Error("bad err: expected not nil; got nil")
  1366  			case !test.HasError && err != nil:
  1367  				t.Errorf("bad err: expected nil; got %v", err)
  1368  			}
  1369  		})
  1370  	}
  1371  }
  1372  
  1373  // Test that a reader won't continually rebalance when there are more consumers
  1374  // than partitions in a group.
  1375  // https://github.com/segmentio/kafka-go/issues/200
  1376  func TestRebalanceTooManyConsumers(t *testing.T) {
  1377  	ctx := context.Background()
  1378  	conf := ReaderConfig{
  1379  		Brokers: []string{"localhost:9092"},
  1380  		GroupID: makeGroupID(),
  1381  		Topic:   makeTopic(),
  1382  		MaxWait: time.Second,
  1383  	}
  1384  
  1385  	// Create the first reader and wait for it to become the leader.
  1386  	r1 := NewReader(conf)
  1387  	prepareReader(t, ctx, r1, makeTestSequence(1)...)
  1388  	r1.ReadMessage(ctx)
  1389  	// Clear the stats from the first rebalance.
  1390  	r1.Stats()
  1391  
   1392  	// The second reader should cause one rebalance for each of r1 and r2.
  1393  	r2 := NewReader(conf)
  1394  
  1395  	// Wait for rebalances.
  1396  	time.Sleep(5 * time.Second)
  1397  
  1398  	// Before the fix, r2 would cause continuous rebalances,
  1399  	// as it tried to handshake() repeatedly.
  1400  	rebalances := r1.Stats().Rebalances + r2.Stats().Rebalances
  1401  	if rebalances > 2 {
  1402  		t.Errorf("unexpected rebalances to first reader, got %d", rebalances)
  1403  	}
  1404  }
  1405  
  1406  func TestConsumerGroupWithMissingTopic(t *testing.T) {
  1407  	t.Skip("this test doesn't work when the cluster is configured to auto-create topics")
  1408  
  1409  	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
  1410  	defer cancel()
  1411  
  1412  	conf := ReaderConfig{
  1413  		Brokers:                []string{"localhost:9092"},
  1414  		GroupID:                makeGroupID(),
  1415  		Topic:                  makeTopic(),
  1416  		MaxWait:                time.Second,
  1417  		PartitionWatchInterval: 100 * time.Millisecond,
  1418  		WatchPartitionChanges:  true,
  1419  	}
  1420  
  1421  	r := NewReader(conf)
  1422  	defer r.Close()
  1423  
  1424  	recvErr := make(chan error, 1)
  1425  	go func() {
  1426  		_, err := r.ReadMessage(ctx)
  1427  		recvErr <- err
  1428  	}()
  1429  
  1430  	time.Sleep(time.Second)
  1431  	client, shutdown := newLocalClientWithTopic(conf.Topic, 1)
  1432  	defer shutdown()
  1433  
  1434  	w := &Writer{
  1435  		Addr:         TCP(r.config.Brokers...),
  1436  		Topic:        r.config.Topic,
  1437  		BatchTimeout: 10 * time.Millisecond,
  1438  		BatchSize:    1,
  1439  		Transport:    client.Transport,
  1440  	}
  1441  	defer w.Close()
  1442  	if err := w.WriteMessages(ctx, Message{}); err != nil {
  1443  		t.Fatalf("write error: %+v", err)
  1444  	}
  1445  
  1446  	if err := <-recvErr; err != nil {
  1447  		t.Fatalf("read error: %+v", err)
  1448  	}
  1449  
  1450  	nMsgs := r.Stats().Messages
  1451  	if nMsgs != 1 {
  1452  		t.Fatalf("expected to receive one message, but got %d", nMsgs)
  1453  	}
  1454  }
  1455  
  1456  func TestConsumerGroupWithTopic(t *testing.T) {
  1457  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  1458  	defer cancel()
  1459  
  1460  	conf := ReaderConfig{
  1461  		Brokers:                []string{"localhost:9092"},
  1462  		GroupID:                makeGroupID(),
  1463  		Topic:                  makeTopic(),
  1464  		MaxWait:                time.Second,
  1465  		PartitionWatchInterval: 100 * time.Millisecond,
  1466  		WatchPartitionChanges:  true,
  1467  		Logger:                 newTestKafkaLogger(t, "Reader:"),
  1468  	}
  1469  
  1470  	r := NewReader(conf)
  1471  	defer r.Close()
  1472  
  1473  	recvErr := make(chan error, len(conf.GroupTopics))
  1474  	go func() {
  1475  		msg, err := r.ReadMessage(ctx)
  1476  		t.Log(msg)
  1477  		recvErr <- err
  1478  	}()
  1479  
  1480  	time.Sleep(conf.MaxWait)
  1481  
  1482  	client, shutdown := newLocalClientWithTopic(conf.Topic, 1)
  1483  	defer shutdown()
  1484  
  1485  	w := &Writer{
  1486  		Addr:         TCP(r.config.Brokers...),
  1487  		Topic:        conf.Topic,
  1488  		BatchTimeout: 10 * time.Millisecond,
  1489  		BatchSize:    1,
  1490  		Transport:    client.Transport,
  1491  		Logger:       newTestKafkaLogger(t, "Writer:"),
  1492  	}
  1493  	defer w.Close()
  1494  	if err := w.WriteMessages(ctx, Message{Value: []byte(conf.Topic)}); err != nil {
  1495  		t.Fatalf("write error: %+v", err)
  1496  	}
  1497  
  1498  	if err := <-recvErr; err != nil {
  1499  		t.Fatalf("read error: %+v", err)
  1500  	}
  1501  
  1502  	nMsgs := r.Stats().Messages
  1503  	if nMsgs != 1 {
  1504  		t.Fatalf("expected to receive 1 message, but got %d", nMsgs)
  1505  	}
  1506  }
  1507  
  1508  func TestConsumerGroupWithGroupTopicsSingle(t *testing.T) {
  1509  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  1510  	defer cancel()
  1511  
  1512  	conf := ReaderConfig{
  1513  		Brokers:                []string{"localhost:9092"},
  1514  		GroupID:                makeGroupID(),
  1515  		GroupTopics:            []string{makeTopic()},
  1516  		MaxWait:                time.Second,
  1517  		PartitionWatchInterval: 100 * time.Millisecond,
  1518  		WatchPartitionChanges:  true,
  1519  		Logger:                 newTestKafkaLogger(t, "Reader:"),
  1520  	}
  1521  
  1522  	r := NewReader(conf)
  1523  	defer r.Close()
  1524  
  1525  	recvErr := make(chan error, len(conf.GroupTopics))
  1526  	go func() {
  1527  		msg, err := r.ReadMessage(ctx)
  1528  		t.Log(msg)
  1529  		recvErr <- err
  1530  	}()
  1531  
  1532  	time.Sleep(conf.MaxWait)
  1533  
  1534  	for i, topic := range conf.GroupTopics {
  1535  		client, shutdown := newLocalClientWithTopic(topic, 1)
  1536  		defer shutdown()
  1537  
  1538  		w := &Writer{
  1539  			Addr:         TCP(r.config.Brokers...),
  1540  			Topic:        topic,
  1541  			BatchTimeout: 10 * time.Millisecond,
  1542  			BatchSize:    1,
  1543  			Transport:    client.Transport,
  1544  			Logger:       newTestKafkaLogger(t, fmt.Sprintf("Writer(%d):", i)),
  1545  		}
  1546  		defer w.Close()
  1547  		if err := w.WriteMessages(ctx, Message{Value: []byte(topic)}); err != nil {
  1548  			t.Fatalf("write error: %+v", err)
  1549  		}
  1550  	}
  1551  
  1552  	if err := <-recvErr; err != nil {
  1553  		t.Fatalf("read error: %+v", err)
  1554  	}
  1555  
  1556  	nMsgs := r.Stats().Messages
  1557  	if nMsgs != int64(len(conf.GroupTopics)) {
  1558  		t.Fatalf("expected to receive %d messages, but got %d", len(conf.GroupTopics), nMsgs)
  1559  	}
  1560  }
  1561  
  1562  func TestConsumerGroupWithGroupTopicsMultple(t *testing.T) {
  1563  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  1564  	defer cancel()
  1565  
  1566  	client, shutdown := newLocalClient()
  1567  	defer shutdown()
  1568  
  1569  	conf := ReaderConfig{
  1570  		Brokers:                []string{"localhost:9092"},
  1571  		GroupID:                makeGroupID(),
  1572  		GroupTopics:            []string{makeTopic(), makeTopic()},
  1573  		MaxWait:                time.Second,
  1574  		PartitionWatchInterval: 100 * time.Millisecond,
  1575  		WatchPartitionChanges:  true,
  1576  		Logger:                 newTestKafkaLogger(t, "Reader:"),
  1577  	}
  1578  
  1579  	r := NewReader(conf)
  1580  
  1581  	w := &Writer{
  1582  		Addr:         TCP(r.config.Brokers...),
  1583  		BatchTimeout: 10 * time.Millisecond,
  1584  		BatchSize:    1,
  1585  		Transport:    client.Transport,
  1586  		Logger:       newTestKafkaLogger(t, "Writer:"),
  1587  	}
  1588  	defer w.Close()
  1589  
  1590  	time.Sleep(time.Second)
  1591  
  1592  	msgs := make([]Message, 0, len(conf.GroupTopics))
  1593  	for _, topic := range conf.GroupTopics {
  1594  		msgs = append(msgs, Message{Topic: topic})
  1595  	}
  1596  	if err := w.WriteMessages(ctx, msgs...); err != nil {
  1597  		t.Logf("write error: %+v", err)
  1598  	}
  1599  
  1600  	wg := new(sync.WaitGroup)
  1601  	wg.Add(len(msgs))
  1602  
  1603  	go func() {
  1604  		wg.Wait()
  1605  		t.Log("closing reader")
  1606  		r.Close()
  1607  	}()
  1608  
  1609  	for {
  1610  		msg, err := r.ReadMessage(ctx)
  1611  		if err != nil {
  1612  			if errors.Is(err, io.EOF) {
  1613  				t.Log("reader closed")
  1614  				break
  1615  			}
  1616  
  1617  			t.Fatalf("read error: %+v", err)
  1618  		} else {
  1619  			t.Logf("message read: %+v", msg)
  1620  			wg.Done()
  1621  		}
  1622  	}
  1623  
  1624  	nMsgs := r.Stats().Messages
  1625  	if nMsgs != int64(len(conf.GroupTopics)) {
  1626  		t.Fatalf("expected to receive %d messages, but got %d", len(conf.GroupTopics), nMsgs)
  1627  	}
  1628  }
  1629  
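         // getOffsets fetches the committed offsets for partition 0 of the configured topic
         // from the consumer group coordinator.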
  1630  func getOffsets(t *testing.T, config ReaderConfig) map[int]int64 {
   1631  	// minimal config required to look up the coordinator
  1632  	cg := ConsumerGroup{
  1633  		config: ConsumerGroupConfig{
  1634  			ID:      config.GroupID,
  1635  			Brokers: config.Brokers,
  1636  			Dialer:  config.Dialer,
  1637  		},
  1638  	}
  1639  
  1640  	conn, err := cg.coordinator()
  1641  	if err != nil {
  1642  		t.Errorf("unable to connect to coordinator: %v", err)
  1643  	}
  1644  	defer conn.Close()
  1645  
  1646  	offsets, err := conn.offsetFetch(offsetFetchRequestV1{
  1647  		GroupID: config.GroupID,
  1648  		Topics: []offsetFetchRequestV1Topic{{
  1649  			Topic:      config.Topic,
  1650  			Partitions: []int32{0},
  1651  		}},
  1652  	})
  1653  	if err != nil {
  1654  		t.Errorf("bad fetchOffsets: %v", err)
  1655  	}
  1656  
  1657  	m := map[int]int64{}
  1658  
  1659  	for _, r := range offsets.Responses {
  1660  		if r.Topic == config.Topic {
  1661  			for _, p := range r.PartitionResponses {
  1662  				m[int(p.Partition)] = p.Offset
  1663  			}
  1664  		}
  1665  	}
  1666  
  1667  	return m
  1668  }
  1669  
  1670  const (
  1671  	connTO     = 1 * time.Second
  1672  	connTestTO = 2 * connTO
  1673  )
  1674  
  1675  func TestErrorCannotConnect(t *testing.T) {
  1676  	r := NewReader(ReaderConfig{
  1677  		Brokers:     []string{"localhost:9093"},
  1678  		Dialer:      &Dialer{Timeout: connTO},
  1679  		MaxAttempts: 1,
  1680  		Topic:       makeTopic(),
  1681  	})
  1682  	ctx, cancel := context.WithTimeout(context.Background(), connTestTO)
  1683  	defer cancel()
  1684  
  1685  	_, err := r.FetchMessage(ctx)
  1686  	if err == nil || ctx.Err() != nil {
  1687  		t.Errorf("Reader.FetchMessage must fail when it cannot " +
  1688  			"connect")
  1689  	}
  1690  }
  1691  
  1692  func TestErrorCannotConnectGroupSubscription(t *testing.T) {
  1693  	r := NewReader(ReaderConfig{
  1694  		Brokers:     []string{"localhost:9093"},
  1695  		Dialer:      &Dialer{Timeout: 1 * time.Second},
  1696  		GroupID:     "foobar",
  1697  		MaxAttempts: 1,
  1698  		Topic:       makeTopic(),
  1699  	})
  1700  	ctx, cancel := context.WithTimeout(context.Background(), connTestTO)
  1701  	defer cancel()
  1702  
  1703  	_, err := r.FetchMessage(ctx)
  1704  	if err == nil || ctx.Err() != nil {
  1705  		t.Errorf("Reader.FetchMessage with a group subscription " +
  1706  			"must fail when it cannot connect")
  1707  	}
  1708  }
  1709  
  1710  // Tests that the reader can handle messages where the response is truncated
  1711  // due to reaching MaxBytes.
  1712  //
  1713  // If MaxBytes is too small to fit 1 record then it will never truncate, so
  1714  // we start from a small message size and increase it until we are sure
  1715  // truncation has happened at some point.
  1716  func TestReaderTruncatedResponse(t *testing.T) {
  1717  	topic := makeTopic()
  1718  	createTopic(t, topic, 1)
  1719  	defer deleteTopic(t, topic)
  1720  
  1721  	readerMaxBytes := 100
  1722  	batchSize := 4
  1723  	maxMsgPadding := 5
  1724  	readContextTimeout := 10 * time.Second
  1725  
  1726  	var msgs []Message
  1727  	// `n` is the unique, incrementing key of each message.
  1728  	n := 0
  1729  	// `i` is the number of padding bytes in each message value.
  1730  	for i := 0; i < maxMsgPadding; i++ {
  1731  		bb := bytes.Buffer{}
  1732  		for x := 0; x < i; x++ {
  1733  			_, err := bb.WriteRune('0')
  1734  			require.NoError(t, err)
  1735  		}
  1736  		padding := bb.Bytes()
  1737  		// `j` counts how many messages are produced with this padding size.
  1738  		for j := 0; j < batchSize*4; j++ {
  1739  			msgs = append(msgs, Message{
  1740  				Key:   []byte(fmt.Sprintf("%05d", n)),
  1741  				Value: padding,
  1742  			})
  1743  			n++
  1744  		}
  1745  	}
  1746  
  1747  	wr := NewWriter(WriterConfig{
  1748  		Brokers:   []string{"localhost:9092"},
  1749  		BatchSize: batchSize,
  1750  		Async:     false,
  1751  		Topic:     topic,
  1752  		Balancer:  &LeastBytes{},
  1753  	})
  1754  	err := wr.WriteMessages(context.Background(), msgs...)
  1755  	require.NoError(t, err)
  1756  
  1757  	ctx, cancel := context.WithTimeout(context.Background(), readContextTimeout)
  1758  	defer cancel()
  1759  	r := NewReader(ReaderConfig{
  1760  		Brokers:  []string{"localhost:9092"},
  1761  		Topic:    topic,
  1762  		MinBytes: 1,
  1763  		MaxBytes: readerMaxBytes,
  1764  		// Speed up testing
  1765  		MaxWait: 100 * time.Millisecond,
  1766  	})
  1767  	defer r.Close()
  1768  
  1769  	expectedKeys := map[string]struct{}{}
  1770  	for _, k := range msgs {
  1771  		expectedKeys[string(k.Key)] = struct{}{}
  1772  	}
  1773  	keys := map[string]struct{}{}
  1774  	for {
  1775  		m, err := r.FetchMessage(ctx)
  1776  		require.NoError(t, err)
  1777  		keys[string(m.Key)] = struct{}{}
  1778  
  1779  		t.Logf("got key %s have %d keys expect %d\n", string(m.Key), len(keys), len(expectedKeys))
  1780  		if len(keys) == len(expectedKeys) {
  1781  			require.Equal(t, expectedKeys, keys)
  1782  			return
  1783  		}
  1784  	}
  1785  }
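
        // truncationStatsSketch is an illustrative follow-up check, not part of the
        // upstream test above: once every expected key has been read, the reader's
        // fetch count can be compared against the number of batches produced. More
        // fetches than batches is a rough, heuristic signal that at least one
        // truncated response forced a re-fetch; it is not a guarantee. The helper
        // name and the `producedBatches` parameter are hypothetical.
        func truncationStatsSketch(t *testing.T, r *Reader, producedBatches int64) {
        	t.Helper()
        
        	stats := r.Stats()
        	t.Logf("fetches=%d messages=%d bytes=%d", stats.Fetches, stats.Messages, stats.Bytes)
        	if stats.Fetches <= producedBatches {
        		t.Logf("only %d fetches for %d batches; truncation may not have occurred", stats.Fetches, producedBatches)
        	}
        }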
  1786  
  1787  // Tests that the reader can read record batches from log-compacted topics
  1788  // where the batch ends with compacted records.
  1789  //
  1790  // This test forces varying-sized chunks of duplicated messages and configures
  1791  // the topic with a minimal `segment.bytes` in order to guarantee that at least
  1792  // 1 batch can be compacted down to 0 "unread" messages while retaining at
  1793  // least 1 "old" message; otherwise the batch is skipped entirely.
  1794  func TestReaderReadCompactedMessage(t *testing.T) {
  1795  	topic := makeTopic()
  1796  	createTopicWithCompaction(t, topic, 1)
  1797  	defer deleteTopic(t, topic)
  1798  
  1799  	msgs := makeTestDuplicateSequence()
  1800  
  1801  	writeMessagesForCompactionCheck(t, topic, msgs)
  1802  
  1803  	expectedKeys := map[string]int{}
  1804  	for _, msg := range msgs {
  1805  		expectedKeys[string(msg.Key)] = 1
  1806  	}
  1807  
  1808  	// kafka 2.0.1 is extra slow
  1809  	ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
  1810  	defer cancel()
  1811  	for {
  1812  		success := func() bool {
  1813  			r := NewReader(ReaderConfig{
  1814  				Brokers:  []string{"localhost:9092"},
  1815  				Topic:    topic,
  1816  				MinBytes: 200,
  1817  				MaxBytes: 200,
  1818  				// Speed up testing
  1819  				MaxWait: 100 * time.Millisecond,
  1820  			})
  1821  			defer r.Close()
  1822  
  1823  			keys := map[string]int{}
  1824  			for {
  1825  				m, err := r.FetchMessage(ctx)
  1826  				if err != nil {
  1827  					t.Logf("can't get message from compacted log: %v", err)
  1828  					return false
  1829  				}
  1830  				keys[string(m.Key)]++
  1831  
  1832  				if len(keys) == countKeys(msgs) {
  1833  					t.Logf("got keys: %+v", keys)
  1834  					return reflect.DeepEqual(keys, expectedKeys)
  1835  				}
  1836  			}
  1837  		}()
  1838  		if success {
  1839  			return
  1840  		}
  1841  		select {
  1842  		case <-ctx.Done():
  1843  			t.Fatal(ctx.Err())
  1844  		default:
  1845  		}
  1846  	}
  1847  }
  1848  
  1849  // writeMessagesForCompactionCheck writes messages with a writer configuration tuned for the compaction tests.
  1850  func writeMessagesForCompactionCheck(t *testing.T, topic string, msgs []Message) {
  1851  	t.Helper()
  1852  
  1853  	wr := NewWriter(WriterConfig{
  1854  		Brokers: []string{"localhost:9092"},
  1855  		// Batch size must be large enough to have multiple compacted records
  1856  		// for testing more edge cases.
  1857  		BatchSize: 3,
  1858  		Async:     false,
  1859  		Topic:     topic,
  1860  		Balancer:  &LeastBytes{},
  1861  	})
  1862  	err := wr.WriteMessages(context.Background(), msgs...)
  1863  	require.NoError(t, err)
  1864  }
  1865  
  1866  // makeTestDuplicateSequence creates messages for compacted log testing
  1867  //
  1868  // All keys and values are 4 characters long to tightly control how many
  1869  // messages are per log segment.
  1870  func makeTestDuplicateSequence() []Message {
  1871  	var msgs []Message
  1872  	// `n` is an increasing counter, so messages keyed by it are never compacted.
  1873  	n := 0
  1874  	// `i` determines how many duplicated (compactable) records follow each unique message.
  1875  	for i := 0; i < 5; i++ {
  1876  		// `j` is how many times the current pattern repeats. Because each
  1877  		// pattern is slightly larger or smaller than the log segment size,
  1878  		// repeating it enough times eventually exercises every possible
  1879  		// alignment of records within a segment.
  1880  		for j := 0; j < 30; j++ {
  1881  			msgs = append(msgs, Message{
  1882  				Key:   []byte(fmt.Sprintf("%04d", n)),
  1883  				Value: []byte(fmt.Sprintf("%04d", n)),
  1884  			})
  1885  			n++
  1886  
  1887  			// This produces the duplicated messages to compact.
  1888  			for k := 0; k < i; k++ {
  1889  				msgs = append(msgs, Message{
  1890  					Key:   []byte("dup_"),
  1891  					Value: []byte("dup_"),
  1892  				})
  1893  			}
  1894  		}
  1895  	}
  1896  
  1897  	// "end markers" to force duplicate message outside of the last segment of
  1898  	// the log so that they can all be compacted.
  1899  	for i := 0; i < 10; i++ {
  1900  		msgs = append(msgs, Message{
  1901  			Key:   []byte(fmt.Sprintf("e-%02d", i)),
  1902  			Value: []byte(fmt.Sprintf("e-%02d", i)),
  1903  		})
  1904  	}
  1905  	return msgs
  1906  }
  1907  
  1908  // countKeys counts the unique keys in the given Message slice.
  1909  func countKeys(msgs []Message) int {
  1910  	m := make(map[string]struct{})
  1911  	for _, msg := range msgs {
  1912  		m[string(msg.Key)] = struct{}{}
  1913  	}
  1914  	return len(m)
  1915  }
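
        // duplicateSequenceSketch is an illustrative helper, not used by the tests
        // above: it logs the shape of the data produced by makeTestDuplicateSequence,
        // i.e. how many messages are written versus how many unique keys should
        // survive compaction (which is what countKeys reports). The helper name is
        // hypothetical.
        func duplicateSequenceSketch(t *testing.T) {
        	t.Helper()
        
        	msgs := makeTestDuplicateSequence()
        	t.Logf("produced %d messages carrying %d unique keys; the %q records are the compaction targets",
        		len(msgs), countKeys(msgs), "dup_")
        }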
  1916  
  1917  func createTopicWithCompaction(t *testing.T, topic string, partitions int) {
  1918  	t.Helper()
  1919  
  1920  	t.Logf("createTopic(%s, %d)", topic, partitions)
  1921  
  1922  	conn, err := Dial("tcp", "localhost:9092")
  1923  	require.NoError(t, err)
  1924  	defer conn.Close()
  1925  
  1926  	controller, err := conn.Controller()
  1927  	require.NoError(t, err)
  1928  
  1929  	conn, err = Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
  1930  	require.NoError(t, err)
  1931  
  1932  	conn.SetDeadline(time.Now().Add(10 * time.Second))
  1933  
  1934  	err = conn.CreateTopics(TopicConfig{
  1935  		Topic:             topic,
  1936  		NumPartitions:     partitions,
  1937  		ReplicationFactor: 1,
  1938  		ConfigEntries: []ConfigEntry{
  1939  			{
  1940  				ConfigName:  "cleanup.policy",
  1941  				ConfigValue: "compact",
  1942  			},
  1943  			{
  1944  				ConfigName:  "segment.bytes",
  1945  				ConfigValue: "200",
  1946  			},
  1947  		},
  1948  	})
  1949  	if err != nil && !errors.Is(err, TopicAlreadyExists) {
  1950  		require.NoError(t, err)
  1951  	}
  1954  
  1955  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
  1956  	defer cancel()
  1957  	waitForTopic(ctx, t, topic)
  1958  }
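
        // partitionReadinessSketch is an illustrative variant of a readiness check,
        // not part of the upstream tests (they rely on waitForTopic): it polls
        // ReadPartitions until the topic reports the expected partition count or the
        // context expires. The broker address mirrors the tests above; the helper
        // name is hypothetical.
        func partitionReadinessSketch(ctx context.Context, t *testing.T, topic string, partitions int) {
        	t.Helper()
        
        	for {
        		conn, err := Dial("tcp", "localhost:9092")
        		if err == nil {
        			parts, perr := conn.ReadPartitions(topic)
        			conn.Close()
        			if perr == nil && len(parts) == partitions {
        				return
        			}
        		}
        		select {
        		case <-ctx.Done():
        			t.Fatalf("topic %s never became ready: %v", topic, ctx.Err())
        		case <-time.After(100 * time.Millisecond):
        		}
        	}
        }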
  1959  
  1960  // The current behavior of the Reader is to retry OffsetOutOfRange errors
  1961  // indefinitely, which results in programs hanging in the event of a topic being
  1962  // re-created while a consumer is running. To retain backwards compatibility,
  1963  // ReaderConfig.OffsetOutOfRangeError instructs the Reader to return an error in
  1964  // this case instead, allowing callers to react (one recovery option is sketched after this test).
  1965  func testReaderTopicRecreated(t *testing.T, ctx context.Context, r *Reader) {
  1966  	r.config.OffsetOutOfRangeError = true
  1967  
  1968  	topic := r.config.Topic
  1969  
  1970  	// add 1 message to the topic
  1971  	prepareReader(t, ctx, r, makeTestSequence(1)...)
  1972  
  1973  	// consume the message (moving the offset from 0 -> 1)
  1974  	_, err := r.ReadMessage(ctx)
  1975  	require.NoError(t, err)
  1976  
  1977  	// destroy the topic, then recreate it so the offset now becomes 0
  1978  	deleteTopic(t, topic)
  1979  	createTopic(t, topic, 1)
  1980  
  1981  	// expect an error, since the offset should now be out of range
  1982  	_, err = r.ReadMessage(ctx)
  1983  	require.ErrorIs(t, err, OffsetOutOfRange)
  1984  }
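
        // offsetOutOfRangeRecoverySketch illustrates one way a caller could react to
        // the error surfaced when OffsetOutOfRangeError is enabled: reset the reader
        // to the earliest available offset and read again. This is a sketch of a
        // possible recovery policy, not part of the package API, and it only applies
        // to readers without a GroupID (SetOffset returns an error otherwise). The
        // helper name is hypothetical.
        func offsetOutOfRangeRecoverySketch(ctx context.Context, r *Reader) (Message, error) {
        	msg, err := r.ReadMessage(ctx)
        	if errors.Is(err, OffsetOutOfRange) {
        		// The topic was likely truncated or re-created; start over from the
        		// earliest offset that is still available.
        		if serr := r.SetOffset(FirstOffset); serr != nil {
        			return Message{}, serr
        		}
        		return r.ReadMessage(ctx)
        	}
        	return msg, err
        }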