github.com/rbisecke/kafka-go@v0.4.27/reader_test.go

     1  package kafka
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"io"
     7  	"math/rand"
     8  	"net"
     9  	"os"
    10  	"reflect"
    11  	"strconv"
    12  	"sync"
    13  	"testing"
    14  	"time"
    15  )
    16  
    17  func TestReader(t *testing.T) {
    18  	tests := []struct {
    19  		scenario string
    20  		function func(*testing.T, context.Context, *Reader)
    21  	}{
    22  		{
    23  			scenario: "calling Read with a context that has been canceled returns an error",
    24  			function: testReaderReadCanceled,
    25  		},
    26  
    27  		{
    28  			scenario: "all messages of the stream are returned when calling ReadMessage repeatedly",
    29  			function: testReaderReadMessages,
    30  		},
    31  
    32  		{
    33  			scenario: "test special offsets -1 and -2",
    34  			function: testReaderSetSpecialOffsets,
    35  		},
    36  
    37  		{
    38  			scenario: "setting the offset to random values returns the expected messages when Read is called",
    39  			function: testReaderSetRandomOffset,
    40  		},
    41  
    42  		{
    43  			scenario: "setting the offset by TimeStamp",
    44  			function: testReaderSetOffsetAt,
    45  		},
    46  
    47  		{
    48  			scenario: "calling Lag returns the lag of the last message read from kafka",
    49  			function: testReaderLag,
    50  		},
    51  
    52  		{
    53  			scenario: "calling ReadLag returns the current lag of a reader",
    54  			function: testReaderReadLag,
    55  		},
    56  
    57  		{ // https://github.com/rbisecke/kafka-go/issues/30
    58  			scenario: "reading from an out-of-range offset waits until the context is cancelled",
    59  			function: testReaderOutOfRangeGetsCanceled,
    60  		},
    61  	}
    62  
    63  	for _, test := range tests {
    64  		testFunc := test.function
    65  		t.Run(test.scenario, func(t *testing.T) {
    66  			t.Parallel()
    67  
    68  			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    69  			defer cancel()
    70  
    71  			r := NewReader(ReaderConfig{
    72  				Brokers:  []string{"localhost:9092"},
    73  				Topic:    makeTopic(),
    74  				MinBytes: 1,
    75  				MaxBytes: 10e6,
    76  				MaxWait:  100 * time.Millisecond,
    77  			})
    78  			defer r.Close()
    79  			testFunc(t, ctx, r)
    80  		})
    81  	}
    82  }
    83  
    84  func testReaderReadCanceled(t *testing.T, ctx context.Context, r *Reader) {
    85  	ctx, cancel := context.WithCancel(ctx)
    86  	cancel()
    87  
    88  	if _, err := r.ReadMessage(ctx); err != context.Canceled {
    89  		t.Error(err)
    90  	}
    91  }
    92  
    93  func testReaderReadMessages(t *testing.T, ctx context.Context, r *Reader) {
    94  	const N = 1000
    95  	prepareReader(t, ctx, r, makeTestSequence(N)...)
    96  
    97  	var offset int64
    98  
    99  	for i := 0; i != N; i++ {
   100  		m, err := r.ReadMessage(ctx)
   101  		if err != nil {
   102  			t.Error("reading message at offset", offset, "failed:", err)
   103  			return
   104  		}
   105  		offset = m.Offset + 1
   106  		v, _ := strconv.Atoi(string(m.Value))
   107  		if v != i {
   108  			t.Error("message at index", i, "has wrong value:", v)
   109  			return
   110  		}
   111  	}
   112  }
   113  
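         // testReaderSetSpecialOffsets exercises the special offset sentinels exported
         // by this package, FirstOffset (-2) and LastOffset (-1): the "first" message
         // is written up front so FirstOffset resolves to it, and the "last" message is
         // written a second later so LastOffset has something new to read.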
   114  func testReaderSetSpecialOffsets(t *testing.T, ctx context.Context, r *Reader) {
   115  	prepareReader(t, ctx, r, Message{Value: []byte("first")})
   116  	prepareReader(t, ctx, r, makeTestSequence(3)...)
   117  
   118  	go func() {
   119  		time.Sleep(1 * time.Second)
   120  		prepareReader(t, ctx, r, Message{Value: []byte("last")})
   121  	}()
   122  
   123  	for _, test := range []struct {
   124  		off, final int64
   125  		want       string
   126  	}{
   127  		{FirstOffset, 1, "first"},
   128  		{LastOffset, 5, "last"},
   129  	} {
   130  		offset := test.off
   131  		if err := r.SetOffset(offset); err != nil {
   132  			t.Error("setting offset", offset, "failed:", err)
   133  		}
   134  		m, err := r.ReadMessage(ctx)
   135  		if err != nil {
   136  			t.Error("reading at offset", offset, "failed:", err)
   137  		}
   138  		if string(m.Value) != test.want {
   139  			t.Error("message at offset", offset, "has wrong value:", string(m.Value))
   140  		}
   141  		if off := r.Offset(); off != test.final {
   142  			t.Errorf("bad final offset: got %d, want %d", off, test.final)
   143  		}
   144  	}
   145  }
   146  
   147  func testReaderSetRandomOffset(t *testing.T, ctx context.Context, r *Reader) {
   148  	const N = 10
   149  	prepareReader(t, ctx, r, makeTestSequence(N)...)
   150  
   151  	for i := 0; i != 2*N; i++ {
   152  		offset := rand.Intn(N)
   153  		r.SetOffset(int64(offset))
   154  		m, err := r.ReadMessage(ctx)
   155  		if err != nil {
   156  			t.Error("seeking to offset", offset, "failed:", err)
   157  			return
   158  		}
   159  		v, _ := strconv.Atoi(string(m.Value))
   160  		if v != offset {
   161  			t.Error("message at offset", offset, "has wrong value:", v)
   162  			return
   163  		}
   164  	}
   165  }
   166  
   167  func testReaderSetOffsetAt(t *testing.T, ctx context.Context, r *Reader) {
    168  	// Write two batches of messages with a 2 second pause in between so that
    169  	// messages 0...9 are produced a couple of seconds before messages 10...19.
    170  	// We then grab the timestamp of the message at offset 10 and use it to
    171  	// position the reader with SetOffsetAt.
   172  	const N = 10
   173  	prepareReader(t, ctx, r, makeTestSequence(N)...)
   174  	time.Sleep(time.Second * 2)
   175  	prepareReader(t, ctx, r, makeTestSequence(N)...)
   176  
   177  	var ts time.Time
   178  	for i := 0; i < N*2; i++ {
   179  		m, err := r.ReadMessage(ctx)
   180  		if err != nil {
   181  			t.Error("error reading message", err)
   182  		}
    183  		// grab the timestamp of the message at offset 10 (the first message of the second batch)
   184  		if i == 10 {
   185  			ts = m.Time
   186  		}
   187  	}
   188  
   189  	err := r.SetOffsetAt(ctx, ts)
   190  	if err != nil {
   191  		t.Fatal("error setting offset by timestamp", err)
   192  	}
   193  
   194  	m, err := r.ReadMessage(context.Background())
   195  	if err != nil {
   196  		t.Fatal("error reading message", err)
   197  	}
   198  
   199  	if m.Offset != 10 {
   200  		t.Errorf("expected offset of 10, received offset %d", m.Offset)
   201  	}
   202  }
   203  
   204  func testReaderLag(t *testing.T, ctx context.Context, r *Reader) {
   205  	const N = 5
   206  	prepareReader(t, ctx, r, makeTestSequence(N)...)
   207  
   208  	if lag := r.Lag(); lag != 0 {
   209  		t.Errorf("the initial lag value is %d but was expected to be 0", lag)
   210  	}
   211  
   212  	for i := 0; i != N; i++ {
   213  		r.ReadMessage(ctx)
   214  		expect := int64(N - (i + 1))
   215  
   216  		if lag := r.Lag(); lag != expect {
   217  			t.Errorf("the lag value at offset %d is %d but was expected to be %d", i, lag, expect)
   218  		}
   219  	}
   220  }
   221  
   222  func testReaderReadLag(t *testing.T, ctx context.Context, r *Reader) {
   223  	const N = 5
   224  	prepareReader(t, ctx, r, makeTestSequence(N)...)
   225  
   226  	if lag, err := r.ReadLag(ctx); err != nil {
   227  		t.Error(err)
   228  	} else if lag != N {
   229  		t.Errorf("the initial lag value is %d but was expected to be %d", lag, N)
   230  	}
   231  
   232  	for i := 0; i != N; i++ {
   233  		r.ReadMessage(ctx)
   234  		expect := int64(N - (i + 1))
   235  
   236  		if lag, err := r.ReadLag(ctx); err != nil {
   237  			t.Error(err)
   238  		} else if lag != expect {
   239  			t.Errorf("the lag value at offset %d is %d but was expected to be %d", i, lag, expect)
   240  		}
   241  	}
   242  }
   243  
   244  func testReaderOutOfRangeGetsCanceled(t *testing.T, ctx context.Context, r *Reader) {
   245  	prepareReader(t, ctx, r, makeTestSequence(10)...)
   246  
   247  	const D = 100 * time.Millisecond
   248  	t0 := time.Now()
   249  
   250  	ctx, cancel := context.WithTimeout(ctx, D)
   251  	defer cancel()
   252  
   253  	if err := r.SetOffset(42); err != nil {
   254  		t.Error(err)
   255  	}
   256  
   257  	_, err := r.ReadMessage(ctx)
   258  	if err != context.DeadlineExceeded {
   259  		t.Error("bad error:", err)
   260  	}
   261  
   262  	t1 := time.Now()
   263  
   264  	if d := t1.Sub(t0); d < D {
   265  		t.Error("ReadMessage returned too early after", d)
   266  	}
   267  }
   268  
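         // createTopic creates the named topic with the requested number of partitions
         // and a replication factor of 1, treating TopicAlreadyExists as success, then
         // blocks via waitForTopic until the topic shows up in cluster metadata.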
   269  func createTopic(t *testing.T, topic string, partitions int) {
   270  	t.Helper()
   271  
   272  	t.Logf("createTopic(%s, %d)", topic, partitions)
   273  
   274  	conn, err := Dial("tcp", "localhost:9092")
   275  	if err != nil {
   276  		err = fmt.Errorf("createTopic, Dial: %w", err)
   277  		t.Fatal(err)
   278  	}
   279  	defer conn.Close()
   280  
   281  	controller, err := conn.Controller()
   282  	if err != nil {
   283  		err = fmt.Errorf("createTopic, conn.Controller: %w", err)
   284  		t.Fatal(err)
   285  	}
   286  
   287  	conn, err = Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
   288  	if err != nil {
   289  		t.Fatal(err)
   290  	}
   291  
   292  	conn.SetDeadline(time.Now().Add(10 * time.Second))
   293  
   294  	_, err = conn.createTopics(createTopicsRequestV0{
   295  		Topics: []createTopicsRequestV0Topic{
   296  			{
   297  				Topic:             topic,
   298  				NumPartitions:     int32(partitions),
   299  				ReplicationFactor: 1,
   300  			},
   301  		},
   302  		Timeout: milliseconds(time.Second),
   303  	})
   304  	switch err {
   305  	case nil:
   306  		// ok
   307  	case TopicAlreadyExists:
   308  		// ok
   309  	default:
    310  		err = fmt.Errorf("createTopic, conn.createTopics: %w", err)
   311  		t.Error(err)
   312  		t.FailNow()
   313  	}
   314  
   315  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
   316  	defer cancel()
   317  
   318  	waitForTopic(ctx, t, topic)
   319  }
   320  
    321  // waitForTopic blocks until the topic reports at least one partition in cluster metadata, failing the test if the context expires first.
   322  func waitForTopic(ctx context.Context, t *testing.T, topic string) {
   323  	t.Helper()
   324  
   325  	for {
   326  		select {
   327  		case <-ctx.Done():
   328  			t.Fatalf("reached deadline before verifying topic existence")
   329  		default:
   330  		}
   331  
   332  		cli := &Client{
   333  			Addr:    TCP("localhost:9092"),
   334  			Timeout: 5 * time.Second,
   335  		}
   336  
   337  		response, err := cli.Metadata(ctx, &MetadataRequest{
   338  			Addr:   cli.Addr,
   339  			Topics: []string{topic},
   340  		})
   341  		if err != nil {
   342  			t.Fatalf("waitForTopic: error listing topics: %s", err.Error())
   343  		}
   344  
   345  		// Find a topic which has at least 1 partition in the metadata response
   346  		for _, top := range response.Topics {
   347  			if top.Name != topic {
   348  				continue
   349  			}
   350  
   351  			numPartitions := len(top.Partitions)
   352  			t.Logf("waitForTopic: found topic %q with %d partitions",
   353  				topic, numPartitions)
   354  
   355  			if numPartitions > 0 {
   356  				return
   357  			}
   358  		}
   359  
   360  		t.Logf("retrying after 1s")
   361  		time.Sleep(time.Second)
   362  		continue
   363  	}
   364  }
   365  
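         // deleteTopic deletes the given topics by dialing the controller broker
         // directly; tests defer it to clean up topics created with createTopic.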
   366  func deleteTopic(t *testing.T, topic ...string) {
   367  	t.Helper()
   368  	conn, err := Dial("tcp", "localhost:9092")
   369  	if err != nil {
   370  		t.Fatal(err)
   371  	}
   372  	defer conn.Close()
   373  
   374  	controller, err := conn.Controller()
   375  	if err != nil {
   376  		t.Fatal(err)
   377  	}
   378  
   379  	conn, err = Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
   380  	if err != nil {
   381  		t.Fatal(err)
   382  	}
   383  
   384  	conn.SetDeadline(time.Now().Add(10 * time.Second))
   385  
   386  	if err := conn.DeleteTopics(topic...); err != nil {
   387  		t.Fatal(err)
   388  	}
   389  }
   390  
   391  func TestReaderOnNonZeroPartition(t *testing.T) {
   392  	tests := []struct {
   393  		scenario string
   394  		function func(*testing.T, context.Context, *Reader)
   395  	}{
   396  		{
   397  			scenario: "topic and partition should now be included in header",
   398  			function: testReaderSetsTopicAndPartition,
   399  		},
   400  	}
   401  
   402  	for _, test := range tests {
   403  		testFunc := test.function
   404  		t.Run(test.scenario, func(t *testing.T) {
   405  			t.Parallel()
   406  
   407  			topic := makeTopic()
   408  			createTopic(t, topic, 2)
   409  			defer deleteTopic(t, topic)
   410  
   411  			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
   412  			defer cancel()
   413  
   414  			r := NewReader(ReaderConfig{
   415  				Brokers:   []string{"localhost:9092"},
   416  				Topic:     topic,
   417  				Partition: 1,
   418  				MinBytes:  1,
   419  				MaxBytes:  10e6,
   420  				MaxWait:   100 * time.Millisecond,
   421  			})
   422  			defer r.Close()
   423  			testFunc(t, ctx, r)
   424  		})
   425  	}
   426  }
   427  
   428  func testReaderSetsTopicAndPartition(t *testing.T, ctx context.Context, r *Reader) {
   429  	const N = 3
   430  	prepareReader(t, ctx, r, makeTestSequence(N)...)
   431  
   432  	for i := 0; i != N; i++ {
   433  		m, err := r.ReadMessage(ctx)
   434  		if err != nil {
   435  			t.Error("reading message failed:", err)
   436  			return
   437  		}
   438  
   439  		if m.Topic == "" {
   440  			t.Error("expected topic to be set")
   441  			return
   442  		}
   443  		if m.Topic != r.config.Topic {
   444  			t.Errorf("expected message to contain topic, %v; got %v", r.config.Topic, m.Topic)
   445  			return
   446  		}
   447  		if m.Partition != r.config.Partition {
   448  			t.Errorf("expected partition to be set; expected 1, got %v", m.Partition)
   449  			return
   450  		}
   451  	}
   452  }
   453  
   454  // TestReadTruncatedMessages uses a configuration designed to get the Broker to
   455  // return truncated messages.  It exercises the case where an earlier bug caused
   456  // reading to time out by attempting to read beyond the current response.  This
   457  // test is not perfect, but it is pretty reliable about reproducing the issue.
   458  //
   459  // NOTE : it currently only succeeds against kafka 0.10.1.0, so it will be
   460  // skipped.  It's here so that it can be manually run.
   461  func TestReadTruncatedMessages(t *testing.T) {
   462  	// todo : it would be great to get it to work against 0.11.0.0 so we could
   463  	//        include it in CI unit tests.
   464  	t.Skip()
   465  
   466  	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
   467  	defer cancel()
   468  	r := NewReader(ReaderConfig{
   469  		Brokers:  []string{"localhost:9092"},
   470  		Topic:    makeTopic(),
   471  		MinBytes: 1,
   472  		MaxBytes: 100,
   473  		MaxWait:  100 * time.Millisecond,
   474  	})
   475  	defer r.Close()
   476  	n := 500
   477  	prepareReader(t, ctx, r, makeTestSequence(n)...)
   478  	for i := 0; i < n; i++ {
   479  		if _, err := r.ReadMessage(ctx); err != nil {
   480  			t.Fatal(err)
   481  		}
   482  	}
   483  }
   484  
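         // makeTestSequence builds n messages whose values are the strings "0".."n-1"
         // and whose timestamps increase by one millisecond per message, so tests can
         // assert on both ordering and time-based offset lookups.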
   485  func makeTestSequence(n int) []Message {
   486  	base := time.Now()
   487  	msgs := make([]Message, n)
   488  	for i := 0; i != n; i++ {
   489  		msgs[i] = Message{
   490  			Time:  base.Add(time.Duration(i) * time.Millisecond).Truncate(time.Millisecond),
   491  			Value: []byte(strconv.Itoa(i)),
   492  		}
   493  	}
   494  	return msgs
   495  }
   496  
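         // prepareReader seeds the reader's topic/partition with msgs by dialing the
         // partition leader and writing them directly, retrying the dial once per
         // second until it succeeds or the context is cancelled (e.g. while the
         // freshly created topic has no leader yet).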
   497  func prepareReader(t *testing.T, ctx context.Context, r *Reader, msgs ...Message) {
   498  	var config = r.Config()
   499  	var conn *Conn
   500  	var err error
   501  
   502  	for {
   503  		if conn, err = DialLeader(ctx, "tcp", "localhost:9092", config.Topic, config.Partition); err == nil {
   504  			break
   505  		}
   506  		select {
   507  		case <-time.After(time.Second):
   508  		case <-ctx.Done():
   509  			t.Fatal(ctx.Err())
   510  		}
   511  	}
   512  
   513  	defer conn.Close()
   514  
   515  	if _, err := conn.WriteMessages(msgs...); err != nil {
   516  		t.Fatal(err)
   517  	}
   518  }
   519  
   520  var (
   521  	benchmarkReaderOnce    sync.Once
   522  	benchmarkReaderTopic   = makeTopic()
   523  	benchmarkReaderPayload = make([]byte, 2*1024)
   524  )
   525  
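         // BenchmarkReader measures ReadMessage throughput against a local broker. The
         // sync.Once block seeds benchmarkReaderTopic with 10,000 copies of the 2KiB
         // benchmarkReaderPayload exactly once, so repeated benchmark runs in the same
         // process reuse the same pre-written data.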
   526  func BenchmarkReader(b *testing.B) {
   527  	const broker = "localhost:9092"
   528  	ctx := context.Background()
   529  
   530  	benchmarkReaderOnce.Do(func() {
   531  		conn, err := DialLeader(ctx, "tcp", broker, benchmarkReaderTopic, 0)
   532  		if err != nil {
   533  			b.Fatal(err)
   534  		}
   535  		defer conn.Close()
   536  
   537  		msgs := make([]Message, 1000)
   538  		for i := range msgs {
   539  			msgs[i].Value = benchmarkReaderPayload
   540  		}
   541  
   542  		for i := 0; i != 10; i++ { // put 10K messages
   543  			if _, err := conn.WriteMessages(msgs...); err != nil {
   544  				b.Fatal(err)
   545  			}
   546  		}
   547  
   548  		b.ResetTimer()
   549  	})
   550  
   551  	r := NewReader(ReaderConfig{
   552  		Brokers:   []string{broker},
   553  		Topic:     benchmarkReaderTopic,
   554  		Partition: 0,
   555  		MinBytes:  1e3,
   556  		MaxBytes:  1e6,
   557  		MaxWait:   100 * time.Millisecond,
   558  	})
   559  
   560  	for i := 0; i < b.N; i++ {
   561  		if (i % 10000) == 0 {
   562  			r.SetOffset(-1)
   563  		}
   564  		_, err := r.ReadMessage(ctx)
   565  		if err != nil {
   566  			b.Fatal(err)
   567  		}
   568  	}
   569  
   570  	r.Close()
   571  	b.SetBytes(int64(len(benchmarkReaderPayload)))
   572  }
   573  
   574  func TestCloseLeavesGroup(t *testing.T) {
   575  	if os.Getenv("KAFKA_VERSION") == "2.3.1" {
   576  		// There's a bug in 2.3.1 that causes the MemberMetadata to be in the wrong format and thus
   577  		// leads to an error when decoding the DescribeGroupsResponse.
   578  		//
   579  		// See https://issues.apache.org/jira/browse/KAFKA-9150 for details.
   580  		t.Skip("Skipping because kafka version is 2.3.1")
   581  	}
   582  
   583  	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
   584  	defer cancel()
   585  
   586  	topic := makeTopic()
   587  	createTopic(t, topic, 1)
   588  	defer deleteTopic(t, topic)
   589  
   590  	groupID := makeGroupID()
   591  	r := NewReader(ReaderConfig{
   592  		Brokers:          []string{"localhost:9092"},
   593  		Topic:            topic,
   594  		GroupID:          groupID,
   595  		MinBytes:         1,
   596  		MaxBytes:         10e6,
   597  		MaxWait:          100 * time.Millisecond,
   598  		RebalanceTimeout: time.Second,
   599  	})
   600  	prepareReader(t, ctx, r, Message{Value: []byte("test")})
   601  
   602  	conn, err := Dial("tcp", r.config.Brokers[0])
   603  	if err != nil {
   604  		t.Fatalf("error dialing: %v", err)
   605  	}
   606  	defer conn.Close()
   607  
   608  	client, shutdown := newLocalClient()
   609  	defer shutdown()
   610  
   611  	descGroups := func() DescribeGroupsResponse {
   612  		resp, err := client.DescribeGroups(
   613  			ctx,
   614  			&DescribeGroupsRequest{
   615  				GroupIDs: []string{groupID},
   616  			},
   617  		)
   618  		if err != nil {
   619  			t.Fatalf("error from describeGroups %v", err)
   620  		}
   621  		return *resp
   622  	}
   623  
   624  	_, err = r.ReadMessage(ctx)
   625  	if err != nil {
    626  		t.Fatalf("our reader never joined its group or couldn't read a message: %v", err)
   627  	}
   628  	resp := descGroups()
   629  	if len(resp.Groups) != 1 {
   630  		t.Fatalf("expected 1 group. got: %d", len(resp.Groups))
   631  	}
   632  	if len(resp.Groups[0].Members) != 1 {
   633  		t.Fatalf("expected group membership size of %d, but got %d", 1, len(resp.Groups[0].Members))
   634  	}
   635  
   636  	err = r.Close()
   637  	if err != nil {
   638  		t.Fatalf("unexpected error closing reader: %s", err.Error())
   639  	}
   640  	resp = descGroups()
   641  	if len(resp.Groups) != 1 {
   642  		t.Fatalf("expected 1 group. got: %d", len(resp.Groups))
   643  	}
   644  	if len(resp.Groups[0].Members) != 0 {
   645  		t.Fatalf("expected group membership size of %d, but got %d", 0, len(resp.Groups[0].Members))
   646  	}
   647  }
   648  
   649  func testConsumerGroupImmediateClose(t *testing.T, ctx context.Context, r *Reader) {
   650  	if err := r.Close(); err != nil {
   651  		t.Fatalf("bad err: %v", err)
   652  	}
   653  }
   654  
   655  func testConsumerGroupSimple(t *testing.T, ctx context.Context, r *Reader) {
   656  	if err := r.Close(); err != nil {
   657  		t.Fatalf("bad err: %v", err)
   658  	}
   659  }
   660  
   661  func TestReaderSetOffsetWhenConsumerGroupsEnabled(t *testing.T) {
   662  	r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
   663  	if err := r.SetOffset(LastOffset); err != errNotAvailableWithGroup {
   664  		t.Fatalf("expected %v; got %v", errNotAvailableWithGroup, err)
   665  	}
   666  }
   667  
   668  func TestReaderOffsetWhenConsumerGroupsEnabled(t *testing.T) {
   669  	r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
   670  	if offset := r.Offset(); offset != -1 {
   671  		t.Fatalf("expected -1; got %v", offset)
   672  	}
   673  }
   674  
   675  func TestReaderLagWhenConsumerGroupsEnabled(t *testing.T) {
   676  	r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
   677  	if offset := r.Lag(); offset != -1 {
   678  		t.Fatalf("expected -1; got %v", offset)
   679  	}
   680  }
   681  
   682  func TestReaderReadLagReturnsZeroLagWhenConsumerGroupsEnabled(t *testing.T) {
   683  	r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
   684  	lag, err := r.ReadLag(context.Background())
   685  
   686  	if err != errNotAvailableWithGroup {
   687  		t.Fatalf("expected %v; got %v", errNotAvailableWithGroup, err)
   688  	}
   689  
   690  	if lag != 0 {
   691  		t.Fatalf("expected 0; got %d", lag)
   692  	}
   693  }
   694  
   695  func TestReaderPartitionWhenConsumerGroupsEnabled(t *testing.T) {
   696  	invoke := func() (boom bool) {
   697  		defer func() {
   698  			if r := recover(); r != nil {
   699  				boom = true
   700  			}
   701  		}()
   702  
   703  		NewReader(ReaderConfig{
   704  			GroupID:   "set",
   705  			Partition: 1,
   706  		})
   707  		return false
   708  	}
   709  
   710  	if !invoke() {
   711  		t.Fatalf("expected panic; but NewReader worked?!")
   712  	}
   713  
   714  }
   715  
   716  func TestExtractTopics(t *testing.T) {
   717  	testCases := map[string]struct {
   718  		Members []GroupMember
   719  		Topics  []string
   720  	}{
   721  		"nil": {},
   722  		"single member, single topic": {
   723  			Members: []GroupMember{
   724  				{
   725  					ID:     "a",
   726  					Topics: []string{"topic"},
   727  				},
   728  			},
   729  			Topics: []string{"topic"},
   730  		},
   731  		"two members, single topic": {
   732  			Members: []GroupMember{
   733  				{
   734  					ID:     "a",
   735  					Topics: []string{"topic"},
   736  				},
   737  				{
   738  					ID:     "b",
   739  					Topics: []string{"topic"},
   740  				},
   741  			},
   742  			Topics: []string{"topic"},
   743  		},
   744  		"two members, two topics": {
   745  			Members: []GroupMember{
   746  				{
   747  					ID:     "a",
   748  					Topics: []string{"topic-1"},
   749  				},
   750  				{
   751  					ID:     "b",
   752  					Topics: []string{"topic-2"},
   753  				},
   754  			},
   755  			Topics: []string{"topic-1", "topic-2"},
   756  		},
   757  		"three members, three shared topics": {
   758  			Members: []GroupMember{
   759  				{
   760  					ID:     "a",
   761  					Topics: []string{"topic-1", "topic-2"},
   762  				},
   763  				{
   764  					ID:     "b",
   765  					Topics: []string{"topic-2", "topic-3"},
   766  				},
   767  				{
   768  					ID:     "c",
   769  					Topics: []string{"topic-3", "topic-1"},
   770  				},
   771  			},
   772  			Topics: []string{"topic-1", "topic-2", "topic-3"},
   773  		},
   774  	}
   775  
   776  	for label, tc := range testCases {
   777  		t.Run(label, func(t *testing.T) {
   778  			topics := extractTopics(tc.Members)
   779  			if !reflect.DeepEqual(tc.Topics, topics) {
   780  				t.Errorf("expected %v; got %v", tc.Topics, topics)
   781  			}
   782  		})
   783  	}
   784  }
   785  
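         // TestReaderConsumerGroup drives the consumer-group code paths: every scenario
         // gets a freshly created topic and group ID, a reader configured with short
         // heartbeat and rebalance intervals, and a 30 second deadline.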
   786  func TestReaderConsumerGroup(t *testing.T) {
   787  	tests := []struct {
   788  		scenario       string
   789  		partitions     int
   790  		commitInterval time.Duration
   791  		function       func(*testing.T, context.Context, *Reader)
   792  	}{
   793  		{
   794  			scenario:   "basic handshake",
   795  			partitions: 1,
   796  			function:   testReaderConsumerGroupHandshake,
   797  		},
   798  		{
   799  			scenario:   "verify offset committed",
   800  			partitions: 1,
   801  			function:   testReaderConsumerGroupVerifyOffsetCommitted,
   802  		},
   803  
   804  		{
   805  			scenario:       "verify offset committed when using interval committer",
   806  			partitions:     1,
   807  			commitInterval: 400 * time.Millisecond,
   808  			function:       testReaderConsumerGroupVerifyPeriodicOffsetCommitter,
   809  		},
   810  
   811  		{
   812  			scenario:   "rebalance across many partitions and consumers",
   813  			partitions: 8,
   814  			function:   testReaderConsumerGroupRebalanceAcrossManyPartitionsAndConsumers,
   815  		},
   816  
   817  		{
   818  			scenario:   "consumer group commits on close",
   819  			partitions: 3,
   820  			function:   testReaderConsumerGroupVerifyCommitsOnClose,
   821  		},
   822  
   823  		{
   824  			scenario:   "consumer group rebalance",
   825  			partitions: 3,
   826  			function:   testReaderConsumerGroupRebalance,
   827  		},
   828  
   829  		{
   830  			scenario:   "consumer group rebalance across topics",
   831  			partitions: 3,
   832  			function:   testReaderConsumerGroupRebalanceAcrossTopics,
   833  		},
   834  
   835  		{
   836  			scenario:   "consumer group reads content across partitions",
   837  			partitions: 3,
   838  			function:   testReaderConsumerGroupReadContentAcrossPartitions,
   839  		},
   840  
   841  		{
   842  			scenario:   "Close immediately after NewReader",
   843  			partitions: 1,
   844  			function:   testConsumerGroupImmediateClose,
   845  		},
   846  
   847  		{
   848  			scenario:   "Close immediately after NewReader",
   849  			partitions: 1,
   850  			function:   testConsumerGroupSimple,
   851  		},
   852  	}
   853  
   854  	for _, test := range tests {
   855  		t.Run(test.scenario, func(t *testing.T) {
   856  			// It appears that some of the tests depend on all these tests being
   857  			// run concurrently to pass... this is brittle and should be fixed
   858  			// at some point.
   859  			t.Parallel()
   860  
   861  			topic := makeTopic()
   862  			createTopic(t, topic, test.partitions)
   863  			defer deleteTopic(t, topic)
   864  
   865  			groupID := makeGroupID()
   866  			r := NewReader(ReaderConfig{
   867  				Brokers:           []string{"localhost:9092"},
   868  				Topic:             topic,
   869  				GroupID:           groupID,
   870  				HeartbeatInterval: 2 * time.Second,
   871  				CommitInterval:    test.commitInterval,
   872  				RebalanceTimeout:  2 * time.Second,
   873  				RetentionTime:     time.Hour,
   874  				MinBytes:          1,
   875  				MaxBytes:          1e6,
   876  			})
   877  			defer r.Close()
   878  
   879  			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
   880  			defer cancel()
   881  
   882  			test.function(t, ctx, r)
   883  		})
   884  	}
   885  }
   886  
   887  func testReaderConsumerGroupHandshake(t *testing.T, ctx context.Context, r *Reader) {
   888  	prepareReader(t, context.Background(), r, makeTestSequence(5)...)
   889  
   890  	m, err := r.ReadMessage(ctx)
   891  	if err != nil {
   892  		t.Errorf("bad err: %v", err)
   893  	}
   894  	if m.Topic != r.config.Topic {
   895  		t.Errorf("topic not set")
   896  	}
   897  	if m.Offset != 0 {
   898  		t.Errorf("offset not set")
   899  	}
   900  
   901  	m, err = r.ReadMessage(ctx)
   902  	if err != nil {
   903  		t.Errorf("bad err: %v", err)
   904  	}
   905  	if m.Topic != r.config.Topic {
   906  		t.Errorf("topic not set")
   907  	}
   908  	if m.Offset != 1 {
   909  		t.Errorf("offset not set")
   910  	}
   911  }
   912  
   913  func testReaderConsumerGroupVerifyOffsetCommitted(t *testing.T, ctx context.Context, r *Reader) {
   914  	prepareReader(t, context.Background(), r, makeTestSequence(3)...)
   915  
   916  	if _, err := r.FetchMessage(ctx); err != nil {
   917  		t.Errorf("bad err: %v", err) // skip the first message
   918  	}
   919  
   920  	m, err := r.FetchMessage(ctx)
   921  	if err != nil {
   922  		t.Errorf("bad err: %v", err)
   923  	}
   924  
   925  	if err := r.CommitMessages(ctx, m); err != nil {
   926  		t.Errorf("bad commit message: %v", err)
   927  	}
   928  
   929  	offsets := getOffsets(t, r.config)
   930  	if expected := map[int]int64{0: m.Offset + 1}; !reflect.DeepEqual(expected, offsets) {
   931  		t.Errorf("expected %v; got %v", expected, offsets)
   932  	}
   933  }
   934  
   935  func testReaderConsumerGroupVerifyPeriodicOffsetCommitter(t *testing.T, ctx context.Context, r *Reader) {
   936  	prepareReader(t, context.Background(), r, makeTestSequence(3)...)
   937  
   938  	if _, err := r.FetchMessage(ctx); err != nil {
   939  		t.Errorf("bad err: %v", err) // skip the first message
   940  	}
   941  
   942  	m, err := r.FetchMessage(ctx)
   943  	if err != nil {
   944  		t.Errorf("bad err: %v", err)
   945  	}
   946  
   947  	started := time.Now()
   948  	if err := r.CommitMessages(ctx, m); err != nil {
   949  		t.Errorf("bad commit message: %v", err)
   950  	}
    951  	if elapsed := time.Since(started); elapsed > 10*time.Millisecond {
   952  		t.Errorf("background commits should happen nearly instantly")
   953  	}
   954  
   955  	// wait for committer to pick up the commits
   956  	time.Sleep(r.config.CommitInterval * 3)
   957  
   958  	offsets := getOffsets(t, r.config)
   959  	if expected := map[int]int64{0: m.Offset + 1}; !reflect.DeepEqual(expected, offsets) {
   960  		t.Errorf("expected %v; got %v", expected, offsets)
   961  	}
   962  }
   963  
   964  func testReaderConsumerGroupVerifyCommitsOnClose(t *testing.T, ctx context.Context, r *Reader) {
   965  	prepareReader(t, context.Background(), r, makeTestSequence(3)...)
   966  
   967  	if _, err := r.FetchMessage(ctx); err != nil {
   968  		t.Errorf("bad err: %v", err) // skip the first message
   969  	}
   970  
   971  	m, err := r.FetchMessage(ctx)
   972  	if err != nil {
   973  		t.Errorf("bad err: %v", err)
   974  	}
   975  
   976  	if err := r.CommitMessages(ctx, m); err != nil {
   977  		t.Errorf("bad commit message: %v", err)
   978  	}
   979  
   980  	if err := r.Close(); err != nil {
   981  		t.Errorf("bad Close: %v", err)
   982  	}
   983  
   984  	r2 := NewReader(r.config)
   985  	defer r2.Close()
   986  
   987  	offsets := getOffsets(t, r2.config)
   988  	if expected := map[int]int64{0: m.Offset + 1}; !reflect.DeepEqual(expected, offsets) {
   989  		t.Errorf("expected %v; got %v", expected, offsets)
   990  	}
   991  }
   992  
   993  func testReaderConsumerGroupReadContentAcrossPartitions(t *testing.T, ctx context.Context, r *Reader) {
   994  	const N = 12
   995  
   996  	client, shutdown := newLocalClient()
   997  	defer shutdown()
   998  
   999  	writer := &Writer{
  1000  		Addr:      TCP(r.config.Brokers...),
  1001  		Topic:     r.config.Topic,
  1002  		Balancer:  &RoundRobin{},
  1003  		BatchSize: 1,
  1004  		Transport: client.Transport,
  1005  	}
  1006  	if err := writer.WriteMessages(ctx, makeTestSequence(N)...); err != nil {
  1007  		t.Fatalf("bad write messages: %v", err)
  1008  	}
  1009  	if err := writer.Close(); err != nil {
  1010  		t.Fatalf("bad write err: %v", err)
  1011  	}
  1012  
  1013  	partitions := map[int]struct{}{}
  1014  	for i := 0; i < N; i++ {
  1015  		m, err := r.FetchMessage(ctx)
  1016  		if err != nil {
  1017  			t.Errorf("bad error: %s", err)
  1018  		}
  1019  		partitions[m.Partition] = struct{}{}
  1020  	}
  1021  
  1022  	if v := len(partitions); v != 3 {
  1023  		t.Errorf("expected messages across 3 partitions; got messages across %v partitions", v)
  1024  	}
  1025  }
  1026  
  1027  func testReaderConsumerGroupRebalance(t *testing.T, ctx context.Context, r *Reader) {
  1028  	r2 := NewReader(r.config)
   1029  	defer r2.Close()
  1030  
  1031  	const (
  1032  		N          = 12
  1033  		partitions = 2
  1034  	)
  1035  
  1036  	client, shutdown := newLocalClient()
  1037  	defer shutdown()
  1038  
   1039  	// write N*partitions messages so that, after the rebalance, both readers have messages to consume
  1040  	writer := &Writer{
  1041  		Addr:      TCP(r.config.Brokers...),
  1042  		Topic:     r.config.Topic,
  1043  		Balancer:  &RoundRobin{},
  1044  		BatchSize: 1,
  1045  		Transport: client.Transport,
  1046  	}
  1047  	if err := writer.WriteMessages(ctx, makeTestSequence(N*partitions)...); err != nil {
  1048  		t.Fatalf("bad write messages: %v", err)
  1049  	}
  1050  	if err := writer.Close(); err != nil {
  1051  		t.Fatalf("bad write err: %v", err)
  1052  	}
  1053  
  1054  	// after rebalance, each reader should have a partition to itself
  1055  	for i := 0; i < N; i++ {
  1056  		if _, err := r2.FetchMessage(ctx); err != nil {
  1057  			t.Errorf("expect to read from reader 2")
  1058  		}
  1059  		if _, err := r.FetchMessage(ctx); err != nil {
  1060  			t.Errorf("expect to read from reader 1")
  1061  		}
  1062  	}
  1063  }
  1064  
  1065  func testReaderConsumerGroupRebalanceAcrossTopics(t *testing.T, ctx context.Context, r *Reader) {
  1066  	// create a second reader that shares the groupID, but reads from a different topic
  1067  	client, topic2, shutdown := newLocalClientAndTopic()
  1068  	defer shutdown()
  1069  
  1070  	r2 := NewReader(ReaderConfig{
  1071  		Brokers:           r.config.Brokers,
  1072  		Topic:             topic2,
  1073  		GroupID:           r.config.GroupID,
  1074  		HeartbeatInterval: r.config.HeartbeatInterval,
  1075  		SessionTimeout:    r.config.SessionTimeout,
  1076  		RetentionTime:     r.config.RetentionTime,
  1077  		MinBytes:          r.config.MinBytes,
  1078  		MaxBytes:          r.config.MaxBytes,
  1079  		Logger:            r.config.Logger,
  1080  	})
   1081  	defer r2.Close()
  1082  	prepareReader(t, ctx, r2, makeTestSequence(1)...)
  1083  
  1084  	const (
  1085  		N = 12
  1086  	)
  1087  
   1088  	// write messages to the original topic
  1089  	writer := &Writer{
  1090  		Addr:      TCP(r.config.Brokers...),
  1091  		Topic:     r.config.Topic,
  1092  		Balancer:  &RoundRobin{},
  1093  		BatchSize: 1,
  1094  		Transport: client.Transport,
  1095  	}
  1096  	if err := writer.WriteMessages(ctx, makeTestSequence(N)...); err != nil {
  1097  		t.Fatalf("bad write messages: %v", err)
  1098  	}
  1099  	if err := writer.Close(); err != nil {
  1100  		t.Fatalf("bad write err: %v", err)
  1101  	}
  1102  
  1103  	// after rebalance, r2 should read topic2 and r1 should read ALL of the original topic
  1104  	if _, err := r2.FetchMessage(ctx); err != nil {
  1105  		t.Errorf("expect to read from reader 2")
  1106  	}
  1107  
  1108  	// all N messages on the original topic should be read by the original reader
  1109  	for i := 0; i < N; i++ {
  1110  		if _, err := r.FetchMessage(ctx); err != nil {
  1111  			t.Errorf("expect to read from reader 1")
  1112  		}
  1113  	}
  1114  }
  1115  
  1116  func testReaderConsumerGroupRebalanceAcrossManyPartitionsAndConsumers(t *testing.T, ctx context.Context, r *Reader) {
  1117  	// I've rebalanced up to 100 servers, but the rebalance can take upwards
  1118  	// of a minute and that seems too long for unit tests.  Also, setting this
  1119  	// to a larger number seems to make the kafka broker unresponsive.
  1120  	// TODO research if there's a way to reduce rebalance time across many partitions
  1121  	// svls: the described behavior is due to the thundering herd of readers
  1122  	//       hitting the rebalance timeout.  introducing the 100ms sleep in the
  1123  	//       loop below in order to give time for the sync group to finish has
  1124  	//       greatly helped, though we still hit the timeout from time to time.
  1125  	const N = 8
  1126  
  1127  	var readers []*Reader
  1128  
  1129  	for i := 0; i < N-1; i++ {
  1130  		reader := NewReader(r.config)
  1131  		readers = append(readers, reader)
  1132  		time.Sleep(100 * time.Millisecond)
  1133  	}
  1134  	defer func() {
  1135  		for _, r := range readers {
  1136  			r.Close()
  1137  			time.Sleep(100 * time.Millisecond)
  1138  		}
  1139  	}()
  1140  
  1141  	client, shutdown := newLocalClient()
  1142  	defer shutdown()
  1143  
   1144  	// write messages across all partitions
  1145  	writer := &Writer{
  1146  		Addr:      TCP(r.config.Brokers...),
  1147  		Topic:     r.config.Topic,
  1148  		Balancer:  &RoundRobin{},
  1149  		BatchSize: 1,
  1150  		Transport: client.Transport,
  1151  	}
  1152  	if err := writer.WriteMessages(ctx, makeTestSequence(N*3)...); err != nil {
  1153  		t.Fatalf("bad write messages: %v", err)
  1154  	}
  1155  	if err := writer.Close(); err != nil {
  1156  		t.Fatalf("bad write err: %v", err)
  1157  	}
  1158  
   1159  	// each of the additional readers should receive at least one message
  1160  	for i := 0; i < N-1; i++ {
  1161  		if _, err := readers[i].FetchMessage(ctx); err != nil {
  1162  			t.Errorf("reader %v expected to read 1 message", i)
  1163  		}
  1164  	}
  1165  
  1166  	if _, err := r.FetchMessage(ctx); err != nil {
  1167  		t.Errorf("expect to read from original reader")
  1168  	}
  1169  }
  1170  
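         // TestOffsetStash verifies offsetStash.merge: merging commits for a message
         // records offset+1 (the next offset to consume) per topic/partition, the
         // highest offset wins, and earlier offsets never move the stash backwards.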
  1171  func TestOffsetStash(t *testing.T) {
  1172  	const topic = "topic"
  1173  
  1174  	newMessage := func(partition int, offset int64) Message {
  1175  		return Message{
  1176  			Topic:     topic,
  1177  			Partition: partition,
  1178  			Offset:    offset,
  1179  		}
  1180  	}
  1181  
  1182  	tests := map[string]struct {
  1183  		Given    offsetStash
  1184  		Messages []Message
  1185  		Expected offsetStash
  1186  	}{
  1187  		"nil": {},
  1188  		"empty given, single message": {
  1189  			Given:    offsetStash{},
  1190  			Messages: []Message{newMessage(0, 0)},
  1191  			Expected: offsetStash{
  1192  				topic: {0: 1},
  1193  			},
  1194  		},
  1195  		"ignores earlier offsets": {
  1196  			Given: offsetStash{
  1197  				topic: {0: 2},
  1198  			},
  1199  			Messages: []Message{newMessage(0, 0)},
  1200  			Expected: offsetStash{
  1201  				topic: {0: 2},
  1202  			},
  1203  		},
  1204  		"uses latest offset": {
  1205  			Given: offsetStash{},
  1206  			Messages: []Message{
  1207  				newMessage(0, 2),
  1208  				newMessage(0, 3),
  1209  				newMessage(0, 1),
  1210  			},
  1211  			Expected: offsetStash{
  1212  				topic: {0: 4},
  1213  			},
  1214  		},
   1215  		"uses latest offset, across multiple partitions": {
  1216  			Given: offsetStash{},
  1217  			Messages: []Message{
  1218  				newMessage(0, 2),
  1219  				newMessage(0, 3),
  1220  				newMessage(0, 1),
  1221  				newMessage(1, 5),
  1222  				newMessage(1, 6),
  1223  			},
  1224  			Expected: offsetStash{
  1225  				topic: {
  1226  					0: 4,
  1227  					1: 7,
  1228  				},
  1229  			},
  1230  		},
  1231  	}
  1232  
  1233  	for label, test := range tests {
  1234  		t.Run(label, func(t *testing.T) {
  1235  			test.Given.merge(makeCommits(test.Messages...))
  1236  			if !reflect.DeepEqual(test.Expected, test.Given) {
  1237  				t.Errorf("expected %v; got %v", test.Expected, test.Given)
  1238  			}
  1239  		})
  1240  	}
  1241  }
  1242  
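         // mockOffsetCommitter is a test double for the offset-commit path: it counts
         // invocations and fails the first failCount calls with io.EOF before
         // succeeding.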
  1243  type mockOffsetCommitter struct {
  1244  	invocations int
  1245  	failCount   int
  1246  	err         error
  1247  }
  1248  
  1249  func (m *mockOffsetCommitter) offsetCommit(request offsetCommitRequestV2) (offsetCommitResponseV2, error) {
  1250  	m.invocations++
  1251  
  1252  	if m.failCount > 0 {
  1253  		m.failCount--
  1254  		return offsetCommitResponseV2{}, io.EOF
  1255  	}
  1256  
  1257  	return offsetCommitResponseV2{}, nil
  1258  }
  1259  
  1260  func TestValidateReader(t *testing.T) {
  1261  	tests := []struct {
   1262  		config        ReaderConfig
   1263  		errorOccurred bool
   1264  	}{
   1265  		{config: ReaderConfig{}, errorOccurred: true},
   1266  		{config: ReaderConfig{Brokers: []string{"broker1"}}, errorOccurred: true},
   1267  		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1"}, errorOccurred: false},
   1268  		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: -1}, errorOccurred: true},
   1269  		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: 1, MinBytes: -1}, errorOccurred: true},
   1270  		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: 1, MinBytes: 5, MaxBytes: -1}, errorOccurred: true},
   1271  		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: 1, MinBytes: 5, MaxBytes: 6}, errorOccurred: false},
  1272  	}
  1273  	for _, test := range tests {
  1274  		err := test.config.Validate()
   1275  		if test.errorOccurred && err == nil {
   1276  			t.Fail()
   1277  		}
   1278  		if !test.errorOccurred && err != nil {
  1279  			t.Fail()
  1280  		}
  1281  	}
  1282  }
  1283  
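         // TestCommitOffsetsWithRetry runs commitOffsetsWithRetry against a
         // mockCoordinator whose offsetCommitFunc fails the first Fails calls with
         // io.EOF, checking that transient errors are retried and that only exhausting
         // defaultCommitRetries surfaces an error to the caller.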
  1284  func TestCommitOffsetsWithRetry(t *testing.T) {
  1285  	offsets := offsetStash{"topic": {0: 0}}
  1286  
  1287  	tests := map[string]struct {
  1288  		Fails       int
  1289  		Invocations int
  1290  		HasError    bool
  1291  	}{
  1292  		"happy path": {
  1293  			Invocations: 1,
  1294  		},
  1295  		"1 retry": {
  1296  			Fails:       1,
  1297  			Invocations: 2,
  1298  		},
  1299  		"out of retries": {
  1300  			Fails:       defaultCommitRetries + 1,
  1301  			Invocations: defaultCommitRetries,
  1302  			HasError:    true,
  1303  		},
  1304  	}
  1305  
  1306  	for label, test := range tests {
  1307  		t.Run(label, func(t *testing.T) {
  1308  			count := 0
  1309  			gen := &Generation{
  1310  				conn: mockCoordinator{
  1311  					offsetCommitFunc: func(offsetCommitRequestV2) (offsetCommitResponseV2, error) {
  1312  						count++
  1313  						if count <= test.Fails {
  1314  							return offsetCommitResponseV2{}, io.EOF
  1315  						}
  1316  						return offsetCommitResponseV2{}, nil
  1317  					},
  1318  				},
  1319  				done:     make(chan struct{}),
  1320  				log:      func(func(Logger)) {},
  1321  				logError: func(func(Logger)) {},
  1322  			}
  1323  
  1324  			r := &Reader{stctx: context.Background()}
  1325  			err := r.commitOffsetsWithRetry(gen, offsets, defaultCommitRetries)
  1326  			switch {
  1327  			case test.HasError && err == nil:
  1328  				t.Error("bad err: expected not nil; got nil")
  1329  			case !test.HasError && err != nil:
  1330  				t.Errorf("bad err: expected nil; got %v", err)
  1331  			}
  1332  		})
  1333  	}
  1334  }
  1335  
  1336  // Test that a reader won't continually rebalance when there are more consumers
  1337  // than partitions in a group.
  1338  // https://github.com/rbisecke/kafka-go/issues/200
  1339  func TestRebalanceTooManyConsumers(t *testing.T) {
  1340  	ctx := context.Background()
  1341  	conf := ReaderConfig{
  1342  		Brokers: []string{"localhost:9092"},
  1343  		GroupID: makeGroupID(),
  1344  		Topic:   makeTopic(),
  1345  		MaxWait: time.Second,
  1346  	}
  1347  
  1348  	// Create the first reader and wait for it to become the leader.
  1349  	r1 := NewReader(conf)
  1350  	prepareReader(t, ctx, r1, makeTestSequence(1)...)
  1351  	r1.ReadMessage(ctx)
  1352  	// Clear the stats from the first rebalance.
  1353  	r1.Stats()
  1354  
  1355  	// Second reader should cause one rebalance for each r1 and r2.
  1356  	r2 := NewReader(conf)
  1357  
  1358  	// Wait for rebalances.
  1359  	time.Sleep(5 * time.Second)
  1360  
  1361  	// Before the fix, r2 would cause continuous rebalances,
  1362  	// as it tried to handshake() repeatedly.
  1363  	rebalances := r1.Stats().Rebalances + r2.Stats().Rebalances
  1364  	if rebalances > 2 {
  1365  		t.Errorf("unexpected rebalances to first reader, got %d", rebalances)
  1366  	}
  1367  }
  1368  
  1369  func TestConsumerGroupWithMissingTopic(t *testing.T) {
  1370  	t.Skip("this test doesn't work when the cluster is configured to auto-create topics")
  1371  
  1372  	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
  1373  	defer cancel()
  1374  
  1375  	conf := ReaderConfig{
  1376  		Brokers:                []string{"localhost:9092"},
  1377  		GroupID:                makeGroupID(),
  1378  		Topic:                  makeTopic(),
  1379  		MaxWait:                time.Second,
  1380  		PartitionWatchInterval: 100 * time.Millisecond,
  1381  		WatchPartitionChanges:  true,
  1382  	}
  1383  
  1384  	r := NewReader(conf)
  1385  	defer r.Close()
  1386  
  1387  	recvErr := make(chan error, 1)
  1388  	go func() {
  1389  		_, err := r.ReadMessage(ctx)
  1390  		recvErr <- err
  1391  	}()
  1392  
  1393  	time.Sleep(time.Second)
  1394  	client, shutdown := newLocalClientWithTopic(conf.Topic, 1)
  1395  	defer shutdown()
  1396  
  1397  	w := &Writer{
  1398  		Addr:         TCP(r.config.Brokers...),
  1399  		Topic:        r.config.Topic,
  1400  		BatchTimeout: 10 * time.Millisecond,
  1401  		BatchSize:    1,
  1402  		Transport:    client.Transport,
  1403  	}
  1404  	defer w.Close()
  1405  	if err := w.WriteMessages(ctx, Message{}); err != nil {
  1406  		t.Fatalf("write error: %+v", err)
  1407  	}
  1408  
  1409  	if err := <-recvErr; err != nil {
  1410  		t.Fatalf("read error: %+v", err)
  1411  	}
  1412  
  1413  	nMsgs := r.Stats().Messages
  1414  	if nMsgs != 1 {
  1415  		t.Fatalf("expected to receive one message, but got %d", nMsgs)
  1416  	}
  1417  }
  1418  
  1419  func TestConsumerGroupWithTopic(t *testing.T) {
  1420  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  1421  	defer cancel()
  1422  
  1423  	conf := ReaderConfig{
  1424  		Brokers:                []string{"localhost:9092"},
  1425  		GroupID:                makeGroupID(),
  1426  		Topic:                  makeTopic(),
  1427  		MaxWait:                time.Second,
  1428  		PartitionWatchInterval: 100 * time.Millisecond,
  1429  		WatchPartitionChanges:  true,
  1430  		Logger:                 newTestKafkaLogger(t, "Reader:"),
  1431  	}
  1432  
  1433  	r := NewReader(conf)
  1434  	defer r.Close()
  1435  
   1436  	recvErr := make(chan error, 1)
  1437  	go func() {
  1438  		msg, err := r.ReadMessage(ctx)
  1439  		t.Log(msg)
  1440  		recvErr <- err
  1441  	}()
  1442  
  1443  	time.Sleep(conf.MaxWait)
  1444  
  1445  	client, shutdown := newLocalClientWithTopic(conf.Topic, 1)
  1446  	defer shutdown()
  1447  
  1448  	w := &Writer{
  1449  		Addr:         TCP(r.config.Brokers...),
  1450  		Topic:        conf.Topic,
  1451  		BatchTimeout: 10 * time.Millisecond,
  1452  		BatchSize:    1,
  1453  		Transport:    client.Transport,
  1454  		Logger:       newTestKafkaLogger(t, "Writer:"),
  1455  	}
  1456  	defer w.Close()
  1457  	if err := w.WriteMessages(ctx, Message{Value: []byte(conf.Topic)}); err != nil {
  1458  		t.Fatalf("write error: %+v", err)
  1459  	}
  1460  
  1461  	if err := <-recvErr; err != nil {
  1462  		t.Fatalf("read error: %+v", err)
  1463  	}
  1464  
  1465  	nMsgs := r.Stats().Messages
  1466  	if nMsgs != 1 {
  1467  		t.Fatalf("expected to receive 1 message, but got %d", nMsgs)
  1468  	}
  1469  }
  1470  
  1471  func TestConsumerGroupWithGroupTopicsSingle(t *testing.T) {
  1472  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  1473  	defer cancel()
  1474  
  1475  	conf := ReaderConfig{
  1476  		Brokers:                []string{"localhost:9092"},
  1477  		GroupID:                makeGroupID(),
  1478  		GroupTopics:            []string{makeTopic()},
  1479  		MaxWait:                time.Second,
  1480  		PartitionWatchInterval: 100 * time.Millisecond,
  1481  		WatchPartitionChanges:  true,
  1482  		Logger:                 newTestKafkaLogger(t, "Reader:"),
  1483  	}
  1484  
  1485  	r := NewReader(conf)
  1486  	defer r.Close()
  1487  
  1488  	recvErr := make(chan error, len(conf.GroupTopics))
  1489  	go func() {
  1490  		msg, err := r.ReadMessage(ctx)
  1491  		t.Log(msg)
  1492  		recvErr <- err
  1493  	}()
  1494  
  1495  	time.Sleep(conf.MaxWait)
  1496  
  1497  	for i, topic := range conf.GroupTopics {
  1498  		client, shutdown := newLocalClientWithTopic(topic, 1)
  1499  		defer shutdown()
  1500  
  1501  		w := &Writer{
  1502  			Addr:         TCP(r.config.Brokers...),
  1503  			Topic:        topic,
  1504  			BatchTimeout: 10 * time.Millisecond,
  1505  			BatchSize:    1,
  1506  			Transport:    client.Transport,
  1507  			Logger:       newTestKafkaLogger(t, fmt.Sprintf("Writer(%d):", i)),
  1508  		}
  1509  		defer w.Close()
  1510  		if err := w.WriteMessages(ctx, Message{Value: []byte(topic)}); err != nil {
  1511  			t.Fatalf("write error: %+v", err)
  1512  		}
  1513  	}
  1514  
  1515  	if err := <-recvErr; err != nil {
  1516  		t.Fatalf("read error: %+v", err)
  1517  	}
  1518  
  1519  	nMsgs := r.Stats().Messages
  1520  	if nMsgs != int64(len(conf.GroupTopics)) {
  1521  		t.Fatalf("expected to receive %d messages, but got %d", len(conf.GroupTopics), nMsgs)
  1522  	}
  1523  }
  1524  
   1525  func TestConsumerGroupWithGroupTopicsMultiple(t *testing.T) {
  1526  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  1527  	defer cancel()
  1528  
  1529  	client, shutdown := newLocalClient()
  1530  	defer shutdown()
  1531  
  1532  	conf := ReaderConfig{
  1533  		Brokers:                []string{"localhost:9092"},
  1534  		GroupID:                makeGroupID(),
  1535  		GroupTopics:            []string{makeTopic(), makeTopic()},
  1536  		MaxWait:                time.Second,
  1537  		PartitionWatchInterval: 100 * time.Millisecond,
  1538  		WatchPartitionChanges:  true,
  1539  		Logger:                 newTestKafkaLogger(t, "Reader:"),
  1540  	}
  1541  
  1542  	r := NewReader(conf)
  1543  
  1544  	w := &Writer{
  1545  		Addr:         TCP(r.config.Brokers...),
  1546  		BatchTimeout: 10 * time.Millisecond,
  1547  		BatchSize:    1,
  1548  		Transport:    client.Transport,
  1549  		Logger:       newTestKafkaLogger(t, "Writer:"),
  1550  	}
  1551  	defer w.Close()
  1552  
  1553  	time.Sleep(time.Second)
  1554  
  1555  	var msgs []Message
  1556  	for _, topic := range conf.GroupTopics {
  1557  		msgs = append(msgs, Message{Topic: topic})
  1558  	}
  1559  	if err := w.WriteMessages(ctx, msgs...); err != nil {
  1560  		t.Logf("write error: %+v", err)
  1561  	}
  1562  
  1563  	wg := new(sync.WaitGroup)
  1564  	wg.Add(len(msgs))
  1565  
  1566  	go func() {
  1567  		wg.Wait()
  1568  		t.Log("closing reader")
  1569  		r.Close()
  1570  	}()
  1571  
  1572  	for {
  1573  		msg, err := r.ReadMessage(ctx)
  1574  		if err != nil {
  1575  			if err == io.EOF {
  1576  				t.Log("reader closed")
  1577  				break
  1578  			}
  1579  
  1580  			t.Fatalf("read error: %+v", err)
  1581  		} else {
  1582  			t.Logf("message read: %+v", msg)
  1583  			wg.Done()
  1584  		}
  1585  	}
  1586  
  1587  	nMsgs := r.Stats().Messages
  1588  	if nMsgs != int64(len(conf.GroupTopics)) {
  1589  		t.Fatalf("expected to receive %d messages, but got %d", len(conf.GroupTopics), nMsgs)
  1590  	}
  1591  }
  1592  
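         // getOffsets fetches the committed offsets for partition 0 of config.Topic in
         // group config.GroupID by asking the group coordinator directly, returning a
         // partition -> committed offset map for tests to compare against expectations.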
  1593  func getOffsets(t *testing.T, config ReaderConfig) map[int]int64 {
  1594  	// minimal config required to lookup coordinator
  1595  	cg := ConsumerGroup{
  1596  		config: ConsumerGroupConfig{
  1597  			ID:      config.GroupID,
  1598  			Brokers: config.Brokers,
  1599  			Dialer:  config.Dialer,
  1600  		},
  1601  	}
  1602  
  1603  	conn, err := cg.coordinator()
  1604  	if err != nil {
  1605  		t.Errorf("unable to connect to coordinator: %v", err)
  1606  	}
  1607  	defer conn.Close()
  1608  
  1609  	offsets, err := conn.offsetFetch(offsetFetchRequestV1{
  1610  		GroupID: config.GroupID,
  1611  		Topics: []offsetFetchRequestV1Topic{{
  1612  			Topic:      config.Topic,
  1613  			Partitions: []int32{0},
  1614  		}},
  1615  	})
  1616  	if err != nil {
  1617  		t.Errorf("bad fetchOffsets: %v", err)
  1618  	}
  1619  
  1620  	m := map[int]int64{}
  1621  
  1622  	for _, r := range offsets.Responses {
  1623  		if r.Topic == config.Topic {
  1624  			for _, p := range r.PartitionResponses {
  1625  				m[int(p.Partition)] = p.Offset
  1626  			}
  1627  		}
  1628  	}
  1629  
  1630  	return m
  1631  }
  1632  
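         // connTO bounds the dialer timeout used by the cannot-connect tests below;
         // connTestTO gives their contexts twice that long so FetchMessage is expected
         // to fail on its own before the context deadline is reached.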
  1633  const (
  1634  	connTO     = 1 * time.Second
  1635  	connTestTO = 2 * connTO
  1636  )
  1637  
  1638  func TestErrorCannotConnect(t *testing.T) {
  1639  	r := NewReader(ReaderConfig{
  1640  		Brokers:     []string{"localhost:9093"},
  1641  		Dialer:      &Dialer{Timeout: connTO},
  1642  		MaxAttempts: 1,
  1643  		Topic:       makeTopic(),
  1644  	})
  1645  	ctx, cancel := context.WithTimeout(context.Background(), connTestTO)
  1646  	defer cancel()
  1647  
  1648  	_, err := r.FetchMessage(ctx)
  1649  	if err == nil || ctx.Err() != nil {
  1650  		t.Errorf("Reader.FetchMessage must fail when it cannot " +
  1651  			"connect")
  1652  	}
  1653  }
  1654  
  1655  func TestErrorCannotConnectGroupSubscription(t *testing.T) {
  1656  	r := NewReader(ReaderConfig{
  1657  		Brokers:     []string{"localhost:9093"},
  1658  		Dialer:      &Dialer{Timeout: 1 * time.Second},
  1659  		GroupID:     "foobar",
  1660  		MaxAttempts: 1,
  1661  		Topic:       makeTopic(),
  1662  	})
  1663  	ctx, cancel := context.WithTimeout(context.Background(), connTestTO)
  1664  	defer cancel()
  1665  
  1666  	_, err := r.FetchMessage(ctx)
  1667  	if err == nil || ctx.Err() != nil {
  1668  		t.Errorf("Reader.FetchMessage with a group subscription " +
  1669  			"must fail when it cannot connect")
  1670  	}
  1671  }