github.com/hoveychen/kafka-go@v0.4.42/client_test.go

package kafka

import (
	"bytes"
	"context"
	"errors"
	"io"
	"math/rand"
	"net"
	"testing"
	"time"

	"github.com/hoveychen/kafka-go/compress"
	ktesting "github.com/hoveychen/kafka-go/testing"
)

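// newLocalClientAndTopic creates a client connected to the local broker along
// with a freshly created, single-partition topic. The returned function
// deletes the topic and releases the client's resources.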
func newLocalClientAndTopic() (*Client, string, func()) {
	topic := makeTopic()
	client, shutdown := newLocalClientWithTopic(topic, 1)
	return client, topic, shutdown
}

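// newLocalClientWithTopic creates a client connected to the local broker and
// creates the named topic with the requested number of partitions, panicking
// if the topic cannot be created. The returned function deletes the topic and
// shuts the client down.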
func newLocalClientWithTopic(topic string, partitions int) (*Client, func()) {
	client, shutdown := newLocalClient()
	if err := clientCreateTopic(client, topic, partitions); err != nil {
		shutdown()
		panic(err)
	}
	return client, func() {
		client.DeleteTopics(context.Background(), &DeleteTopicsRequest{
			Topics: []string{topic},
		})
		shutdown()
	}
}

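// clientCreateTopic creates a topic with the given number of partitions and a
// replication factor of 1, then polls the cluster (for up to 2 seconds) until
// the topic metadata has propagated to the partition leaders.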
func clientCreateTopic(client *Client, topic string, partitions int) error {
	_, err := client.CreateTopics(context.Background(), &CreateTopicsRequest{
		Topics: []TopicConfig{{
			Topic:             topic,
			NumPartitions:     partitions,
			ReplicationFactor: 1,
		}},
	})
	if err != nil {
		return err
	}

	// Topic creation seems to be asynchronous. Metadata for the topic partition
	// layout in the cluster is available in the controller before being synced
	// with the other brokers, which causes "Error:[3] Unknown Topic Or Partition"
	// when sending requests to the partition leaders.
	//
	// This loop will wait up to 2 seconds polling the cluster until no errors
	// are returned.
	for i := 0; i < 20; i++ {
		r, err := client.Fetch(context.Background(), &FetchRequest{
			Topic:     topic,
			Partition: 0,
			Offset:    0,
		})
		if err == nil && r.Error == nil {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}

	return nil
}

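// clientEndTxn ends a transaction with a 30 second timeout, returning either
// the transport error or the error carried in the broker's response.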
func clientEndTxn(client *Client, req *EndTxnRequest) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()
	resp, err := client.EndTxn(ctx, req)
	if err != nil {
		return err
	}

	return resp.Error
}

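// newLocalClient returns a client pointed at the kafka broker listening on
// localhost.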
func newLocalClient() (*Client, func()) {
	return newClient(TCP("localhost"))
}

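// newClient constructs a client backed by a dedicated transport which tracks
// every connection it opens. The returned shutdown function closes idle
// connections and waits for all tracked connections to terminate.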
func newClient(addr net.Addr) (*Client, func()) {
	conns := &ktesting.ConnWaitGroup{
		DialFunc: (&net.Dialer{}).DialContext,
	}

	transport := &Transport{
		Dial:     conns.Dial,
		Resolver: NewBrokerResolver(nil),
	}

	client := &Client{
		Addr:      addr,
		Timeout:   5 * time.Second,
		Transport: transport,
	}

	return client, func() { transport.CloseIdleConnections(); conns.Wait() }
}

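// TestClient runs each client scenario against the local kafka broker with a
// 30 second timeout.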
func TestClient(t *testing.T) {
	tests := []struct {
		scenario string
		function func(*testing.T, context.Context, *Client)
	}{
		{
			scenario: "retrieve committed offsets for a consumer group and topic",
			function: testConsumerGroupFetchOffsets,
		},
	}

	for _, test := range tests {
		testFunc := test.function
		t.Run(test.scenario, func(t *testing.T) {
			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
			defer cancel()

			client, shutdown := newLocalClient()
			defer shutdown()

			testFunc(t, ctx, client)
		})
	}
}

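// testConsumerGroupFetchOffsets produces a fixed number of messages across a
// multi-partition topic, consumes and commits every message through a consumer
// group, then verifies that the committed offsets reported by ConsumerOffsets
// match the number of messages written to each partition.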
func testConsumerGroupFetchOffsets(t *testing.T, ctx context.Context, client *Client) {
	const totalMessages = 144
	const partitions = 12
	const msgPerPartition = totalMessages / partitions

	topic := makeTopic()
	if err := clientCreateTopic(client, topic, partitions); err != nil {
		t.Fatal(err)
	}

	groupId := makeGroupID()
	brokers := []string{"localhost:9092"}

	writer := &Writer{
		Addr:      TCP(brokers...),
		Topic:     topic,
		Balancer:  &RoundRobin{},
		BatchSize: 1,
		Transport: client.Transport,
	}
	if err := writer.WriteMessages(ctx, makeTestSequence(totalMessages)...); err != nil {
		t.Fatalf("bad write messages: %v", err)
	}
	if err := writer.Close(); err != nil {
		t.Fatalf("bad write err: %v", err)
	}

	r := NewReader(ReaderConfig{
		Brokers:  brokers,
		Topic:    topic,
		GroupID:  groupId,
		MinBytes: 1,
		MaxBytes: 10e6,
		MaxWait:  100 * time.Millisecond,
	})
	defer r.Close()

	for i := 0; i < totalMessages; i++ {
		m, err := r.FetchMessage(ctx)
		if err != nil {
			t.Fatalf("error fetching message: %s", err)
		}
		if err := r.CommitMessages(context.Background(), m); err != nil {
			t.Fatal(err)
		}
	}

	offsets, err := client.ConsumerOffsets(ctx, TopicAndGroup{GroupId: groupId, Topic: topic})
	if err != nil {
		t.Fatal(err)
	}

	if len(offsets) != partitions {
		t.Fatalf("expected %d partitions but only received offsets for %d", partitions, len(offsets))
	}

	for i := 0; i < partitions; i++ {
		committedOffset := offsets[i]
		if committedOffset != msgPerPartition {
			t.Errorf("expected partition %d with committed offset of %d but received %d", i, msgPerPartition, committedOffset)
		}
	}
}

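// TestClientProduceAndConsume produces snappy-compressed batches of
// pseudo-random records to a single partition, reads them back with Fetch, and
// verifies both the content and the ordering of every record.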
func TestClientProduceAndConsume(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// Tests a typical kafka use case: data is produced to a partition, then
	// consumed back sequentially. We use snappy compression because kafka
	// streams are often compressed, and verify that each record produced is
	// exposed to the consumer and that order is preserved.
	client, topic, shutdown := newLocalClientAndTopic()
	defer shutdown()

	epoch := time.Now()
	seed := int64(0) // deterministic
	prng := rand.New(rand.NewSource(seed))
	offset := int64(0)

	const numBatches = 100
	const recordsPerBatch = 320
	t.Logf("producing %d batches of %d records...", numBatches, recordsPerBatch)

	for i := 0; i < numBatches; i++ { // produce 100 batches
		records := make([]Record, recordsPerBatch)

		for i := range records {
			v := make([]byte, prng.Intn(999)+1)
			io.ReadFull(prng, v)
			records[i].Time = epoch
			records[i].Value = NewBytes(v)
		}

		res, err := client.Produce(ctx, &ProduceRequest{
			Topic:        topic,
			Partition:    0,
			RequiredAcks: -1,
			Records:      NewRecordReader(records...),
			Compression:  compress.Snappy,
		})
		if err != nil {
			t.Fatal(err)
		}
		if res.Error != nil {
			t.Fatal(res.Error)
		}
		if res.BaseOffset != offset {
			t.Fatalf("records were produced at an unexpected offset, want %d but got %d", offset, res.BaseOffset)
		}
		offset += int64(len(records))
	}

	// Re-seed the PRNG so the consumer side regenerates the exact sequence of
	// values that was produced above.
	prng.Seed(seed)
	offset = 0 // reset
	numFetches := 0
	numRecords := 0

	for numRecords < (numBatches * recordsPerBatch) {
		res, err := client.Fetch(ctx, &FetchRequest{
			Topic:     topic,
			Partition: 0,
			Offset:    offset,
			MinBytes:  1,
			MaxBytes:  256 * 1024,
			MaxWait:   100 * time.Millisecond, // should only hit on the last fetch
		})
		if err != nil {
			t.Fatal(err)
		}
		if res.Error != nil {
			t.Fatal(res.Error)
		}

		for {
			r, err := res.Records.ReadRecord()
			if err != nil {
				if !errors.Is(err, io.EOF) {
					t.Fatal(err)
				}
				break
			}

			if r.Key != nil {
				r.Key.Close()
				t.Error("unexpected non-null key on record at offset", r.Offset)
			}

			// Regenerate the expected value for this record and compare it to
			// the value read back from kafka.
			n := prng.Intn(999) + 1
			a := make([]byte, n)
			b := make([]byte, n)
			io.ReadFull(prng, a)

			_, err = io.ReadFull(r.Value, b)
			r.Value.Close()
			if err != nil {
				t.Fatalf("reading record at offset %d: %v", r.Offset, err)
			}

			if !bytes.Equal(a, b) {
				t.Fatalf("value of record at offset %d mismatches", r.Offset)
			}

			if r.Offset != offset {
				t.Fatalf("record at offset %d was expected to have offset %d", r.Offset, offset)
			}

			offset = r.Offset + 1
			numRecords++
		}

		numFetches++
	}

	t.Logf("%d records were read in %d fetches", numRecords, numFetches)
}