github.com/QuangHoangHao/kafka-go@v0.4.36/fetch_test.go

package kafka

import (
	"context"
	"errors"
	"io"
	"io/ioutil"
	"net"
	"reflect"
	"testing"
	"time"

	"github.com/QuangHoangHao/kafka-go/compress"
)

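// produceRecords writes n test messages to partition 0 of the given topic,
// optionally using the provided compression codec, and returns the Record
// values expected to be read back by a fetch.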
func produceRecords(t *testing.T, n int, addr net.Addr, topic string, compression compress.Codec) []Record {
	conn, err := (&Dialer{
		Resolver: &net.Resolver{},
	}).DialLeader(context.Background(), addr.Network(), addr.String(), topic, 0)

	if err != nil {
		t.Fatal("failed to open a new kafka connection:", err)
	}
	defer conn.Close()

	msgs := makeTestSequence(n)
	if compression == nil {
		_, err = conn.WriteMessages(msgs...)
	} else {
		_, err = conn.WriteCompressedMessages(compression, msgs...)
	}
	if err != nil {
		t.Fatal(err)
	}

	records := make([]Record, len(msgs))
	for offset, msg := range msgs {
		records[offset] = Record{
			Offset:  int64(offset),
			Key:     NewBytes(msg.Key),
			Value:   NewBytes(msg.Value),
			Headers: msg.Headers,
		}
	}

	return records
}

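// TestClientFetch produces ten uncompressed records and verifies that a single
// Fetch request returns them all with the expected high watermark.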
func TestClientFetch(t *testing.T) {
	client, topic, shutdown := newLocalClientAndTopic()
	defer shutdown()

	records := produceRecords(t, 10, client.Addr, topic, nil)

	res, err := client.Fetch(context.Background(), &FetchRequest{
		Topic:     topic,
		Partition: 0,
		Offset:    0,
		MinBytes:  1,
		MaxBytes:  64 * 1024,
		MaxWait:   100 * time.Millisecond,
	})

	if err != nil {
		t.Fatal(err)
	}

	assertFetchResponse(t, res, &FetchResponse{
		Topic:         topic,
		Partition:     0,
		HighWatermark: 10,
		Records:       NewRecordReader(records...),
	})
}

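// TestClientFetchCompressed covers the same scenario as TestClientFetch, but the
// records are produced with gzip compression to exercise the decompression path.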
func TestClientFetchCompressed(t *testing.T) {
	client, topic, shutdown := newLocalClientAndTopic()
	defer shutdown()

	records := produceRecords(t, 10, client.Addr, topic, &compress.GzipCodec)

	res, err := client.Fetch(context.Background(), &FetchRequest{
		Topic:     topic,
		Partition: 0,
		Offset:    0,
		MinBytes:  1,
		MaxBytes:  64 * 1024,
		MaxWait:   100 * time.Millisecond,
	})

	if err != nil {
		t.Fatal(err)
	}

	assertFetchResponse(t, res, &FetchResponse{
		Topic:         topic,
		Partition:     0,
		HighWatermark: 10,
		Records:       NewRecordReader(records...),
	})
}

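// assertFetchResponse compares the topic, partition, high watermark, error, and
// records of a fetch response against the expected response.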
func assertFetchResponse(t *testing.T, found, expected *FetchResponse) {
	t.Helper()

	if found.Topic != expected.Topic {
		t.Error("invalid topic found in response:", found.Topic)
	}

	if found.Partition != expected.Partition {
		t.Error("invalid partition found in response:", found.Partition)
	}

	if found.HighWatermark != expected.HighWatermark {
		t.Error("invalid high watermark found in response:", found.HighWatermark)
	}

	if found.Error != nil {
		t.Error("unexpected error found in response:", found.Error)
	}

	records1, err := readRecords(found.Records)
	if err != nil {
		t.Error("error reading records:", err)
	}

	records2, err := readRecords(expected.Records)
	if err != nil {
		t.Error("error reading records:", err)
	}

	assertRecords(t, records1, records2)
}

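// memoryRecord is an in-memory copy of a Record, with the key and value fully
// read into byte slices so that records can be compared with reflect.DeepEqual.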
type memoryRecord struct {
	offset  int64
	key     []byte
	value   []byte
	headers []Header
}

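// assertRecords compares two slices of memoryRecord index by index, reporting
// mismatching, unexpected, and missing records.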
func assertRecords(t *testing.T, found, expected []memoryRecord) {
	t.Helper()
	i := 0

	for i < len(found) && i < len(expected) {
		r1 := found[i]
		r2 := expected[i]

		if !reflect.DeepEqual(r1, r2) {
			t.Errorf("records at index %d don't match", i)
			t.Logf("expected:\n%#v", r2)
			t.Logf("found:\n%#v", r1)
		}

		i++
	}

	for i < len(found) {
		t.Errorf("unexpected record at index %d:\n%+v", i, found[i])
		i++
	}

	for i < len(expected) {
		t.Errorf("missing record at index %d:\n%+v", i, expected[i])
		i++
	}
}

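// readRecords drains a RecordReader into a slice of memoryRecord, returning the
// accumulated records once io.EOF is reached.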
func readRecords(records RecordReader) ([]memoryRecord, error) {
	list := []memoryRecord{}

	for {
		rec, err := records.ReadRecord()

		if err != nil {
			if errors.Is(err, io.EOF) {
				return list, nil
			}
			return nil, err
		}

		var (
			offset      = rec.Offset
			key         = rec.Key
			value       = rec.Value
			headers     = rec.Headers
			bytesKey    []byte
			bytesValues []byte
		)

		if key != nil {
			bytesKey, _ = ioutil.ReadAll(key)
		}

		if value != nil {
			bytesValues, _ = ioutil.ReadAll(value)
		}

		list = append(list, memoryRecord{
			offset:  offset,
			key:     bytesKey,
			value:   bytesValues,
			headers: headers,
		})
	}
}

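// TestClientPipeline produces 100 snappy-compressed batches of 30 records each,
// then fetches them back in 8 KiB chunks, checking that offsets advance
// sequentially and that every record carries a timestamp that is neither the
// zero value nor the Unix epoch.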
func TestClientPipeline(t *testing.T) {
	client, topic, shutdown := newLocalClientAndTopic()
	defer shutdown()

	const numBatches = 100
	const recordsPerBatch = 30

	unixEpoch := time.Unix(0, 0)
	records := make([]Record, recordsPerBatch)
	content := []byte("1234567890")

	for i := 0; i < numBatches; i++ {
		for j := range records {
			records[j] = Record{Value: NewBytes(content)}
		}

		_, err := client.Produce(context.Background(), &ProduceRequest{
			Topic:        topic,
			RequiredAcks: -1,
			Records:      NewRecordReader(records...),
			Compression:  Snappy,
		})
		if err != nil {
			t.Fatal(err)
		}
	}

	offset := int64(0)

	for i := 0; i < (numBatches * recordsPerBatch); {
		req := &FetchRequest{
			Topic:    topic,
			Offset:   offset,
			MinBytes: 1,
			MaxBytes: 8192,
			MaxWait:  500 * time.Millisecond,
		}

		res, err := client.Fetch(context.Background(), req)
		if err != nil {
			t.Fatal(err)
		}

		if res.Error != nil {
			t.Fatal(res.Error)
		}

		for {
			r, err := res.Records.ReadRecord()
			if err != nil {
				if errors.Is(err, io.EOF) {
					break
				}
				t.Fatal(err)
			}

			if r.Key != nil {
				r.Key.Close()
			}

			if r.Value != nil {
				r.Value.Close()
			}

			if r.Offset != offset {
				t.Errorf("record at index %d has mismatching offset, want %d but got %d", i, offset, r.Offset)
			}

			if r.Time.IsZero() || r.Time.Equal(unixEpoch) {
				t.Errorf("record at index %d with offset %d has no timestamp", i, r.Offset)
			}

			offset = r.Offset + 1
			i++
		}
	}
}