github.com/hack0072008/kafka-go@v1.0.1/fetch_test.go

package kafka

import (
	"context"
	"errors"
	"io"
	"io/ioutil"
	"net"
	"reflect"
	"testing"
	"time"

	"github.com/hack0072008/kafka-go/compress"
)

// copyRecords drains the Key and Value of every record, resets the originals
// so they can be read again, and returns a new slice whose records carry the
// same key and value bytes (the other fields are left at their zero values).
func copyRecords(records []Record) []Record {
	newRecords := make([]Record, len(records))

	for i := range records {
		k, _ := ReadAll(records[i].Key)
		v, _ := ReadAll(records[i].Value)

		records[i].Key = NewBytes(k)
		records[i].Value = NewBytes(v)

		newRecords[i].Key = NewBytes(k)
		newRecords[i].Value = NewBytes(v)
	}

	return newRecords
}

// produceRecords writes n test messages to partition 0 of the topic,
// optionally compressed, and returns the corresponding records in the order
// they were produced.
func produceRecords(t *testing.T, n int, addr net.Addr, topic string, compression compress.Codec) []Record {
	conn, err := (&Dialer{
		Resolver: &net.Resolver{},
	}).DialLeader(context.Background(), addr.Network(), addr.String(), topic, 0)

	if err != nil {
		t.Fatal("failed to open a new kafka connection:", err)
	}
	defer conn.Close()

	msgs := makeTestSequence(n)
	if compression == nil {
		_, err = conn.WriteMessages(msgs...)
	} else {
		_, err = conn.WriteCompressedMessages(compression, msgs...)
	}
	if err != nil {
		t.Fatal(err)
	}

	records := make([]Record, len(msgs))
	for offset, msg := range msgs {
		records[offset] = Record{
			Offset:  int64(offset),
			Key:     NewBytes(msg.Key),
			Value:   NewBytes(msg.Value),
			Headers: msg.Headers,
		}
	}

	return records
}

func TestClientFetch(t *testing.T) {
	client, topic, shutdown := newLocalClientAndTopic()
	defer shutdown()

	records := produceRecords(t, 10, client.Addr, topic, nil)

	res, err := client.Fetch(context.Background(), &FetchRequest{
		Topic:     topic,
		Partition: 0,
		Offset:    0,
		MinBytes:  1,
		MaxBytes:  64 * 1024,
		MaxWait:   100 * time.Millisecond,
	})

	if err != nil {
		t.Fatal(err)
	}

	assertFetchResponse(t, res, &FetchResponse{
		Topic:         topic,
		Partition:     0,
		HighWatermark: 10,
		Records:       NewRecordReader(records...),
	})
}

func TestClientFetchCompressed(t *testing.T) {
	client, topic, shutdown := newLocalClientAndTopic()
	defer shutdown()

	records := produceRecords(t, 10, client.Addr, topic, &compress.GzipCodec)

	res, err := client.Fetch(context.Background(), &FetchRequest{
		Topic:     topic,
		Partition: 0,
		Offset:    0,
		MinBytes:  1,
		MaxBytes:  64 * 1024,
		MaxWait:   100 * time.Millisecond,
	})

	if err != nil {
		t.Fatal(err)
	}

	assertFetchResponse(t, res, &FetchResponse{
		Topic:         topic,
		Partition:     0,
		HighWatermark: 10,
		Records:       NewRecordReader(records...),
	})
}

// assertFetchResponse compares the fetch response received from the broker
// against the expected response, including the full record set.
func assertFetchResponse(t *testing.T, found, expected *FetchResponse) {
	t.Helper()

	if found.Topic != expected.Topic {
		t.Error("invalid topic found in response:", found.Topic)
	}

	if found.Partition != expected.Partition {
		t.Error("invalid partition found in response:", found.Partition)
	}

	if found.HighWatermark != expected.HighWatermark {
		t.Error("invalid high watermark found in response:", found.HighWatermark)
	}

	if found.Error != nil {
		t.Error("unexpected error found in response:", found.Error)
	}

	records1, err := readRecords(found.Records)
	if err != nil {
		t.Error("error reading records:", err)
	}

	records2, err := readRecords(expected.Records)
	if err != nil {
		t.Error("error reading records:", err)
	}

	assertRecords(t, records1, records2)
}

// memoryRecord is an in-memory, comparable form of a Record used by the
// assertions below.
type memoryRecord struct {
	offset  int64
	key     []byte
	value   []byte
	headers []Header
}

// assertRecords reports a test error for every pair of records that differ,
// as well as for any extra or missing records.
func assertRecords(t *testing.T, found, expected []memoryRecord) {
	t.Helper()
	i := 0

	for i < len(found) && i < len(expected) {
		r1 := found[i]
		r2 := expected[i]

		if !reflect.DeepEqual(r1, r2) {
			t.Errorf("records at index %d don't match", i)
			t.Logf("expected:\n%#v", r2)
			t.Logf("found:\n%#v", r1)
		}

		i++
	}

	for i < len(found) {
		t.Errorf("unexpected record at index %d:\n%+v", i, found[i])
		i++
	}

	for i < len(expected) {
		t.Errorf("missing record at index %d:\n%+v", i, expected[i])
		i++
	}
}

// readRecords drains a RecordReader into memoryRecords, translating io.EOF
// into a normal end of stream.
func readRecords(records RecordReader) ([]memoryRecord, error) {
	list := []memoryRecord{}

	for {
		rec, err := records.ReadRecord()

		if err != nil {
			if errors.Is(err, io.EOF) {
				return list, nil
			}
			return nil, err
		}

		var (
			offset      = rec.Offset
			key         = rec.Key
			value       = rec.Value
			headers     = rec.Headers
			bytesKey    []byte
			bytesValues []byte
		)

		if key != nil {
			bytesKey, _ = ioutil.ReadAll(key)
		}

		if value != nil {
			bytesValues, _ = ioutil.ReadAll(value)
		}

		list = append(list, memoryRecord{
			offset:  offset,
			key:     bytesKey,
			value:   bytesValues,
			headers: headers,
		})
	}
}

// TestClientPipeline produces a series of record batches and then fetches
// them back, verifying that offsets are contiguous and timestamps are set.
func TestClientPipeline(t *testing.T) {
	client, topic, shutdown := newLocalClientAndTopic()
	defer shutdown()

	const numBatches = 100
	const recordsPerBatch = 30

	unixEpoch := time.Unix(0, 0)
	records := make([]Record, recordsPerBatch)
	content := []byte("1234567890")

	for i := 0; i < numBatches; i++ {
		for j := range records {
			records[j] = Record{Value: NewBytes(content)}
		}

		_, err := client.Produce(context.Background(), &ProduceRequest{
			Topic:        topic,
			RequiredAcks: -1,
			Records:      NewRecordReader(records...),
			Compression:  Snappy,
		})
		if err != nil {
			t.Fatal(err)
		}
	}

	offset := int64(0)

	for i := 0; i < (numBatches * recordsPerBatch); {
		req := &FetchRequest{
			Topic:    topic,
			Offset:   offset,
			MinBytes: 1,
			MaxBytes: 8192,
			MaxWait:  500 * time.Millisecond,
		}

		res, err := client.Fetch(context.Background(), req)
		if err != nil {
			t.Fatal(err)
		}

		if res.Error != nil {
			t.Fatal(res.Error)
		}

		for {
			r, err := res.Records.ReadRecord()
			if err != nil {
				if err == io.EOF {
					break
				}
				t.Fatal(err)
			}

			if r.Key != nil {
				r.Key.Close()
			}

			if r.Value != nil {
				r.Value.Close()
			}

			if r.Offset != offset {
				t.Errorf("record at index %d has mismatching offset, want %d but got %d", i, offset, r.Offset)
			}

			if r.Time.IsZero() || r.Time.Equal(unixEpoch) {
				t.Errorf("record at index %d with offset %d has no timestamp", i, r.Offset)
			}

			offset = r.Offset + 1
			i++
		}
	}
}