github.com/streamdal/segmentio-kafka-go@v0.4.47-streamdal/read.go

package kafka

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"reflect"
)

var errShortRead = errors.New("not enough bytes available to load the response")

func peekRead(r *bufio.Reader, sz int, n int, f func([]byte)) (int, error) {
	if n > sz {
		return sz, errShortRead
	}
	b, err := r.Peek(n)
	if err != nil {
		return sz, err
	}
	f(b)
	return discardN(r, sz, n)
}

func readInt8(r *bufio.Reader, sz int, v *int8) (int, error) {
	return peekRead(r, sz, 1, func(b []byte) { *v = makeInt8(b) })
}

func readInt16(r *bufio.Reader, sz int, v *int16) (int, error) {
	return peekRead(r, sz, 2, func(b []byte) { *v = makeInt16(b) })
}

func readInt32(r *bufio.Reader, sz int, v *int32) (int, error) {
	return peekRead(r, sz, 4, func(b []byte) { *v = makeInt32(b) })
}

func readInt64(r *bufio.Reader, sz int, v *int64) (int, error) {
	return peekRead(r, sz, 8, func(b []byte) { *v = makeInt64(b) })
}

func readVarInt(r *bufio.Reader, sz int, v *int64) (remain int, err error) {
	// Optimistically assume that most of the time, there will be data buffered
	// in the reader. If this is not the case, the buffer will be refilled after
	// consuming zero bytes from the input.
	input, _ := r.Peek(r.Buffered())
	x := uint64(0)
	s := uint(0)

	for {
		if len(input) > sz {
			input = input[:sz]
		}

		for i, b := range input {
			if b < 0x80 {
				x |= uint64(b) << s
				*v = int64(x>>1) ^ -(int64(x) & 1)
				n, err := r.Discard(i + 1)
				return sz - n, err
			}

			x |= uint64(b&0x7f) << s
			s += 7
		}

		// Make room in the input buffer to load more data from the underlying
		// stream. The x and s variables are left untouched, ensuring that the
		// varint decoding can continue on the next loop iteration.
		n, _ := r.Discard(len(input))
		sz -= n
		if sz == 0 {
			return 0, errShortRead
		}

		// Fill the buffer: ask for one more byte, but in practice the reader
		// will load way more from the underlying stream.
		if _, err := r.Peek(1); err != nil {
			if errors.Is(err, io.EOF) {
				err = errShortRead
			}
			return sz, err
		}

		// Grab as many bytes as possible from the buffer, then go on to the
		// next loop iteration which is going to consume it.
		input, _ = r.Peek(r.Buffered())
	}
}
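
// zigzagDecode is an illustrative sketch (hypothetical, not part of the
// upstream file) that isolates the zigzag step performed by readVarInt above.
// Kafka varints carry signed values zigzag-encoded, so the unsigned
// accumulator x maps back to a signed int64: the wire bytes 0xAC 0x02
// accumulate to x = 300, which decodes to 150, and the single byte 0x01
// (x = 1) decodes to -1.
func zigzagDecode(x uint64) int64 {
	return int64(x>>1) ^ -(int64(x) & 1)
}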

func readBool(r *bufio.Reader, sz int, v *bool) (int, error) {
	return peekRead(r, sz, 1, func(b []byte) { *v = b[0] != 0 })
}

func readString(r *bufio.Reader, sz int, v *string) (int, error) {
	return readStringWith(r, sz, func(r *bufio.Reader, sz int, n int) (remain int, err error) {
		*v, remain, err = readNewString(r, sz, n)
		return
	})
}

func readStringWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int, int) (int, error)) (int, error) {
	var err error
	var len int16

	if sz, err = readInt16(r, sz, &len); err != nil {
		return sz, err
	}

	n := int(len)
	if n > sz {
		return sz, errShortRead
	}

	return cb(r, sz, n)
}

func readNewString(r *bufio.Reader, sz int, n int) (string, int, error) {
	b, sz, err := readNewBytes(r, sz, n)
	return string(b), sz, err
}

func readBytes(r *bufio.Reader, sz int, v *[]byte) (int, error) {
	return readBytesWith(r, sz, func(r *bufio.Reader, sz int, n int) (remain int, err error) {
		*v, remain, err = readNewBytes(r, sz, n)
		return
	})
}

func readBytesWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int, int) (int, error)) (int, error) {
	var err error
	var n int

	if sz, err = readArrayLen(r, sz, &n); err != nil {
		return sz, err
	}

	if n > sz {
		return sz, errShortRead
	}

	return cb(r, sz, n)
}

func readNewBytes(r *bufio.Reader, sz int, n int) ([]byte, int, error) {
	var err error
	var b []byte
	var shortRead bool

	if n > 0 {
		if sz < n {
			n = sz
			shortRead = true
		}

		b = make([]byte, n)
		n, err = io.ReadFull(r, b)
		b = b[:n]
		sz -= n

		if err == nil && shortRead {
			err = errShortRead
		}
	}

	return b, sz, err
}

func readArrayLen(r *bufio.Reader, sz int, n *int) (int, error) {
	var err error
	var len int32
	if sz, err = readInt32(r, sz, &len); err != nil {
		return sz, err
	}
	*n = int(len)
	return sz, nil
}

func readArrayWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int) (int, error)) (int, error) {
	var err error
	var len int32

	if sz, err = readInt32(r, sz, &len); err != nil {
		return sz, err
	}

	for n := int(len); n > 0; n-- {
		if sz, err = cb(r, sz); err != nil {
			break
		}
	}

	return sz, err
}

func readStringArray(r *bufio.Reader, sz int, v *[]string) (remain int, err error) {
	var content []string
	fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
		var value string
		if fnRemain, fnErr = readString(r, size, &value); fnErr != nil {
			return
		}
		content = append(content, value)
		return
	}
	if remain, err = readArrayWith(r, sz, fn); err != nil {
		return
	}

	*v = content
	return
}
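
// readInt32ArrayExample is an illustrative sketch (hypothetical, not part of
// the upstream file) of the callback pattern readArrayWith expects: the
// callback decodes one element and returns the updated remaining byte count,
// and readArrayWith invokes it once per element of the array. readStringArray
// above and readMapStringInt32 below follow the same shape.
func readInt32ArrayExample(r *bufio.Reader, sz int, v *[]int32) (remain int, err error) {
	var values []int32
	fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
		var value int32
		if fnRemain, fnErr = readInt32(r, size, &value); fnErr != nil {
			return
		}
		values = append(values, value)
		return
	}
	if remain, err = readArrayWith(r, sz, fn); err != nil {
		return
	}

	*v = values
	return
}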

func readMapStringInt32(r *bufio.Reader, sz int, v *map[string][]int32) (remain int, err error) {
	var len int32
	if remain, err = readInt32(r, sz, &len); err != nil {
		return
	}

	content := make(map[string][]int32, len)
	for i := 0; i < int(len); i++ {
		var key string
		var values []int32

		if remain, err = readString(r, remain, &key); err != nil {
			return
		}

		fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
			var value int32
			if fnRemain, fnErr = readInt32(r, size, &value); fnErr != nil {
				return
			}
			values = append(values, value)
			return
		}
		if remain, err = readArrayWith(r, remain, fn); err != nil {
			return
		}

		content[key] = values
	}
	*v = content

	return
}

func read(r *bufio.Reader, sz int, a interface{}) (int, error) {
	switch v := a.(type) {
	case *int8:
		return readInt8(r, sz, v)
	case *int16:
		return readInt16(r, sz, v)
	case *int32:
		return readInt32(r, sz, v)
	case *int64:
		return readInt64(r, sz, v)
	case *bool:
		return readBool(r, sz, v)
	case *string:
		return readString(r, sz, v)
	case *[]byte:
		return readBytes(r, sz, v)
	}
	switch v := reflect.ValueOf(a).Elem(); v.Kind() {
	case reflect.Struct:
		return readStruct(r, sz, v)
	case reflect.Slice:
		return readSlice(r, sz, v)
	default:
		panic(fmt.Sprintf("unsupported type: %T", a))
	}
}

func readStruct(r *bufio.Reader, sz int, v reflect.Value) (int, error) {
	var err error
	for i, n := 0, v.NumField(); i != n; i++ {
		if sz, err = read(r, sz, v.Field(i).Addr().Interface()); err != nil {
			return sz, err
		}
	}
	return sz, nil
}

func readSlice(r *bufio.Reader, sz int, v reflect.Value) (int, error) {
	var err error
	var len int32

	if sz, err = readInt32(r, sz, &len); err != nil {
		return sz, err
	}

	if n := int(len); n < 0 {
		v.Set(reflect.Zero(v.Type()))
	} else {
		v.Set(reflect.MakeSlice(v.Type(), n, n))

		for i := 0; i != n; i++ {
			if sz, err = read(r, sz, v.Index(i).Addr().Interface()); err != nil {
				return sz, err
			}
		}
	}

	return sz, nil
}
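
// The reflection path above is how the fetch response readers below decode a
// fixed-layout partition header in one call. An illustrative sketch
// (hypothetical, not part of the upstream file):
//
//	var p struct {
//		Partition           int32
//		ErrorCode           int16
//		HighwaterMarkOffset int64
//	}
//	remain, err = read(r, remain, &p) // fields decode in declaration order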

func readFetchResponseHeaderV2(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) {
	var n int32
	var p struct {
		Partition           int32
		ErrorCode           int16
		HighwaterMarkOffset int64
		MessageSetSize      int32
	}

	if remain, err = readInt32(r, size, &throttle); err != nil {
		return
	}

	if remain, err = readInt32(r, remain, &n); err != nil {
		return
	}

	// This error should never trigger, unless there's a bug in the kafka client
	// or server.
	if n != 1 {
		err = fmt.Errorf("1 kafka topic was expected in the fetch response but the client received %d", n)
		return
	}

	// We ignore the topic name because we requested messages for a single
	// topic; unless there's a bug in the kafka server, we will have received
	// the name of the topic that we requested.
	if remain, err = discardString(r, remain); err != nil {
		return
	}

	if remain, err = readInt32(r, remain, &n); err != nil {
		return
	}

	// This error should never trigger, unless there's a bug in the kafka client
	// or server.
	if n != 1 {
		err = fmt.Errorf("1 kafka partition was expected in the fetch response but the client received %d", n)
		return
	}

	if remain, err = read(r, remain, &p); err != nil {
		return
	}

	if p.ErrorCode != 0 {
		err = Error(p.ErrorCode)
		return
	}

	// This error should never trigger, unless there's a bug in the kafka client
	// or server.
	if remain != int(p.MessageSetSize) {
		err = fmt.Errorf("the size of the message set in a fetch response doesn't match the number of remaining bytes (message set size = %d, remaining bytes = %d)", p.MessageSetSize, remain)
		return
	}

	watermark = p.HighwaterMarkOffset
	return
}

func readFetchResponseHeaderV5(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) {
	var n int32
	type AbortedTransaction struct {
		ProducerId  int64
		FirstOffset int64
	}
	var p struct {
		Partition           int32
		ErrorCode           int16
		HighwaterMarkOffset int64
		LastStableOffset    int64
		LogStartOffset      int64
	}
	var messageSetSize int32
	var abortedTransactions []AbortedTransaction

	if remain, err = readInt32(r, size, &throttle); err != nil {
		return
	}

	if remain, err = readInt32(r, remain, &n); err != nil {
		return
	}

	// This error should never trigger, unless there's a bug in the kafka client
	// or server.
	if n != 1 {
		err = fmt.Errorf("1 kafka topic was expected in the fetch response but the client received %d", n)
		return
	}

	// We ignore the topic name because we requested messages for a single
	// topic; unless there's a bug in the kafka server, we will have received
	// the name of the topic that we requested.
	if remain, err = discardString(r, remain); err != nil {
		return
	}

	if remain, err = readInt32(r, remain, &n); err != nil {
		return
	}

	// This error should never trigger, unless there's a bug in the kafka client
	// or server.
	if n != 1 {
		err = fmt.Errorf("1 kafka partition was expected in the fetch response but the client received %d", n)
		return
	}

	if remain, err = read(r, remain, &p); err != nil {
		return
	}

	var abortedTransactionLen int
	if remain, err = readArrayLen(r, remain, &abortedTransactionLen); err != nil {
		return
	}

	if abortedTransactionLen == -1 {
		abortedTransactions = nil
	} else {
		abortedTransactions = make([]AbortedTransaction, abortedTransactionLen)
		for i := 0; i < abortedTransactionLen; i++ {
			if remain, err = read(r, remain, &abortedTransactions[i]); err != nil {
				return
			}
		}
	}

	if p.ErrorCode != 0 {
		err = Error(p.ErrorCode)
		return
	}

	remain, err = readInt32(r, remain, &messageSetSize)
	if err != nil {
		return
	}

	// This error should never trigger, unless there's a bug in the kafka client
	// or server.
	if remain != int(messageSetSize) {
		err = fmt.Errorf("the size of the message set in a fetch response doesn't match the number of remaining bytes (message set size = %d, remaining bytes = %d)", messageSetSize, remain)
		return
	}

	watermark = p.HighwaterMarkOffset
	return
}
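
// Unlike the V2 and V5 variants above, the V10 header below also carries a
// response-level error code and a fetch session id (discarded via
// discardInt32) ahead of the topic list; the remaining layout checks are the
// same.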

func readFetchResponseHeaderV10(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) {
	var n int32
	var errorCode int16
	type AbortedTransaction struct {
		ProducerId  int64
		FirstOffset int64
	}
	var p struct {
		Partition           int32
		ErrorCode           int16
		HighwaterMarkOffset int64
		LastStableOffset    int64
		LogStartOffset      int64
	}
	var messageSetSize int32
	var abortedTransactions []AbortedTransaction

	if remain, err = readInt32(r, size, &throttle); err != nil {
		return
	}

	if remain, err = readInt16(r, remain, &errorCode); err != nil {
		return
	}
	if errorCode != 0 {
		err = Error(errorCode)
		return
	}

	if remain, err = discardInt32(r, remain); err != nil {
		return
	}

	if remain, err = readInt32(r, remain, &n); err != nil {
		return
	}

	// This error should never trigger, unless there's a bug in the kafka client
	// or server.
	if n != 1 {
		err = fmt.Errorf("1 kafka topic was expected in the fetch response but the client received %d", n)
		return
	}

	// We ignore the topic name because we requested messages for a single
	// topic; unless there's a bug in the kafka server, we will have received
	// the name of the topic that we requested.
	if remain, err = discardString(r, remain); err != nil {
		return
	}

	if remain, err = readInt32(r, remain, &n); err != nil {
		return
	}

	// This error should never trigger, unless there's a bug in the kafka client
	// or server.
	if n != 1 {
		err = fmt.Errorf("1 kafka partition was expected in the fetch response but the client received %d", n)
		return
	}

	if remain, err = read(r, remain, &p); err != nil {
		return
	}

	var abortedTransactionLen int
	if remain, err = readArrayLen(r, remain, &abortedTransactionLen); err != nil {
		return
	}

	if abortedTransactionLen == -1 {
		abortedTransactions = nil
	} else {
		abortedTransactions = make([]AbortedTransaction, abortedTransactionLen)
		for i := 0; i < abortedTransactionLen; i++ {
			if remain, err = read(r, remain, &abortedTransactions[i]); err != nil {
				return
			}
		}
	}

	if p.ErrorCode != 0 {
		err = Error(p.ErrorCode)
		return
	}

	remain, err = readInt32(r, remain, &messageSetSize)
	if err != nil {
		return
	}

	// This error should never trigger, unless there's a bug in the kafka client
	// or server.
	if remain != int(messageSetSize) {
		err = fmt.Errorf("the size of the message set in a fetch response doesn't match the number of remaining bytes (message set size = %d, remaining bytes = %d)", messageSetSize, remain)
		return
	}

	watermark = p.HighwaterMarkOffset
	return
}
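
// skipFetchResponseV10 is an illustrative sketch (hypothetical, not part of
// the upstream file) of how a caller typically drives one of the header
// readers above: parse the header, then treat the bytes reported in remain as
// the message set. Here the message set is simply discarded; a real consumer
// would decode records from those bytes instead.
func skipFetchResponseV10(r *bufio.Reader, size int) (highWaterMark int64, err error) {
	var remain int
	if _, highWaterMark, remain, err = readFetchResponseHeaderV10(r, size); err != nil {
		return
	}
	// Discard the message set so the reader ends up positioned at the end of
	// the fetch response.
	_, err = discardN(r, remain, remain)
	return
}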