github.com/QuangHoangHao/kafka-go@v0.4.36/recordbatch.go (about)

     1  package kafka
     2  
     3  import (
     4  	"bytes"
     5  	"time"
     6  )
     7  
// recordBatchHeaderSize is the number of bytes occupied by the fixed header
// of a record batch (Kafka message format v2) — everything that precedes the
// serialized records themselves. The sum is spelled out field by field so it
// can be checked against the wire format.
const recordBatchHeaderSize int32 = 0 +
	8 + // base offset
	4 + // batch length
	4 + // partition leader epoch
	1 + // magic
	4 + // crc
	2 + // attributes
	4 + // last offset delta
	8 + // first timestamp
	8 + // max timestamp
	8 + // producer id
	2 + // producer epoch
	4 + // base sequence
	4 // msg count
    22  
    23  func recordBatchSize(msgs ...Message) (size int32) {
    24  	size = recordBatchHeaderSize
    25  	baseTime := msgs[0].Time
    26  
    27  	for i := range msgs {
    28  		msg := &msgs[i]
    29  		msz := recordSize(msg, msg.Time.Sub(baseTime), int64(i))
    30  		size += int32(msz + varIntLen(int64(msz)))
    31  	}
    32  
    33  	return
    34  }
    35  
    36  func compressRecordBatch(codec CompressionCodec, msgs ...Message) (compressed *bytes.Buffer, attributes int16, size int32, err error) {
    37  	compressed = acquireBuffer()
    38  	compressor := codec.NewWriter(compressed)
    39  	wb := &writeBuffer{w: compressor}
    40  
    41  	for i, msg := range msgs {
    42  		wb.writeRecord(0, msgs[0].Time, int64(i), msg)
    43  	}
    44  
    45  	if err = compressor.Close(); err != nil {
    46  		releaseBuffer(compressed)
    47  		return
    48  	}
    49  
    50  	attributes = int16(codec.Code())
    51  	size = recordBatchHeaderSize + int32(compressed.Len())
    52  	return
    53  }
    54  
// recordBatch carries the state needed to serialize one batch of messages in
// the Kafka v2 record-batch format, with optional compression applied eagerly
// at construction time.
type recordBatch struct {
	// required input parameters
	codec      CompressionCodec // nil means the records are written uncompressed
	attributes int16            // batch attributes; set from codec.Code() when compressing
	msgs       []Message

	// parameters calculated during init
	compressed *bytes.Buffer // pre-compressed payload; nil when codec is nil
	size       int32         // total serialized batch size in bytes
}
    65  
    66  func newRecordBatch(codec CompressionCodec, msgs ...Message) (r *recordBatch, err error) {
    67  	r = &recordBatch{
    68  		codec: codec,
    69  		msgs:  msgs,
    70  	}
    71  	if r.codec == nil {
    72  		r.size = recordBatchSize(r.msgs...)
    73  	} else {
    74  		r.compressed, r.attributes, r.size, err = compressRecordBatch(r.codec, r.msgs...)
    75  	}
    76  	return
    77  }
    78  
    79  func (r *recordBatch) writeTo(wb *writeBuffer) {
    80  	wb.writeInt32(r.size)
    81  
    82  	baseTime := r.msgs[0].Time
    83  	lastTime := r.msgs[len(r.msgs)-1].Time
    84  	if r.compressed != nil {
    85  		wb.writeRecordBatch(r.attributes, r.size, len(r.msgs), baseTime, lastTime, func(wb *writeBuffer) {
    86  			wb.Write(r.compressed.Bytes())
    87  		})
    88  		releaseBuffer(r.compressed)
    89  	} else {
    90  		wb.writeRecordBatch(r.attributes, r.size, len(r.msgs), baseTime, lastTime, func(wb *writeBuffer) {
    91  			for i, msg := range r.msgs {
    92  				wb.writeRecord(0, r.msgs[0].Time, int64(i), msg)
    93  			}
    94  		})
    95  	}
    96  }
    97  
    98  func recordSize(msg *Message, timestampDelta time.Duration, offsetDelta int64) int {
    99  	return 1 + // attributes
   100  		varIntLen(int64(milliseconds(timestampDelta))) +
   101  		varIntLen(offsetDelta) +
   102  		varBytesLen(msg.Key) +
   103  		varBytesLen(msg.Value) +
   104  		varArrayLen(len(msg.Headers), func(i int) int {
   105  			h := &msg.Headers[i]
   106  			return varStringLen(h.Key) + varBytesLen(h.Value)
   107  		})
   108  }