github.com/streamdal/segmentio-kafka-go@v0.4.47-streamdal/produce.go (about)

     1  package kafka
     2  
     3  import (
     4  	"bufio"
     5  	"context"
     6  	"encoding"
     7  	"errors"
     8  	"fmt"
     9  	"net"
    10  	"strconv"
    11  	"time"
    12  
    13  	"github.com/segmentio/kafka-go/protocol"
    14  	produceAPI "github.com/segmentio/kafka-go/protocol/produce"
    15  )
    16  
    17  type RequiredAcks int
    18  
    19  const (
    20  	RequireNone RequiredAcks = 0
    21  	RequireOne  RequiredAcks = 1
    22  	RequireAll  RequiredAcks = -1
    23  )
    24  
    25  func (acks RequiredAcks) String() string {
    26  	switch acks {
    27  	case RequireNone:
    28  		return "none"
    29  	case RequireOne:
    30  		return "one"
    31  	case RequireAll:
    32  		return "all"
    33  	default:
    34  		return "unknown"
    35  	}
    36  }
    37  
    38  func (acks RequiredAcks) MarshalText() ([]byte, error) {
    39  	return []byte(acks.String()), nil
    40  }
    41  
    42  func (acks *RequiredAcks) UnmarshalText(b []byte) error {
    43  	switch string(b) {
    44  	case "none":
    45  		*acks = RequireNone
    46  	case "one":
    47  		*acks = RequireOne
    48  	case "all":
    49  		*acks = RequireAll
    50  	default:
    51  		x, err := strconv.ParseInt(string(b), 10, 64)
    52  		parsed := RequiredAcks(x)
    53  		if err != nil || (parsed != RequireNone && parsed != RequireOne && parsed != RequireAll) {
    54  			return fmt.Errorf("required acks must be one of none, one, or all, not %q", b)
    55  		}
    56  		*acks = parsed
    57  	}
    58  	return nil
    59  }
    60  
    61  var (
    62  	_ encoding.TextMarshaler   = RequiredAcks(0)
    63  	_ encoding.TextUnmarshaler = (*RequiredAcks)(nil)
    64  )
    65  
    66  // ProduceRequest represents a request sent to a kafka broker to produce records
    67  // to a topic partition.
// ProduceRequest represents a request sent to a kafka broker to produce records
// to a topic partition.
type ProduceRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// The topic to produce the records to.
	Topic string

	// The partition to produce the records to.
	Partition int

	// The level of required acknowledgements to ask the kafka broker for.
	RequiredAcks RequiredAcks

	// The message format version used when encoding the records.
	//
	// By default, the client automatically determines which version should be
	// used based on the version of the Produce API supported by the server.
	MessageVersion int

	// An optional transaction id when producing to the kafka broker is part of
	// a transaction.
	TransactionalID string

	// The sequence of records to produce to the topic partition.
	Records RecordReader

	// An optional compression algorithm to apply to the batch of records sent
	// to the kafka broker.
	Compression Compression
}
    98  
    99  // ProduceResponse represents a response from a kafka broker to a produce
   100  // request.
// ProduceResponse represents a response from a kafka broker to a produce
// request.
type ProduceResponse struct {
	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// An error that may have occurred while attempting to produce the records.
	//
	// The error contains both the kafka error code, and an error message
	// returned by the kafka broker. Programs may use the standard errors.Is
	// function to test the error against kafka error codes.
	Error error

	// Offset of the first record that was written to the topic partition.
	//
	// This field will be zero if the kafka broker did not support Produce API
	// version 3 or above.
	BaseOffset int64

	// Time at which the broker wrote the records to the topic partition.
	//
	// This field will be zero if the kafka broker did not support Produce API
	// version 2 or above.
	LogAppendTime time.Time

	// First offset in the topic partition that the records were written to.
	//
	// This field will be zero if the kafka broker did not support Produce
	// API version 5 or above (or if the first offset is zero).
	LogStartOffset int64

	// If errors occurred writing specific records, they will be reported in
	// this map, keyed by the record's batch index.
	//
	// This field will always be empty if the kafka broker did not support the
	// Produce API in version 8 or above.
	RecordErrors map[int]error
}
   137  
   138  // Produce sends a produce request to a kafka broker and returns the response.
   139  //
   140  // If the request contained no records, an error wrapping protocol.ErrNoRecord
   141  // is returned.
   142  //
   143  // When the request is configured with RequiredAcks=none, both the response and
   144  // the error will be nil on success.
   145  func (c *Client) Produce(ctx context.Context, req *ProduceRequest) (*ProduceResponse, error) {
   146  	attributes := protocol.Attributes(req.Compression) & 0x7
   147  
   148  	m, err := c.roundTrip(ctx, req.Addr, &produceAPI.Request{
   149  		TransactionalID: req.TransactionalID,
   150  		Acks:            int16(req.RequiredAcks),
   151  		Timeout:         c.timeoutMs(ctx, defaultProduceTimeout),
   152  		Topics: []produceAPI.RequestTopic{{
   153  			Topic: req.Topic,
   154  			Partitions: []produceAPI.RequestPartition{{
   155  				Partition: int32(req.Partition),
   156  				RecordSet: protocol.RecordSet{
   157  					Attributes: attributes,
   158  					Records:    req.Records,
   159  				},
   160  			}},
   161  		}},
   162  	})
   163  
   164  	switch {
   165  	case err == nil:
   166  	case errors.Is(err, protocol.ErrNoRecord):
   167  		return new(ProduceResponse), nil
   168  	default:
   169  		return nil, fmt.Errorf("kafka.(*Client).Produce: %w", err)
   170  	}
   171  
   172  	if req.RequiredAcks == RequireNone {
   173  		return nil, nil
   174  	}
   175  
   176  	res := m.(*produceAPI.Response)
   177  	if len(res.Topics) == 0 {
   178  		return nil, fmt.Errorf("kafka.(*Client).Produce: %w", protocol.ErrNoTopic)
   179  	}
   180  	topic := &res.Topics[0]
   181  	if len(topic.Partitions) == 0 {
   182  		return nil, fmt.Errorf("kafka.(*Client).Produce: %w", protocol.ErrNoPartition)
   183  	}
   184  	partition := &topic.Partitions[0]
   185  
   186  	ret := &ProduceResponse{
   187  		Throttle:       makeDuration(res.ThrottleTimeMs),
   188  		Error:          makeError(partition.ErrorCode, partition.ErrorMessage),
   189  		BaseOffset:     partition.BaseOffset,
   190  		LogAppendTime:  makeTime(partition.LogAppendTime),
   191  		LogStartOffset: partition.LogStartOffset,
   192  	}
   193  
   194  	if len(partition.RecordErrors) != 0 {
   195  		ret.RecordErrors = make(map[int]error, len(partition.RecordErrors))
   196  
   197  		for _, recErr := range partition.RecordErrors {
   198  			ret.RecordErrors[int(recErr.BatchIndex)] = errors.New(recErr.BatchIndexErrorMessage)
   199  		}
   200  	}
   201  
   202  	return ret, nil
   203  }
   204  
// produceRequestV2 is the wire representation of a version 2 Produce request.
type produceRequestV2 struct {
	// Acknowledgement level requested from the broker (see RequiredAcks).
	RequiredAcks int16
	// Maximum time in milliseconds the broker may spend on the request.
	Timeout int32
	// Topics (and nested partitions) the records are produced to.
	Topics []produceRequestTopicV2
}
   210  
   211  func (r produceRequestV2) size() int32 {
   212  	return 2 + 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
   213  }
   214  
// writeTo encodes the request fields to wb in wire order: required acks,
// timeout, then the topic array. The order must match size and the v2
// Produce schema exactly.
func (r produceRequestV2) writeTo(wb *writeBuffer) {
	wb.writeInt16(r.RequiredAcks)
	wb.writeInt32(r.Timeout)
	wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
}
   220  
// produceRequestTopicV2 is one topic entry in a version 2 Produce request.
type produceRequestTopicV2 struct {
	// Name of the topic the records are produced to.
	TopicName string
	// Partition entries for this topic.
	Partitions []produceRequestPartitionV2
}
   225  
   226  func (t produceRequestTopicV2) size() int32 {
   227  	return sizeofString(t.TopicName) +
   228  		sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
   229  }
   230  
// writeTo encodes the topic entry to wb in wire order: topic name first,
// then the partition array. The order must match size and the v2 schema.
func (t produceRequestTopicV2) writeTo(wb *writeBuffer) {
	wb.writeString(t.TopicName)
	wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
}
   235  
// produceRequestPartitionV2 is one partition entry in a version 2 Produce
// request.
type produceRequestPartitionV2 struct {
	// Index of the partition the records are produced to.
	Partition int32
	// Byte size of the message set that follows, written as its own field.
	MessageSetSize int32
	// The message set carrying the records.
	MessageSet messageSet
}
   241  
   242  func (p produceRequestPartitionV2) size() int32 {
   243  	return 4 + 4 + p.MessageSet.size()
   244  }
   245  
// writeTo encodes the partition entry to wb in wire order: partition index,
// message set size prefix, then the message set itself.
func (p produceRequestPartitionV2) writeTo(wb *writeBuffer) {
	wb.writeInt32(p.Partition)
	wb.writeInt32(p.MessageSetSize)
	p.MessageSet.writeTo(wb)
}
   251  
// produceResponsePartitionV2 is one partition entry in a version 2 Produce
// response.
type produceResponsePartitionV2 struct {
	// Index of the partition the response refers to.
	Partition int32
	// Kafka error code for this partition (0 means no error).
	ErrorCode int16
	// Offset assigned to the first produced record.
	Offset int64
	// Broker-assigned timestamp of the write, in milliseconds.
	Timestamp int64
}
   258  
   259  func (p produceResponsePartitionV2) size() int32 {
   260  	return 4 + 2 + 8 + 8
   261  }
   262  
// writeTo encodes the response partition entry to wb in wire order:
// partition, error code, offset, timestamp. Must mirror readFrom.
func (p produceResponsePartitionV2) writeTo(wb *writeBuffer) {
	wb.writeInt32(p.Partition)
	wb.writeInt16(p.ErrorCode)
	wb.writeInt64(p.Offset)
	wb.writeInt64(p.Timestamp)
}
   269  
// readFrom decodes a v2 response partition entry from r, consuming at most sz
// bytes. Fields are read in wire order (mirroring writeTo); remain reports
// how many of the sz bytes are left unread, and decoding stops at the first
// read error.
func (p *produceResponsePartitionV2) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
	if remain, err = readInt32(r, sz, &p.Partition); err != nil {
		return
	}
	if remain, err = readInt16(r, remain, &p.ErrorCode); err != nil {
		return
	}
	if remain, err = readInt64(r, remain, &p.Offset); err != nil {
		return
	}
	if remain, err = readInt64(r, remain, &p.Timestamp); err != nil {
		return
	}
	return
}
   285  
// produceResponsePartitionV7 is one partition entry in a version 7 Produce
// response. It extends the v2 layout with the partition's log start offset.
type produceResponsePartitionV7 struct {
	// Index of the partition the response refers to.
	Partition int32
	// Kafka error code for this partition (0 means no error).
	ErrorCode int16
	// Offset assigned to the first produced record.
	Offset int64
	// Broker-assigned timestamp of the write, in milliseconds.
	Timestamp int64
	// Current log start offset of the partition.
	StartOffset int64
}
   293  
   294  func (p produceResponsePartitionV7) size() int32 {
   295  	return 4 + 2 + 8 + 8 + 8
   296  }
   297  
// writeTo encodes the response partition entry to wb in wire order:
// partition, error code, offset, timestamp, start offset. Must mirror
// readFrom.
func (p produceResponsePartitionV7) writeTo(wb *writeBuffer) {
	wb.writeInt32(p.Partition)
	wb.writeInt16(p.ErrorCode)
	wb.writeInt64(p.Offset)
	wb.writeInt64(p.Timestamp)
	wb.writeInt64(p.StartOffset)
}
   305  
// readFrom decodes a v7 response partition entry from r, consuming at most sz
// bytes. Fields are read in wire order (mirroring writeTo); remain reports
// how many of the sz bytes are left unread, and decoding stops at the first
// read error.
func (p *produceResponsePartitionV7) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
	if remain, err = readInt32(r, sz, &p.Partition); err != nil {
		return
	}
	if remain, err = readInt16(r, remain, &p.ErrorCode); err != nil {
		return
	}
	if remain, err = readInt64(r, remain, &p.Offset); err != nil {
		return
	}
	if remain, err = readInt64(r, remain, &p.Timestamp); err != nil {
		return
	}
	if remain, err = readInt64(r, remain, &p.StartOffset); err != nil {
		return
	}
	return
}