github.com/rbisecke/kafka-go@v0.4.27/produce.go

package kafka

import (
	"bufio"
	"context"
	"encoding"
	"errors"
	"fmt"
	"net"
	"strconv"
	"time"

	"github.com/rbisecke/kafka-go/protocol"
	produceAPI "github.com/rbisecke/kafka-go/protocol/produce"
)

type RequiredAcks int

const (
	RequireNone RequiredAcks = 0
	RequireOne  RequiredAcks = 1
	RequireAll  RequiredAcks = -1
)

func (acks RequiredAcks) String() string {
	switch acks {
	case RequireNone:
		return "none"
	case RequireOne:
		return "one"
	case RequireAll:
		return "all"
	default:
		return "unknown"
	}
}

func (acks RequiredAcks) MarshalText() ([]byte, error) {
	return []byte(acks.String()), nil
}

func (acks *RequiredAcks) UnmarshalText(b []byte) error {
	switch string(b) {
	case "none":
		*acks = RequireNone
	case "one":
		*acks = RequireOne
	case "all":
		*acks = RequireAll
	default:
		x, err := strconv.ParseInt(string(b), 10, 64)
		parsed := RequiredAcks(x)
		if err != nil || (parsed != RequireNone && parsed != RequireOne && parsed != RequireAll) {
			return fmt.Errorf("required acks must be one of none, one, or all, not %q", b)
		}
		*acks = parsed
	}
	return nil
}

var (
	_ encoding.TextMarshaler   = RequiredAcks(0)
	_ encoding.TextUnmarshaler = (*RequiredAcks)(nil)
)
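
// Usage sketch: RequiredAcks satisfies encoding.TextMarshaler and
// encoding.TextUnmarshaler, so it round-trips through textual configuration.
// Both the symbolic names and the matching numeric values parse:
//
//	var acks RequiredAcks
//	_ = acks.UnmarshalText([]byte("all")) // acks == RequireAll
//	_ = acks.UnmarshalText([]byte("-1"))  // the numeric form of RequireAll is accepted too
//	text, _ := acks.MarshalText()         // text == []byte("all")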

// ProduceRequest represents a request sent to a kafka broker to produce records
// to a topic partition.
type ProduceRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// The topic to produce the records to.
	Topic string

	// The partition to produce the records to.
	Partition int

	// The level of required acknowledgements to ask the kafka broker for.
	RequiredAcks RequiredAcks

	// The message format version used when encoding the records.
	//
	// By default, the client automatically determines which version should be
	// used based on the version of the Produce API supported by the server.
	MessageVersion int

	// An optional transaction id, used when producing to the kafka broker as
	// part of a transaction.
	TransactionalID string

	// The sequence of records to produce to the topic partition.
	Records RecordReader

	// An optional compression algorithm to apply to the batch of records sent
	// to the kafka broker.
	Compression Compression
}

// ProduceResponse represents a response from a kafka broker to a produce
// request.
type ProduceResponse struct {
	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// An error that may have occurred while attempting to produce the records.
	//
	// The error contains both the kafka error code, and an error message
	// returned by the kafka broker. Programs may use the standard errors.Is
	// function to test the error against kafka error codes.
	Error error

	// Offset of the first record that was written to the topic partition.
	//
	// This field will be zero if the kafka broker does not support the Produce
	// API in version 3 or above.
	BaseOffset int64

	// Time at which the broker wrote the records to the topic partition.
	//
	// This field will be zero if the kafka broker does not support the Produce
	// API in version 2 or above.
	LogAppendTime time.Time

	// First offset in the topic partition that the records were written to.
	//
	// This field will be zero if the kafka broker does not support the Produce
	// API in version 5 or above (or if the first offset is zero).
	LogStartOffset int64

	// If errors occurred writing specific records, they will be reported in
	// this map.
	//
	// This field will always be empty if the kafka broker does not support the
	// Produce API in version 8 or above.
	RecordErrors map[int]error
}
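
// Inspection sketch: as the field comments above suggest, res.Error can be
// tested with the standard errors.Is function against this package's kafka
// error codes, and RecordErrors can be walked for per-record failures. The
// UnknownTopicOrPartition code below is assumed to be defined elsewhere in
// the package.
//
//	if res.Error != nil {
//		if errors.Is(res.Error, UnknownTopicOrPartition) {
//			// the topic or partition does not exist on this broker
//		}
//	}
//	for index, recordErr := range res.RecordErrors {
//		fmt.Printf("record %d was rejected: %v\n", index, recordErr)
//	}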

// Produce sends a produce request to a kafka broker and returns the response.
//
// If the request contained no records, the error is nil and an empty
// ProduceResponse is returned (the protocol.ErrNoRecord condition is handled
// internally).
//
// When the request is configured with RequiredAcks=none, both the response and
// the error will be nil on success.
func (c *Client) Produce(ctx context.Context, req *ProduceRequest) (*ProduceResponse, error) {
	attributes := protocol.Attributes(req.Compression) & 0x7

	m, err := c.roundTrip(ctx, req.Addr, &produceAPI.Request{
		TransactionalID: req.TransactionalID,
		Acks:            int16(req.RequiredAcks),
		Timeout:         c.timeoutMs(ctx, defaultProduceTimeout),
		Topics: []produceAPI.RequestTopic{{
			Topic: req.Topic,
			Partitions: []produceAPI.RequestPartition{{
				Partition: int32(req.Partition),
				RecordSet: protocol.RecordSet{
					Attributes: attributes,
					Records:    req.Records,
				},
			}},
		}},
	})

	switch {
	case err == nil:
	case errors.Is(err, protocol.ErrNoRecord):
		return new(ProduceResponse), nil
	default:
		return nil, fmt.Errorf("kafka.(*Client).Produce: %w", err)
	}

	if req.RequiredAcks == RequireNone {
		return nil, nil
	}

	res := m.(*produceAPI.Response)
	if len(res.Topics) == 0 {
		return nil, fmt.Errorf("kafka.(*Client).Produce: %w", protocol.ErrNoTopic)
	}
	topic := &res.Topics[0]
	if len(topic.Partitions) == 0 {
		return nil, fmt.Errorf("kafka.(*Client).Produce: %w", protocol.ErrNoPartition)
	}
	partition := &topic.Partitions[0]

	ret := &ProduceResponse{
		Throttle:       makeDuration(res.ThrottleTimeMs),
		Error:          makeError(partition.ErrorCode, partition.ErrorMessage),
		BaseOffset:     partition.BaseOffset,
		LogAppendTime:  makeTime(partition.LogAppendTime),
		LogStartOffset: partition.LogStartOffset,
	}

	if len(partition.RecordErrors) != 0 {
		ret.RecordErrors = make(map[int]error, len(partition.RecordErrors))

		for _, recErr := range partition.RecordErrors {
			ret.RecordErrors[int(recErr.BatchIndex)] = errors.New(recErr.BatchIndexErrorMessage)
		}
	}

	return ret, nil
}
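
// Usage sketch: a minimal end-to-end call to Client.Produce. The TCP address
// helper and the protocol.NewRecordReader / protocol.NewBytes constructors are
// assumed to be provided elsewhere in this module; treat them as illustrative
// rather than guaranteed for this exact version.
//
//	client := &Client{Addr: TCP("localhost:9092")}
//
//	res, err := client.Produce(context.Background(), &ProduceRequest{
//		Topic:        "my-topic",
//		Partition:    0,
//		RequiredAcks: RequireAll,
//		Records: protocol.NewRecordReader(protocol.Record{
//			Key:   protocol.NewBytes([]byte("key")),
//			Value: protocol.NewBytes([]byte("value")),
//		}),
//	})
//	switch {
//	case err != nil:
//		// the request could not be completed
//	case res.Error != nil:
//		// the broker accepted the request but reported a partition-level error
//	default:
//		// records were written starting at res.BaseOffset
//	}
//
// With RequiredAcks set to RequireNone, both res and err are nil on success,
// so res must be checked before dereferencing in that configuration.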

// produceRequestV2 is the wire representation of a version 2 Produce request
// body: required acks, timeout, then the per-topic data.
type produceRequestV2 struct {
	RequiredAcks int16
	Timeout      int32
	Topics       []produceRequestTopicV2
}

func (r produceRequestV2) size() int32 {
	return 2 + 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
}

func (r produceRequestV2) writeTo(wb *writeBuffer) {
	wb.writeInt16(r.RequiredAcks)
	wb.writeInt32(r.Timeout)
	wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
}

// produceRequestTopicV2 groups the partitions written to a single topic.
type produceRequestTopicV2 struct {
	TopicName  string
	Partitions []produceRequestPartitionV2
}

func (t produceRequestTopicV2) size() int32 {
	return sizeofString(t.TopicName) +
		sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
}

func (t produceRequestTopicV2) writeTo(wb *writeBuffer) {
	wb.writeString(t.TopicName)
	wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
}

// produceRequestPartitionV2 carries the message set produced to one partition.
type produceRequestPartitionV2 struct {
	Partition      int32
	MessageSetSize int32
	MessageSet     messageSet
}

func (p produceRequestPartitionV2) size() int32 {
	return 4 + 4 + p.MessageSet.size()
}

func (p produceRequestPartitionV2) writeTo(wb *writeBuffer) {
	wb.writeInt32(p.Partition)
	wb.writeInt32(p.MessageSetSize)
	p.MessageSet.writeTo(wb)
}

// produceResponseV2 is the wire representation of a version 2 Produce response
// body.
type produceResponseV2 struct {
	ThrottleTime int32
	Topics       []produceResponseTopicV2
}

func (r produceResponseV2) size() int32 {
	return 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
}

func (r produceResponseV2) writeTo(wb *writeBuffer) {
	wb.writeInt32(r.ThrottleTime)
	wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
}

type produceResponseTopicV2 struct {
	TopicName  string
	Partitions []produceResponsePartitionV2
}

func (t produceResponseTopicV2) size() int32 {
	return sizeofString(t.TopicName) +
		sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
}

func (t produceResponseTopicV2) writeTo(wb *writeBuffer) {
	wb.writeString(t.TopicName)
	wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
}

// produceResponsePartitionV2 holds the per-partition result of a v2 produce:
// error code, base offset, and log append timestamp.
type produceResponsePartitionV2 struct {
	Partition int32
	ErrorCode int16
	Offset    int64
	Timestamp int64
}

func (p produceResponsePartitionV2) size() int32 {
	return 4 + 2 + 8 + 8
}

func (p produceResponsePartitionV2) writeTo(wb *writeBuffer) {
	wb.writeInt32(p.Partition)
	wb.writeInt16(p.ErrorCode)
	wb.writeInt64(p.Offset)
	wb.writeInt64(p.Timestamp)
}

func (p *produceResponsePartitionV2) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
	if remain, err = readInt32(r, sz, &p.Partition); err != nil {
		return
	}
	if remain, err = readInt16(r, remain, &p.ErrorCode); err != nil {
		return
	}
	if remain, err = readInt64(r, remain, &p.Offset); err != nil {
		return
	}
	if remain, err = readInt64(r, remain, &p.Timestamp); err != nil {
		return
	}
	return
}

// produceResponsePartitionV7 extends the v2 partition result with the log
// start offset.
type produceResponsePartitionV7 struct {
	Partition   int32
	ErrorCode   int16
	Offset      int64
	Timestamp   int64
	StartOffset int64
}

func (p produceResponsePartitionV7) size() int32 {
	return 4 + 2 + 8 + 8 + 8
}

func (p produceResponsePartitionV7) writeTo(wb *writeBuffer) {
	wb.writeInt32(p.Partition)
	wb.writeInt16(p.ErrorCode)
	wb.writeInt64(p.Offset)
	wb.writeInt64(p.Timestamp)
	wb.writeInt64(p.StartOffset)
}

func (p *produceResponsePartitionV7) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
	if remain, err = readInt32(r, sz, &p.Partition); err != nil {
		return
	}
	if remain, err = readInt16(r, remain, &p.ErrorCode); err != nil {
		return
	}
	if remain, err = readInt64(r, remain, &p.Offset); err != nil {
		return
	}
	if remain, err = readInt64(r, remain, &p.Timestamp); err != nil {
		return
	}
	if remain, err = readInt64(r, remain, &p.StartOffset); err != nil {
		return
	}
	return
}