github.com/confluentinc/confluent-kafka-go@v1.9.2/kafka/producer.go

package kafka

/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import (
	"context"
	"fmt"
	"math"
	"time"
	"unsafe"
)

/*
#include <stdlib.h>
#include "select_rdkafka.h"
#include "glue_rdkafka.h"


#ifdef RD_KAFKA_V_HEADERS
// Convert tmphdrs to chdrs (created by this function).
// If tmphdr.size == -1: value is considered Null
//    tmphdr.size == 0:  value is considered empty (ignored)
//    tmphdr.size > 0:   value is considered non-empty
//
// WARNING: The header keys and values will be freed by this function.
void tmphdrs_to_chdrs (tmphdr_t *tmphdrs, size_t tmphdrsCnt,
                       rd_kafka_headers_t **chdrs) {
   size_t i;

   *chdrs = rd_kafka_headers_new(tmphdrsCnt);

   for (i = 0 ; i < tmphdrsCnt ; i++) {
      rd_kafka_header_add(*chdrs,
                          tmphdrs[i].key, -1,
                          tmphdrs[i].size == -1 ? NULL :
                          (tmphdrs[i].size == 0 ? "" : tmphdrs[i].val),
                          tmphdrs[i].size == -1 ? 0 : tmphdrs[i].size);
      if (tmphdrs[i].size > 0)
         free((void *)tmphdrs[i].val);
      free((void *)tmphdrs[i].key);
   }
}

#else
void free_tmphdrs (tmphdr_t *tmphdrs, size_t tmphdrsCnt) {
   size_t i;
   for (i = 0 ; i < tmphdrsCnt ; i++) {
      if (tmphdrs[i].size > 0)
         free((void *)tmphdrs[i].val);
      free((void *)tmphdrs[i].key);
   }
}
#endif


rd_kafka_resp_err_t do_produce (rd_kafka_t *rk,
          rd_kafka_topic_t *rkt, int32_t partition,
          int msgflags,
          int valIsNull, void *val, size_t val_len,
          int keyIsNull, void *key, size_t key_len,
          int64_t timestamp,
          tmphdr_t *tmphdrs, size_t tmphdrsCnt,
          uintptr_t cgoid) {
  void *valp = valIsNull ? NULL : val;
  void *keyp = keyIsNull ? NULL : key;
#ifdef RD_KAFKA_V_TIMESTAMP
  rd_kafka_resp_err_t err;
#ifdef RD_KAFKA_V_HEADERS
  rd_kafka_headers_t *hdrs = NULL;
#endif
#endif


  if (tmphdrsCnt > 0) {
#ifdef RD_KAFKA_V_HEADERS
     tmphdrs_to_chdrs(tmphdrs, tmphdrsCnt, &hdrs);
#else
     free_tmphdrs(tmphdrs, tmphdrsCnt);
     return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
#endif
  }


#ifdef RD_KAFKA_V_TIMESTAMP
  err = rd_kafka_producev(rk,
        RD_KAFKA_V_RKT(rkt),
        RD_KAFKA_V_PARTITION(partition),
        RD_KAFKA_V_MSGFLAGS(msgflags),
        RD_KAFKA_V_VALUE(valp, val_len),
        RD_KAFKA_V_KEY(keyp, key_len),
        RD_KAFKA_V_TIMESTAMP(timestamp),
#ifdef RD_KAFKA_V_HEADERS
        RD_KAFKA_V_HEADERS(hdrs),
#endif
        RD_KAFKA_V_OPAQUE((void *)cgoid),
        RD_KAFKA_V_END);
#ifdef RD_KAFKA_V_HEADERS
  if (err && hdrs)
    rd_kafka_headers_destroy(hdrs);
#endif
  return err;
#else
  if (timestamp)
      return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
  if (rd_kafka_produce(rkt, partition, msgflags,
                       valp, val_len,
                       keyp, key_len,
                       (void *)cgoid) == -1)
      return rd_kafka_last_error();
  else
      return RD_KAFKA_RESP_ERR_NO_ERROR;
#endif
}
*/
import "C"

// Producer implements a High-level Apache Kafka Producer instance
type Producer struct {
	events         chan Event
	produceChannel chan *Message
	handle         handle

	// Terminates the poller() goroutine
	pollerTermChan chan bool
}

// String returns a human-readable name for a Producer instance
func (p *Producer) String() string {
	return p.handle.String()
}

// gethandle implements the Handle interface
func (p *Producer) gethandle() *handle {
	return &p.handle
}
func (p *Producer) produce(msg *Message, msgFlags int, deliveryChan chan Event) error {
	if msg == nil || msg.TopicPartition.Topic == nil || len(*msg.TopicPartition.Topic) == 0 {
		return newErrorFromString(ErrInvalidArg, "")
	}

	crkt := p.handle.getRkt(*msg.TopicPartition.Topic)

	// Three problems:
	//  1) There's a difference between an empty Value or Key (length 0, proper pointer) and
	//     a null Value or Key (length 0, null pointer).
	//  2) We need to be able to send a null Value or Key, but the unsafe.Pointer(&slice[0])
	//     dereference can't be performed on a nil slice.
	//  3) cgo's pointer checking requires the unsafe.Pointer(slice..) call to be made
	//     in the call to the C function.
	//
	// Solution:
	//  Keep track of whether the Value or Key was nil (1), but let the valp and keyp pointers
	//  point to a 1-byte slice (the length to send is still 0) so that the dereference (2)
	//  works.
	//  Then perform the unsafe.Pointer() on the valp and keyp pointers (which now either point
	//  to the original msg.Value and msg.Key or to the 1-byte slices) in the call to C (3).
	//
	var valp []byte
	var keyp []byte
	oneByte := []byte{0}
	var valIsNull C.int
	var keyIsNull C.int
	var valLen int
	var keyLen int

	if msg.Value == nil {
		valIsNull = 1
		valLen = 0
		valp = oneByte
	} else {
		valLen = len(msg.Value)
		if valLen > 0 {
			valp = msg.Value
		} else {
			valp = oneByte
		}
	}

	if msg.Key == nil {
		keyIsNull = 1
		keyLen = 0
		keyp = oneByte
	} else {
		keyLen = len(msg.Key)
		if keyLen > 0 {
			keyp = msg.Key
		} else {
			keyp = oneByte
		}
	}

	var cgoid int

	// Per-message state that needs to be retained through the C code:
	//   delivery channel (if specified)
	//   message opaque   (if specified)
	// Since these can't be passed as opaque pointers to the C code,
	// due to cgo constraints, we add them to a per-producer map for lookup
	// when the C code triggers the callbacks or events.
	if deliveryChan != nil || msg.Opaque != nil {
		cgoid = p.handle.cgoPut(cgoDr{deliveryChan: deliveryChan, opaque: msg.Opaque})
	}

	var timestamp int64
	if !msg.Timestamp.IsZero() {
		timestamp = msg.Timestamp.UnixNano() / 1000000
	}

	// Convert headers to C-friendly tmphdrs
	var tmphdrs []C.tmphdr_t
	tmphdrsCnt := len(msg.Headers)

	if tmphdrsCnt > 0 {
		tmphdrs = make([]C.tmphdr_t, tmphdrsCnt)

		for n, hdr := range msg.Headers {
			// Make a copy of the key
			// to avoid runtime panic with
			// foreign Go pointers in cgo.
			tmphdrs[n].key = C.CString(hdr.Key)
			if hdr.Value != nil {
				tmphdrs[n].size = C.ssize_t(len(hdr.Value))
				if tmphdrs[n].size > 0 {
					// Make a copy of the value
					// to avoid runtime panic with
					// foreign Go pointers in cgo.
					tmphdrs[n].val = C.CBytes(hdr.Value)
				}
			} else {
				// null value
				tmphdrs[n].size = C.ssize_t(-1)
			}
		}
	} else {
		// No headers: use a dummy tmphdrs of size 1 to avoid an index
		// out of bounds panic in the do_produce() call below.
		// tmphdrsCnt will be 0.
		tmphdrs = []C.tmphdr_t{{nil, nil, 0}}
	}

	cErr := C.do_produce(p.handle.rk, crkt,
		C.int32_t(msg.TopicPartition.Partition),
		C.int(msgFlags)|C.RD_KAFKA_MSG_F_COPY,
		valIsNull, unsafe.Pointer(&valp[0]), C.size_t(valLen),
		keyIsNull, unsafe.Pointer(&keyp[0]), C.size_t(keyLen),
		C.int64_t(timestamp),
		(*C.tmphdr_t)(unsafe.Pointer(&tmphdrs[0])), C.size_t(tmphdrsCnt),
		(C.uintptr_t)(cgoid))
	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		if cgoid != 0 {
			p.handle.cgoGet(cgoid)
		}
		return newError(cErr)
	}

	return nil
}

// Produce produces a single message.
// This is an asynchronous call that enqueues the message on the internal
// transmit queue, thus returning immediately.
// The delivery report will be sent on the provided deliveryChan if specified,
// or on the Producer object's Events() channel if not.
// msg.Timestamp requires librdkafka >= 0.9.4 (else returns ErrNotImplemented),
// api.version.request=true, and broker >= 0.10.0.0.
// msg.Headers requires librdkafka >= 0.11.4 (else returns ErrNotImplemented),
// api.version.request=true, and broker >= 0.11.0.0.
// Returns an error if the message could not be enqueued.
func (p *Producer) Produce(msg *Message, deliveryChan chan Event) error {
	return p.produce(msg, 0, deliveryChan)
}
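
// A minimal usage sketch (illustrative; the topic name and payload are
// examples): produce one message and wait for its delivery report on a
// dedicated channel.
//
//	topic := "myTopic"
//	deliveryChan := make(chan kafka.Event)
//	err := p.Produce(&kafka.Message{
//		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
//		Value:          []byte("hello"),
//	}, deliveryChan)
//	if err != nil {
//		// Message could not be enqueued (e.g., the queue is full).
//	}
//	e := <-deliveryChan // delivery report
//	if m := e.(*kafka.Message); m.TopicPartition.Error != nil {
//		// Delivery failed.
//	}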

// produceBatch produces a batch of messages.
// These batches do not relate to the message batches sent to the broker; the
// latter are collected on the fly internally in librdkafka.
// WARNING: This is an experimental API.
// NOTE: timestamps and headers are not supported with this API.
func (p *Producer) produceBatch(topic string, msgs []*Message, msgFlags int) error {
	crkt := p.handle.getRkt(topic)

	cmsgs := make([]C.rd_kafka_message_t, len(msgs))
	for i, m := range msgs {
		p.handle.messageToC(m, &cmsgs[i])
	}
	r := C.rd_kafka_produce_batch(crkt, C.RD_KAFKA_PARTITION_UA, C.int(msgFlags)|C.RD_KAFKA_MSG_F_FREE,
		(*C.rd_kafka_message_t)(&cmsgs[0]), C.int(len(msgs)))
	if r == -1 {
		return newError(C.rd_kafka_last_error())
	}

	return nil
}

// Events returns the Events channel (read)
func (p *Producer) Events() chan Event {
	return p.events
}

// Logs returns the Log channel (if enabled), else nil
func (p *Producer) Logs() chan LogEvent {
	return p.handle.logs
}

// ProduceChannel returns the produce *Message channel (write)
func (p *Producer) ProduceChannel() chan *Message {
	return p.produceChannel
}

// Len returns the number of messages and requests waiting to be transmitted to the broker
// as well as delivery reports queued for the application.
// Includes messages on ProduceChannel.
func (p *Producer) Len() int {
	return len(p.produceChannel) + len(p.events) + int(C.rd_kafka_outq_len(p.handle.rk))
}

// Flush waits for outstanding messages and requests to complete delivery,
// including messages on ProduceChannel.
// Runs until the queue length reaches zero or timeoutMs elapses.
// Returns the number of outstanding events still un-flushed.
func (p *Producer) Flush(timeoutMs int) int {
	termChan := make(chan bool) // unused stand-in termChan

	d := time.Duration(timeoutMs) * time.Millisecond
	tEnd := time.Now().Add(d)
	for p.Len() > 0 {
		remain := time.Until(tEnd).Seconds()
		if remain <= 0.0 {
			return p.Len()
		}

		p.handle.eventPoll(p.events,
			int(math.Min(100, remain*1000)), 1000, termChan)
	}

	return 0
}

// Close a Producer instance.
// The Producer object or its channels are no longer usable after this call.
func (p *Producer) Close() {
	// Wait for poller() (signaled by closing pollerTermChan)
	// and channelProducer() (signaled by closing ProduceChannel)
	close(p.pollerTermChan)
	close(p.produceChannel)
	p.handle.waitGroup.Wait()

	close(p.events)

	p.handle.cleanup()

	C.rd_kafka_destroy(p.handle.rk)
}
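
// A typical shutdown sketch (illustrative; the 15-second bound is an example
// value): flush outstanding messages before closing so queued delivery
// reports are served, then close the instance.
//
//	if remaining := p.Flush(15 * 1000); remaining > 0 {
//		// remaining messages were not delivered within the timeout;
//		// the application may retry Flush() or give up.
//	}
//	p.Close()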

const (
	// PurgeInFlight purges messages in-flight to or from the broker.
	// Purging these messages will void any future acknowledgements from the
	// broker, making it impossible for the application to know if these
	// messages were successfully delivered or not.
	// Retrying these messages may lead to duplicates.
	PurgeInFlight = int(C.RD_KAFKA_PURGE_F_INFLIGHT)

	// PurgeQueue purges messages in internal queues.
	PurgeQueue = int(C.RD_KAFKA_PURGE_F_QUEUE)

	// PurgeNonBlocking disables waiting for background thread queue purging to finish.
	PurgeNonBlocking = int(C.RD_KAFKA_PURGE_F_NON_BLOCKING)
)

// Purge messages currently handled by this producer instance.
//
// flags is a combination of PurgeQueue, PurgeInFlight and PurgeNonBlocking.
//
// The application will need to call Poll(), Flush() or read the Events() channel
// after this call to serve delivery reports for the purged messages.
//
// Messages purged from internal queues fail with the delivery report
// error code set to ErrPurgeQueue, while purged messages that
// are in-flight to or from the broker will fail with the error code set to
// ErrPurgeInflight.
//
// Warning: Purging messages that are in-flight to or from the broker
// will ignore any subsequent acknowledgement for these messages
// received from the broker, effectively making it impossible
// for the application to know if the messages were successfully
// produced or not. This may result in duplicate messages if the
// application retries these messages at a later time.
//
// Note: This call may block for a short time while background thread
// queues are purged.
//
// Returns nil on success, ErrInvalidArg if the purge flags are invalid or unknown.
func (p *Producer) Purge(flags int) error {
	cErr := C.rd_kafka_purge(p.handle.rk, C.int(flags))
	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return newError(cErr)
	}

	return nil
}
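
// A purge sketch (illustrative): abandon both queued and in-flight messages,
// then serve the resulting delivery reports. Flags are combined with
// bitwise OR.
//
//	if err := p.Purge(kafka.PurgeQueue | kafka.PurgeInFlight); err != nil {
//		// Invalid or unknown flags.
//	}
//	p.Flush(1000) // serve delivery reports for the purged messages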

// NewProducer creates a new high-level Producer instance.
//
// conf is a *ConfigMap with standard librdkafka configuration properties.
//
// Supported special configuration properties (type, default):
//   go.batch.producer (bool, false) - EXPERIMENTAL: Enable batch producer (for increased performance).
//                                     These batches do not relate to Kafka message batches in any way.
//                                     Note: timestamps and headers are not supported with this interface.
//   go.delivery.reports (bool, true) - Forward per-message delivery reports to the
//                                      Events() channel.
//   go.delivery.report.fields (string, "key,value") - Comma-separated list of fields to enable for delivery reports.
//                                       Allowed values: all, none (or empty string), key, value, headers
//                                       Warning: There is a performance penalty to include headers in the delivery report.
//   go.events.channel.size (int, 1000000) - Events() channel buffer size (in number of events).
//   go.produce.channel.size (int, 1000000) - ProduceChannel() buffer size (in number of messages).
//   go.logs.channel.enable (bool, false) - Forward logs to the Logs() channel.
//   go.logs.channel (chan kafka.LogEvent, nil) - Forward logs to application-provided channel instead of Logs(). Requires go.logs.channel.enable=true.
//
func NewProducer(conf *ConfigMap) (*Producer, error) {

	err := versionCheck()
	if err != nil {
		return nil, err
	}

	p := &Producer{}

	// Before we do anything with the configuration, create a copy such that
	// the original is not mutated.
	confCopy := conf.clone()

	v, err := confCopy.extract("delivery.report.only.error", false)
	if err != nil {
		return nil, err
	}
	if v == true {
		// FIXME: The filtering of successful DRs must be done in
		//        the Go client to avoid cgoDr memory leaks.
		return nil, newErrorFromString(ErrUnsupportedFeature,
			"delivery.report.only.error=true is not currently supported by the Go client")
	}

	v, err = confCopy.extract("go.batch.producer", false)
	if err != nil {
		return nil, err
	}
	batchProducer := v.(bool)

	v, err = confCopy.extract("go.delivery.reports", true)
	if err != nil {
		return nil, err
	}
	p.handle.fwdDr = v.(bool)

	v, err = confCopy.extract("go.delivery.report.fields", "key,value")
	if err != nil {
		return nil, err
	}

	p.handle.msgFields, err = newMessageFieldsFrom(v)
	if err != nil {
		return nil, err
	}

	v, err = confCopy.extract("go.events.channel.size", 1000000)
	if err != nil {
		return nil, err
	}
	eventsChanSize := v.(int)

	v, err = confCopy.extract("go.produce.channel.size", 1000000)
	if err != nil {
		return nil, err
	}
	produceChannelSize := v.(int)

	logsChanEnable, logsChan, err := confCopy.extractLogConfig()
	if err != nil {
		return nil, err
	}

	if int(C.rd_kafka_version()) < 0x01000000 {
		// produce.offset.report is no longer used in librdkafka >= v1.0.0
		v, _ = confCopy.extract("{topic}.produce.offset.report", nil)
		if v == nil {
			// Enable offset reporting by default, unless overridden.
			confCopy.SetKey("{topic}.produce.offset.report", true)
		}
	}

	// Convert ConfigMap to librdkafka conf_t
	cConf, err := confCopy.convert()
	if err != nil {
		return nil, err
	}

	cErrstr := (*C.char)(C.malloc(C.size_t(256)))
	defer C.free(unsafe.Pointer(cErrstr))

	C.rd_kafka_conf_set_events(cConf, C.RD_KAFKA_EVENT_DR|C.RD_KAFKA_EVENT_STATS|C.RD_KAFKA_EVENT_ERROR|C.RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH)

	// Create librdkafka producer instance
	p.handle.rk = C.rd_kafka_new(C.RD_KAFKA_PRODUCER, cConf, cErrstr, 256)
	if p.handle.rk == nil {
		return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
	}

	p.handle.p = p
	p.handle.setup()
	p.handle.rkq = C.rd_kafka_queue_get_main(p.handle.rk)
	p.events = make(chan Event, eventsChanSize)
	p.produceChannel = make(chan *Message, produceChannelSize)
	p.pollerTermChan = make(chan bool)

	if logsChanEnable {
		p.handle.setupLogQueue(logsChan, p.pollerTermChan)
	}

	p.handle.waitGroup.Add(1)
	go func() {
		poller(p, p.pollerTermChan)
		p.handle.waitGroup.Done()
	}()

	// Non-batch or batch producer: only one may be used.
	var producer func(*Producer)
	if batchProducer {
		producer = channelBatchProducer
	} else {
		producer = channelProducer
	}

	p.handle.waitGroup.Add(1)
	go func() {
		producer(p)
		p.handle.waitGroup.Done()
	}()

	return p, nil
}
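
// A construction sketch (the broker address and property values are example
// assumptions, not defaults of this package):
//
//	p, err := kafka.NewProducer(&kafka.ConfigMap{
//		"bootstrap.servers":   "localhost:9092",
//		"go.delivery.reports": true,
//	})
//	if err != nil {
//		// Configuration conversion or client creation failed.
//	}
//	defer p.Close()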

// channelProducer serves the ProduceChannel channel
func channelProducer(p *Producer) {
	for m := range p.produceChannel {
		err := p.produce(m, C.RD_KAFKA_MSG_F_BLOCK, nil)
		if err != nil {
			m.TopicPartition.Error = err
			p.events <- m
		}
	}
}

// channelBatchProducer serves the ProduceChannel channel and attempts to
// improve cgo performance by using the produceBatch() interface.
func channelBatchProducer(p *Producer) {
	var buffered = make(map[string][]*Message)
	bufferedCnt := 0
	const batchSize int = 1000000
	totMsgCnt := 0
	totBatchCnt := 0

	for m := range p.produceChannel {
		buffered[*m.TopicPartition.Topic] = append(buffered[*m.TopicPartition.Topic], m)
		bufferedCnt++

	loop2:
		for {
			select {
			case m, ok := <-p.produceChannel:
				if !ok {
					break loop2
				}
				if m == nil {
					panic("nil message received on ProduceChannel")
				}
				if m.TopicPartition.Topic == nil {
					panic(fmt.Sprintf("message without Topic received on ProduceChannel: %v", m))
				}
				buffered[*m.TopicPartition.Topic] = append(buffered[*m.TopicPartition.Topic], m)
				bufferedCnt++
				if bufferedCnt >= batchSize {
					break loop2
				}
			default:
				break loop2
			}
		}

		totBatchCnt++
		totMsgCnt += bufferedCnt

		for topic, buffered2 := range buffered {
			err := p.produceBatch(topic, buffered2, C.RD_KAFKA_MSG_F_BLOCK)
			if err != nil {
				for _, m = range buffered2 {
					m.TopicPartition.Error = err
					p.events <- m
				}
			}
		}

		buffered = make(map[string][]*Message)
		bufferedCnt = 0
	}
}

// poller polls the rd_kafka_t handle for events until signaled for termination
func poller(p *Producer, termChan chan bool) {
	for {
		select {
		case <-termChan:
			return

		default:
			_, term := p.handle.eventPoll(p.events, 100, 1000, termChan)
			if term {
				return
			}
		}
	}
}

// GetMetadata queries the broker for cluster and topic metadata.
// If topic is non-nil, only information about that topic is returned; else if
// allTopics is false, only information about locally used topics is returned;
// else information about all topics is returned.
// GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API.
func (p *Producer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) {
	return getMetadata(p, topic, allTopics, timeoutMs)
}

// QueryWatermarkOffsets returns the broker's low and high offsets for the given topic
// and partition.
func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error) {
	return queryWatermarkOffsets(p, topic, partition, timeoutMs)
}
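
// A watermark sketch (the topic name is a hypothetical example): estimate how
// many messages partition 0 currently holds.
//
//	low, high, err := p.QueryWatermarkOffsets("myTopic", 0, 5000)
//	if err == nil {
//		fmt.Printf("partition 0 holds ~%d messages\n", high-low)
//	}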

// OffsetsForTimes looks up offsets by timestamp for the given partitions.
//
// The returned offset for each partition is the earliest offset whose
// timestamp is greater than or equal to the given timestamp in the
// corresponding partition. If the provided timestamp exceeds that of the
// last message in the partition, a value of -1 will be returned.
//
// The timestamps to query are represented as `.Offset` in the `times`
// argument and the looked-up offsets are represented as `.Offset` in the
// returned `offsets` list.
//
// The function will block for at most timeoutMs milliseconds.
//
// Duplicate Topic+Partitions are not supported.
// Per-partition errors may be returned in the `.Error` field.
func (p *Producer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) {
	return offsetsForTimes(p, times, timeoutMs)
}
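
// A lookup sketch (topic name and time window are example assumptions): find
// the first offset at or after one hour ago. The query timestamp goes in the
// Offset field, in milliseconds since the epoch.
//
//	topic := "myTopic"
//	ts := time.Now().Add(-time.Hour).UnixNano() / int64(time.Millisecond)
//	offsets, err := p.OffsetsForTimes([]kafka.TopicPartition{
//		{Topic: &topic, Partition: 0, Offset: kafka.Offset(ts)},
//	}, 5000)
//	if err == nil {
//		// offsets[0].Offset now holds the looked-up offset, or -1 if ts is
//		// newer than the partition's last message.
//	}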

// GetFatalError returns an Error object if the client instance has raised a fatal error, else nil.
func (p *Producer) GetFatalError() error {
	return getFatalError(p)
}

// TestFatalError triggers a fatal error in the underlying client.
// This is to be used strictly for testing purposes.
func (p *Producer) TestFatalError(code ErrorCode, str string) ErrorCode {
	return testFatalError(p, code, str)
}

// SetOAuthBearerToken sets the data to be transmitted
// to a broker during SASL/OAUTHBEARER authentication. It will return nil
// on success, otherwise an error if:
// 1) the token data is invalid (meaning an expiration time in the past
// or either a token value or an extension key or value that does not meet
// the regular expression requirements as per
// https://tools.ietf.org/html/rfc7628#section-3.1);
// 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
// 3) SASL/OAUTHBEARER is supported but is not configured as the client's
// authentication mechanism.
func (p *Producer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error {
	return p.handle.setOAuthBearerToken(oauthBearerToken)
}
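
// A refresh-handling sketch: react to OAuthBearerTokenRefresh events from
// Events() by installing a new token or reporting failure. The token
// retrieval function is hypothetical and application-provided; the
// OAuthBearerToken fields follow this package's type.
//
//	for e := range p.Events() {
//		if _, ok := e.(kafka.OAuthBearerTokenRefresh); ok {
//			tokenValue, expiration, err := fetchTokenSomehow()
//			if err != nil {
//				p.SetOAuthBearerTokenFailure(err.Error())
//				continue
//			}
//			p.SetOAuthBearerToken(kafka.OAuthBearerToken{
//				TokenValue: tokenValue,
//				Expiration: expiration,
//			})
//		}
//	}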

// SetOAuthBearerTokenFailure sets the error message describing why token
// retrieval/setting failed; it also schedules a new token refresh event for 10
// seconds later so the attempt may be retried. It will return nil on
// success, otherwise an error if:
// 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
// 2) SASL/OAUTHBEARER is supported but is not configured as the client's
// authentication mechanism.
func (p *Producer) SetOAuthBearerTokenFailure(errstr string) error {
	return p.handle.setOAuthBearerTokenFailure(errstr)
}

// Transactional API

// InitTransactions initializes transactions for the producer instance.
//
// This function ensures any transactions initiated by previous instances
// of the producer with the same `transactional.id` are completed.
// If the previous instance failed with a transaction in progress the
// previous transaction will be aborted.
// This function needs to be called before any other transactional or
// produce functions are called when the `transactional.id` is configured.
//
// If the last transaction had begun completion (following transaction commit)
// but not yet finished, this function will await the previous transaction's
// completion.
//
// When any previous transactions have been fenced this function
// will acquire the internal producer id and epoch, used in all future
// transactional messages issued by this producer instance.
//
// Upon successful return from this function the application has to perform at
// least one of the following operations within `transaction.timeout.ms` to
// avoid timing out the transaction on the broker:
//  * `Produce()` (et al.)
//  * `SendOffsetsToTransaction()`
//  * `CommitTransaction()`
//  * `AbortTransaction()`
//
// Parameters:
//  * `ctx` - The maximum time to block, or nil for indefinite.
//            On timeout the operation may continue in the background,
//            depending on state, and it is okay to call `InitTransactions()`
//            again.
//
// Returns nil on success or an error on failure.
// Check whether the returned error object permits retrying
// by calling `err.(kafka.Error).IsRetriable()`, or whether a fatal
// error has been raised by calling `err.(kafka.Error).IsFatal()`.
func (p *Producer) InitTransactions(ctx context.Context) error {
	cError := C.rd_kafka_init_transactions(p.handle.rk,
		cTimeoutFromContext(ctx))
	if cError != nil {
		return newErrorFromCErrorDestroy(cError)
	}

	return nil
}
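
// An initialization sketch (broker address and transactional.id are example
// values): create a transactional producer and initialize it with a bounded
// context.
//
//	p, err := kafka.NewProducer(&kafka.ConfigMap{
//		"bootstrap.servers": "localhost:9092",
//		"transactional.id":  "my-transactional-id",
//	})
//	if err != nil {
//		return err
//	}
//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//	defer cancel()
//	if err := p.InitTransactions(ctx); err != nil {
//		if err.(kafka.Error).IsRetriable() {
//			// Safe to call InitTransactions() again.
//		}
//		return err
//	}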

// BeginTransaction starts a new transaction.
//
// `InitTransactions()` must have been called successfully (once)
// before this function is called.
//
// Any messages produced, offsets sent (`SendOffsetsToTransaction()`),
// etc, after the successful return of this function will be part of
// the transaction and committed or aborted atomically.
//
// Finish the transaction by calling `CommitTransaction()` or
// abort the transaction by calling `AbortTransaction()`.
//
// Returns nil on success or an error object on failure.
// Check whether a fatal error has been raised by
// calling `err.(kafka.Error).IsFatal()`.
//
// Note: With the transactional producer, `Produce()`, et al., are only
// allowed during an on-going transaction, as started with this function.
// Any produce call outside an on-going transaction, or for a failed
// transaction, will fail.
func (p *Producer) BeginTransaction() error {
	cError := C.rd_kafka_begin_transaction(p.handle.rk)
	if cError != nil {
		return newErrorFromCErrorDestroy(cError)
	}

	return nil
}

// SendOffsetsToTransaction sends a list of topic partition offsets to the
// consumer group coordinator for `consumerMetadata`, and marks the offsets
// as part of the current transaction.
// These offsets will be considered committed only if the transaction is
// committed successfully.
//
// The offsets should be the next message your application will consume,
// i.e., the last processed message's offset + 1 for each partition.
// Either track the offsets manually during processing or use
// `consumer.Position()` (on the consumer) to get the current offsets for
// the partitions assigned to the consumer.
//
// Use this method at the end of a consume-transform-produce loop prior
// to committing the transaction with `CommitTransaction()`.
//
// Parameters:
//  * `ctx` - The maximum amount of time to block, or nil for indefinite.
//  * `offsets` - List of offsets to commit to the consumer group upon
//                successful commit of the transaction. Offsets should be
//                the next message to consume, e.g., last processed message + 1.
//  * `consumerMetadata` - The current consumer group metadata as returned by
//                `consumer.GetConsumerGroupMetadata()` on the consumer
//                instance the provided offsets were consumed from.
//
// Note: The consumer must disable auto commits (set `enable.auto.commit` to false on the consumer).
//
// Note: Logical and invalid offsets (e.g., OffsetInvalid) in
// `offsets` will be ignored. If there are no valid offsets in
// `offsets` the function will return nil and no action will be taken.
//
// Returns nil on success or an error object on failure.
// Check whether the returned error object permits retrying
// by calling `err.(kafka.Error).IsRetriable()`, or whether an abortable
// or fatal error has been raised by calling
// `err.(kafka.Error).TxnRequiresAbort()` or `err.(kafka.Error).IsFatal()`
// respectively.
func (p *Producer) SendOffsetsToTransaction(ctx context.Context, offsets []TopicPartition, consumerMetadata *ConsumerGroupMetadata) error {
	var cOffsets *C.rd_kafka_topic_partition_list_t
	if offsets != nil {
		cOffsets = newCPartsFromTopicPartitions(offsets)
		defer C.rd_kafka_topic_partition_list_destroy(cOffsets)
	}

	cgmd, err := deserializeConsumerGroupMetadata(consumerMetadata.serialized)
	if err != nil {
		return err
	}
	defer C.rd_kafka_consumer_group_metadata_destroy(cgmd)

	cError := C.rd_kafka_send_offsets_to_transaction(
		p.handle.rk,
		cOffsets,
		cgmd,
		cTimeoutFromContext(ctx))
	if cError != nil {
		return newErrorFromCErrorDestroy(cError)
	}

	return nil
}
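
// A consume-transform-produce sketch (the consumer c, its assigned
// partitions, and ctx are assumed to exist; error handling is abbreviated):
//
//	if err := p.BeginTransaction(); err != nil {
//		return err
//	}
//	// ... consume from c, transform, and Produce() the results ...
//	position, _ := c.Position(assignedPartitions)
//	meta, _ := c.GetConsumerGroupMetadata()
//	if err := p.SendOffsetsToTransaction(ctx, position, meta); err != nil {
//		if err.(kafka.Error).TxnRequiresAbort() {
//			return p.AbortTransaction(ctx)
//		}
//		return err
//	}
//	return p.CommitTransaction(ctx)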

// CommitTransaction commits the current transaction.
//
// Any outstanding messages will be flushed (delivered) before actually
// committing the transaction.
//
// If any of the outstanding messages fail permanently the current
// transaction will enter the abortable error state and this
// function will return an abortable error; in this case the application
// must call `AbortTransaction()` before attempting a new
// transaction with `BeginTransaction()`.
//
// Parameters:
//  * `ctx` - The maximum amount of time to block, or nil for indefinite.
//
// Note: This function will block until all outstanding messages are
// delivered and the transaction commit request has been successfully
// handled by the transaction coordinator, or until the `ctx` expires,
// whichever comes first. On timeout the application may
// call the function again.
//
// Note: Will automatically call `Flush()` to ensure all queued
// messages are delivered before attempting to commit the transaction.
// The application MUST serve the `producer.Events()` channel for delivery
// reports in a separate goroutine during this time.
//
// Returns nil on success or an error object on failure.
// Check whether the returned error object permits retrying
// by calling `err.(kafka.Error).IsRetriable()`, or whether an abortable
// or fatal error has been raised by calling
// `err.(kafka.Error).TxnRequiresAbort()` or `err.(kafka.Error).IsFatal()`
// respectively.
func (p *Producer) CommitTransaction(ctx context.Context) error {
	cError := C.rd_kafka_commit_transaction(p.handle.rk,
		cTimeoutFromContext(ctx))
	if cError != nil {
		return newErrorFromCErrorDestroy(cError)
	}

	return nil
}

// AbortTransaction aborts the ongoing transaction.
//
// This function should also be used to recover from non-fatal abortable
// transaction errors.
//
// Any outstanding messages will be purged and fail with
// `ErrPurgeInflight` or `ErrPurgeQueue`.
//
// Parameters:
//  * `ctx` - The maximum amount of time to block, or nil for indefinite.
//
// Note: This function will block until all outstanding messages are purged
// and the transaction abort request has been successfully
// handled by the transaction coordinator, or until the `ctx` expires,
// whichever comes first. On timeout the application may
// call the function again.
//
// Note: Will automatically call `Purge()` and `Flush()` to ensure all queued
// and in-flight messages are purged before attempting to abort the transaction.
// The application MUST serve the `producer.Events()` channel for delivery
// reports in a separate goroutine during this time.
//
// Returns nil on success or an error object on failure.
// Check whether the returned error object permits retrying
// by calling `err.(kafka.Error).IsRetriable()`, or whether a fatal error
// has been raised by calling `err.(kafka.Error).IsFatal()`.
func (p *Producer) AbortTransaction(ctx context.Context) error {
	cError := C.rd_kafka_abort_transaction(p.handle.rk,
		cTimeoutFromContext(ctx))
	if cError != nil {
		return newErrorFromCErrorDestroy(cError)
	}

	return nil
}