github.com/rudderlabs/rudder-go-kit@v0.30.0/kafkaclient/consumer.go (about)

     1  package client
     2  
     3  import (
     4  	"context"
     5  	"time"
     6  
     7  	"github.com/segmentio/kafka-go"
     8  )
     9  
// ConsumerStartOffset selects which offset a consumer begins reading from
// when it has no previously committed position for a partition.
// It is translated to the corresponding kafka-go start offset in NewConsumer.
type ConsumerStartOffset int64

const (
	// LastOffset is the most recent offset available for a partition
	LastOffset ConsumerStartOffset = iota
	// FirstOffset is the least recent offset available for a partition
	FirstOffset
)
    18  
// ConsumerConfig carries the settings that NewConsumer copies into the
// underlying kafka.ReaderConfig. The zero value is usable: it yields a
// consumer that starts from FirstOffset with kafka-go's defaults.
type ConsumerConfig struct {
	// GroupID is forwarded to kafka.ReaderConfig.GroupID.
	// NOTE(review): kafka-go treats GroupID and an explicit Partition as
	// mutually exclusive — confirm callers set only one of the two.
	GroupID string
	// Partition is forwarded to kafka.ReaderConfig.Partition.
	Partition int
	// StartOffset picks the initial read position; anything other than
	// LastOffset falls back to FirstOffset (see NewConsumer).
	StartOffset ConsumerStartOffset
	// CommitInterval is forwarded to kafka.ReaderConfig.CommitInterval.
	CommitInterval time.Duration
	// FetchBatchesMaxWait is forwarded to kafka.ReaderConfig.MaxWait.
	FetchBatchesMaxWait time.Duration
	// Logger and ErrorLogger are forwarded to the reader's regular and
	// error log sinks respectively.
	Logger      Logger
	ErrorLogger Logger
}
    28  
// Consumer provides a high-level API for reading messages from Kafka
type Consumer struct {
	// reader is the wrapped kafka-go reader; all Consumer methods delegate to it.
	reader *kafka.Reader
}
    33  
    34  // NewConsumer instantiates a new consumer.
    35  func (c *Client) NewConsumer(topic string, conf ConsumerConfig) *Consumer { // skipcq: CRT-P0003
    36  	var readerConf kafka.ReaderConfig
    37  
    38  	readerConf.Brokers = c.addresses
    39  	readerConf.Topic = topic
    40  	readerConf.Dialer = c.dialer
    41  
    42  	readerConf.GroupID = conf.GroupID
    43  	readerConf.Partition = conf.Partition
    44  	readerConf.CommitInterval = conf.CommitInterval
    45  	readerConf.MaxWait = conf.FetchBatchesMaxWait
    46  	readerConf.StartOffset = kafka.FirstOffset
    47  	if conf.StartOffset == LastOffset {
    48  		readerConf.StartOffset = kafka.LastOffset
    49  	}
    50  
    51  	readerConf.Logger = conf.Logger
    52  	readerConf.ErrorLogger = conf.ErrorLogger
    53  
    54  	return &Consumer{
    55  		reader: kafka.NewReader(readerConf),
    56  	}
    57  }
    58  
// Close tries to close the consumer, but it will return sooner if the context is canceled.
// A routine in background will still try to close the consumer since the underlying library does not support
// contexts on Close().
func (c *Consumer) Close(ctx context.Context) error {
	// Buffered so the goroutine can complete its send even if we return
	// early on ctx.Done() and nobody ever receives — avoids a goroutine leak.
	done := make(chan error, 1)
	go func() {
		done <- c.reader.Close()
	}()

	select {
	case <-ctx.Done():
		return ctx.Err()
	case err := <-done:
		return err
	}
}
    75  
    76  // Receive reads and returns the next message from the consumer.
    77  // The method blocks until a message becomes available, or an error occurs.
    78  // The program may also specify a context to asynchronously cancel the blocking operation.
    79  func (c *Consumer) Receive(ctx context.Context) (Message, error) {
    80  	msg, err := c.reader.ReadMessage(ctx)
    81  	if err != nil {
    82  		return Message{}, err
    83  	}
    84  
    85  	var headers []MessageHeader
    86  	if l := len(msg.Headers); l > 0 {
    87  		headers = make([]MessageHeader, l)
    88  		for i := range msg.Headers {
    89  			headers[i] = MessageHeader{
    90  				Key:   msg.Headers[i].Key,
    91  				Value: msg.Headers[i].Value,
    92  			}
    93  		}
    94  	}
    95  
    96  	return Message{
    97  		Key:       msg.Key,
    98  		Value:     msg.Value,
    99  		Topic:     msg.Topic,
   100  		Partition: int32(msg.Partition),
   101  		Offset:    msg.Offset,
   102  		Headers:   headers,
   103  		Timestamp: msg.Time,
   104  	}, nil
   105  }
   106  
   107  func (c *Consumer) Ack(ctx context.Context, msgs ...Message) error {
   108  	internalMsgs := make([]kafka.Message, 0, len(msgs))
   109  	for _, msg := range msgs {
   110  		internalMsgs = append(internalMsgs, kafka.Message{
   111  			Topic:     msg.Topic,
   112  			Partition: int(msg.Partition),
   113  			Offset:    msg.Offset,
   114  		})
   115  	}
   116  	return c.reader.CommitMessages(ctx, internalMsgs...)
   117  }