github.com/Goboolean/common@v0.0.0-20231130153141-cb54596b217d/pkg/kafka/producer.go

package kafka

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/Goboolean/common/pkg/resolver"
	"github.com/confluentinc/confluent-kafka-go/kafka"
	"github.com/confluentinc/confluent-kafka-go/schemaregistry"
	"github.com/confluentinc/confluent-kafka-go/schemaregistry/serde"
	"github.com/confluentinc/confluent-kafka-go/schemaregistry/serde/protobuf"
	log "github.com/sirupsen/logrus"
	"google.golang.org/protobuf/proto"
)



// Serializer converts a message into the byte payload sent to Kafka.
type Serializer interface {
	Serialize(topic string, v interface{}) ([]byte, error)
}

// ProtoSerializer marshals plain protobuf messages without a schema registry.
type ProtoSerializer struct{}

func (s *ProtoSerializer) Serialize(topic string, v interface{}) ([]byte, error) {
	msg, ok := v.(proto.Message)
	if !ok {
		return nil, fmt.Errorf("kafka: cannot serialize %T: not a proto.Message", v)
	}
	return proto.Marshal(msg)
}

// defaultSerializer is used when no schema registry is configured.
func defaultSerializer() Serializer {
	return &ProtoSerializer{}
}
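
// Any type with a matching Serialize method satisfies Serializer. For
// illustration only (NewProducer currently chooses the implementation itself,
// so the hypothetical jsonSerializer below cannot be injected from outside):
//
//	type jsonSerializer struct{}
//
//	func (jsonSerializer) Serialize(topic string, v interface{}) ([]byte, error) {
//		return json.Marshal(v)
//	}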



// Producer wraps a confluent-kafka-go producer together with the serializer
// chosen at construction time and, when configured, a schema registry client.
type Producer struct {
	producer *kafka.Producer
	serial   Serializer
	registry schemaregistry.Client

	wg     sync.WaitGroup
	ctx    context.Context
	cancel context.CancelFunc
}

// NewProducer creates a Producer connected to BOOTSTRAP_HOST. When the
// optional REGISTRY_HOST is set, values are serialized through the Confluent
// schema registry; otherwise plain protobuf marshalling is used.
//
// example:
//
//	p, err := NewProducer(&resolver.ConfigMap{
//		"BOOTSTRAP_HOST": os.Getenv("KAFKA_BOOTSTRAP_HOST"),
//		"REGISTRY_HOST":  os.Getenv("KAFKA_REGISTRY_HOST"), // optional
//	})
func NewProducer(c *resolver.ConfigMap) (*Producer, error) {

	bootstrap_host, err := c.GetStringKey("BOOTSTRAP_HOST")
	if err != nil {
		return nil, err
	}

	registry_host, exists, err := c.GetStringKeyOptional("REGISTRY_HOST")
	if err != nil {
		return nil, err
	}

	p, err := kafka.NewProducer(&kafka.ConfigMap{
		"bootstrap.servers":   bootstrap_host,
		"acks":                -1,
		"go.delivery.reports": true,
	})
	if err != nil {
		return nil, err
	}

	ctx, cancel := context.WithCancel(context.Background())

	instance := &Producer{
		producer: p,
		ctx:      ctx,
		cancel:   cancel,
	}

	if exists {
		r, err := schemaregistry.NewClient(schemaregistry.NewConfig(registry_host))
		if err != nil {
			cancel()
			p.Close()
			return nil, err
		}

		s, err := protobuf.NewSerializer(r, serde.ValueSerde, protobuf.NewSerializerConfig())
		if err != nil {
			cancel()
			p.Close()
			return nil, err
		}

		// keep the registry client so Ping can check its health as well
		instance.registry = r
		instance.serial = s
	} else {
		instance.serial = defaultSerializer()
	}

	instance.traceEvent(ctx, &instance.wg)
	return instance, nil
}
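
// A minimal end-to-end sketch of the producer lifecycle; the topic name and
// the pb.Trade message type are hypothetical, not part of this package:
//
//	p, err := NewProducer(&resolver.ConfigMap{
//		"BOOTSTRAP_HOST": os.Getenv("KAFKA_BOOTSTRAP_HOST"),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer p.Close()
//
//	if err := p.Produce("trade.usa.1m", &pb.Trade{}); err != nil {
//		log.Error(err)
//	}
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	if _, err := p.Flush(ctx); err != nil {
//		log.Error(err)
//	}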


// Register runs the schema through the configured serializer, registering it
// with the schema registry when one is configured. The schema argument should
// be a protobuf-generated struct initialized with its default values.
// This function returns the ID of the schema in the registry.
// TODO: implement the logic that returns the real ID; it currently always returns 0.
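//
// A minimal usage sketch (the pb.Trade message type is hypothetical, not part
// of this package):
//
//	id, err := p.Register("trade.usa.1m", &pb.Trade{})
//	_ = id // always 0 until the TODO above is implemented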
func (p *Producer) Register(topic string, schema proto.Message) (int64, error) {
	_, err := p.serial.Serialize(topic, schema)
	return 0, err
}


// Produce serializes msg and enqueues it for asynchronous delivery to topic.
// Delivery results are reported on the producer's event channel and logged by
// traceEvent; call Flush to wait for outstanding messages.
func (p *Producer) Produce(topic string, msg proto.Message) error {
	payload, err := p.serial.Serialize(topic, msg)
	if err != nil {
		return err
	}

	if err = p.producer.Produce(&kafka.Message{
		TopicPartition: kafka.TopicPartition{
			Topic:     &topic,
			Partition: kafka.PartitionAny, // let the partitioner pick the partition
		},
		Value: payload,
	}, nil); err != nil {
		return err
	}

	return nil
}
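
// When the client-side queue is full, the underlying producer typically
// returns a kafka.Error whose code is kafka.ErrQueueFull. A hedged retry
// sketch (topic name and message type are hypothetical):
//
//	if err := p.Produce("trade.usa.1m", &pb.Trade{}); err != nil {
//		if ke, ok := err.(kafka.Error); ok && ke.Code() == kafka.ErrQueueFull {
//			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
//			defer cancel()
//			p.Flush(ctx) // drain outstanding deliveries, then retry once
//			err = p.Produce("trade.usa.1m", &pb.Trade{})
//		}
//	}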


// Flush blocks until every queued message has been delivered or the deadline
// of ctx expires. The context must carry a deadline; on timeout the returned
// int is the number of messages still waiting to be delivered.
func (p *Producer) Flush(ctx context.Context) (int, error) {

	deadline, ok := ctx.Deadline()
	if !ok {
		return 0, ErrDeadlineSettingRequired
	}

	left := p.producer.Flush(int(time.Until(deadline).Milliseconds()))
	if left != 0 {
		return left, ErrFailedToFlush
	}

	return 0, nil
}
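
// A minimal usage sketch for Flush; the five-second budget is arbitrary:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	if left, err := p.Flush(ctx); err != nil {
//		log.WithField("left", left).Error("failed to flush all messages")
//	}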


// traceEvent consumes delivery reports from the underlying producer and logs
// them. The goroutine exits once the events channel is closed by Close.
func (p *Producer) traceEvent(ctx context.Context, wg *sync.WaitGroup) {

	wg.Add(1) // register before launching the goroutine so Close cannot miss it
	go func() {
		defer wg.Done()

		for e := range p.producer.Events() {
			switch ev := e.(type) {
			case *kafka.Message:
				if ev.TopicPartition.Error != nil {
					log.WithFields(log.Fields{
						"topic": *ev.TopicPartition.Topic,
						"data":  ev.Value,
						"error": ev.TopicPartition.Error,
					}).Error("Producer failed to deliver event to kafka")
				} else {
					log.WithFields(log.Fields{
						"topic":     *ev.TopicPartition.Topic,
						"data":      ev.Value,
						"partition": ev.TopicPartition.Partition,
						"offset":    ev.TopicPartition.Offset,
					}).Trace("Producer delivered event to kafka")
				}
			}
		}
	}()
}


// Close shuts down the underlying producer, which ends the delivery-report
// goroutine, then waits for it to finish. Call Flush first when queued
// messages must still be delivered.
func (p *Producer) Close() {
	p.producer.Close()
	p.cancel()
	p.wg.Wait()
}


// Ping verifies connectivity to the Kafka cluster and, when a schema registry
// is configured, to the registry as well. The context must carry a deadline;
// an error is returned if there is no response before it expires.
func (p *Producer) Ping(ctx context.Context) error {
	deadline, ok := ctx.Deadline()
	if !ok {
		return ErrDeadlineSettingRequired
	}

	remaining := time.Until(deadline)
	if _, err := p.producer.GetMetadata(nil, true, int(remaining.Milliseconds())); err != nil {
		return err
	}

	if p.registry != nil {
		// Note: this check assumes a subject named "schema" exists in the registry.
		if _, err := p.registry.GetAllVersions("schema"); err != nil {
			return err
		}
	}

	return nil
}
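
// A minimal usage sketch for Ping; the three-second timeout is arbitrary:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
//	defer cancel()
//	if err := p.Ping(ctx); err != nil {
//		log.WithError(err).Error("kafka is not reachable")
//	}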