github.com/aristanetworks/goarista@v0.0.0-20240514173732-cca2755bbd44/kafka/producer/producer.go

// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

package producer

import (
	"os"
	"sync"

	"github.com/aristanetworks/goarista/kafka"
	"github.com/aristanetworks/goarista/kafka/gnmi"

	"github.com/IBM/sarama"
	"github.com/aristanetworks/glog"
	"google.golang.org/protobuf/proto"
)

// Producer forwards messages received on a channel to Kafka.
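//
// A minimal usage sketch (the encoder, broker address and message below are
// placeholders, not part of this file):
//
//	var enc kafka.MessageEncoder // e.g. an encoder built from the kafka/gnmi package
//	p, err := producer.New(enc, []string{"localhost:9092"}, nil)
//	if err != nil {
//		// handle the error
//	}
//	p.Start()
//	p.Write(msg) // msg is a proto.Message
//	p.Stop()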
type Producer interface {
	Start()
	Write(proto.Message)
	Stop()
}

type producer struct {
	notifsChan    chan proto.Message
	kafkaProducer sarama.AsyncProducer
	encoder       kafka.MessageEncoder
	done          chan struct{}
	wg            sync.WaitGroup
}

// New creates a new Kafka producer. If kafkaConfig is nil, a default
// configuration is used: the local hostname as client ID, Snappy compression,
// returned successes and acks from all in-sync replicas.
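//
// A caller that needs different settings can pass its own configuration
// instead. A sketch (enc, brokers and the values shown are placeholders):
//
//	cfg := sarama.NewConfig()
//	cfg.ClientID = "my-client"
//	cfg.Producer.Return.Successes = true // so acks are delivered on Successes()
//	p, err := producer.New(enc, brokers, cfg)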
func New(encoder kafka.MessageEncoder,
	kafkaAddresses []string, kafkaConfig *sarama.Config) (Producer, error) {

	if kafkaConfig == nil {
		kafkaConfig = sarama.NewConfig()
		hostname, err := os.Hostname()
		if err != nil {
			hostname = ""
		}
		kafkaConfig.ClientID = hostname
		kafkaConfig.Producer.Compression = sarama.CompressionSnappy
		kafkaConfig.Producer.Return.Successes = true
		kafkaConfig.Producer.RequiredAcks = sarama.WaitForAll
	}

	kafkaProducer, err := sarama.NewAsyncProducer(kafkaAddresses, kafkaConfig)
	if err != nil {
		return nil, err
	}

	p := &producer{
		notifsChan:    make(chan proto.Message),
		kafkaProducer: kafkaProducer,
		encoder:       encoder,
		done:          make(chan struct{}),
		wg:            sync.WaitGroup{},
	}
	return p, nil
}

// Start makes the producer start processing writes by launching the write
// loop along with the success and error handlers in separate goroutines.
// This method is non-blocking.
func (p *producer) Start() {
	p.wg.Add(3)
	go p.handleSuccesses()
	go p.handleErrors()
	go p.run()
}

func (p *producer) run() {
	defer p.wg.Done()
	for {
		select {
		case batch, open := <-p.notifsChan:
			if !open {
				return
			}
			err := p.produceNotifications(batch)
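			// Errors other than gnmi.UnhandledSubscribeResponseError are
			// treated as fatal; unhandled subscribe responses are skipped.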
			if err != nil {
				if _, ok := err.(gnmi.UnhandledSubscribeResponseError); !ok {
					panic(err)
				}
			}
		case <-p.done:
			return
		}
	}
}

// Write sends msg to the write loop to be encoded and produced to Kafka.
// It blocks until the message is accepted or the producer is stopped.
func (p *producer) Write(msg proto.Message) {
	select {
	case p.notifsChan <- msg:
	case <-p.done:
		// TODO: This should probably return an EOF error, but that
		// would change the API
	}
}

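// Stop shuts the producer down: it signals the goroutines started by Start to
// exit, waits for them to finish and then closes the underlying Kafka producer.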
func (p *producer) Stop() {
	close(p.done)
	p.wg.Wait()
	p.kafkaProducer.Close()
}

// produceNotifications encodes protoMessage and feeds the resulting Kafka
// messages to the async producer.
func (p *producer) produceNotifications(protoMessage proto.Message) error {
	messages, err := p.encoder.Encode(protoMessage)
	if err != nil {
		return err
	}
	for _, m := range messages {
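		// Also select on done so a blocked Kafka input channel cannot
		// prevent Stop from completing.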
		select {
		case <-p.done:
			return nil
		case p.kafkaProducer.Input() <- m:
			glog.V(9).Infof("Message produced to Kafka: %v", m)
		}
	}
	return nil
}

// handleSuccesses reads from the producer's Successes channel and hands each
// acknowledged message to the encoder for monitoring.
func (p *producer) handleSuccesses() {
	defer p.wg.Done()
	for {
		select {
		case msg, open := <-p.kafkaProducer.Successes():
			if !open {
				return
			}
			p.encoder.HandleSuccess(msg)
		case <-p.done:
			return
		}
	}
}

// handleErrors reads from the producer's Errors channel and hands each error
// to the encoder for monitoring.
func (p *producer) handleErrors() {
	defer p.wg.Done()
	for {
		select {
		case msg, open := <-p.kafkaProducer.Errors():
			if !open {
				return
			}
			p.encoder.HandleError(msg)
		case <-p.done:
			return
		}
	}
}