github.com/moleculer-go/moleculer@v0.3.3/transit/kafka/kafka.go

package kafka

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"sync"
	"time"

	"github.com/moleculer-go/moleculer"
	"github.com/moleculer-go/moleculer/serializer"
	"github.com/moleculer-go/moleculer/transit"
	"github.com/segmentio/kafka-go"

	log "github.com/sirupsen/logrus"
)

var DefaultConfig = KafkaOptions{
	partition: 0,
}

type subscriber struct {
	command string
	nodeID  string
	handler transit.TransportHandler
}

type subscription struct {
	doneChannel chan bool
}

type KafkaTransporter struct {
	prefix     string
	opts       *KafkaOptions
	logger     *log.Entry
	serializer serializer.Serializer

	connectionEnable bool
	nodeID           string
	subscribers      []subscriber
	subscriptions    []*subscription
	publishers       map[string]*kafka.Writer
	// publishersMutex guards the lazy creation of writers in publishers.
	publishersMutex sync.Mutex
}

type KafkaOptions struct {
	Url        string
	Addr       string
	Name       string
	Logger     *log.Entry
	Serializer serializer.Serializer

	partition int
}

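// mergeConfigs overlays the fields set in userConfig on top of baseConfig.
// When Addr is not given explicitly it is derived from the kafka:// Url.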
func mergeConfigs(baseConfig KafkaOptions, userConfig KafkaOptions) KafkaOptions {

	if len(userConfig.Url) != 0 {
		baseConfig.Url = userConfig.Url
	}

	if len(userConfig.Addr) != 0 {
		baseConfig.Addr = userConfig.Addr
	} else {
		baseConfig.Addr = strings.Replace(userConfig.Url, "kafka://", "", 1)
	}

	if userConfig.Logger != nil {
		baseConfig.Logger = userConfig.Logger
	}

	if userConfig.partition != 0 {
		baseConfig.partition = userConfig.partition
	}

	return baseConfig
}

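// CreateKafkaTransporter returns a transit.Transport backed by Kafka.
//
// A minimal usage sketch; the broker address, prefix, node id and the JSON
// serializer constructor are illustrative assumptions, not part of this file:
//
//	logger := log.WithField("transport", "kafka")
//	transporter := CreateKafkaTransporter(KafkaOptions{
//		Url:    "kafka://localhost:9092",
//		Logger: logger,
//	})
//	transporter.SetPrefix("MOL")
//	transporter.SetNodeID("node-1")
//	transporter.SetSerializer(serializer.CreateJSONSerializer(logger))
//	if err := <-transporter.Connect(); err != nil {
//		logger.Fatal(err)
//	}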
func CreateKafkaTransporter(options KafkaOptions) transit.Transport {
	options = mergeConfigs(DefaultConfig, options)

	return &KafkaTransporter{
		opts:       &options,
		logger:     options.Logger,
		publishers: make(map[string]*kafka.Writer),
	}
}

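// Connect verifies that the broker is reachable by dialing the leader of this
// node's PING topic, re-creates consumers for any existing subscribers and
// reports the outcome on the returned channel.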
func (t *KafkaTransporter) Connect() chan error {
	endChan := make(chan error)
	go func() {
		t.logger.Debug("Kafka Connect() - url: ", t.opts.Url)

		topic := t.topicName("PING", t.nodeID)
		conn, err := kafka.DialLeader(context.Background(), "tcp", t.opts.Addr, topic, t.opts.partition)

		if err != nil {
			t.logger.Error("Kafka Connect() - Error: ", err, " url: ", t.opts.Url)
			endChan <- errors.New(fmt.Sprint("Error connecting to Kafka. error: ", err, " url: ", t.opts.Url))
			return
		}
		// The dial is only a reachability probe; close the connection again.
		conn.Close()

		for _, subscriber := range t.subscribers {
			t.subscribeInternal(subscriber)
		}
		t.connectionEnable = true
		endChan <- nil
	}()
	return endChan
}

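// Subscribe registers a handler for a command, optionally scoped to a node,
// and starts consuming the matching topic. It panics when the transporter is
// not connected.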
func (t *KafkaTransporter) Subscribe(command, nodeID string, handler transit.TransportHandler) {
	if !t.connectionEnable {
		panic("KafkaTransporter disconnected")
	}
	subscriber := subscriber{command, nodeID, handler}
	t.subscribers = append(t.subscribers, subscriber)
	t.subscribeInternal(subscriber)
}

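// subscribeInternal starts a consumer goroutine for a subscriber and records
// the subscription so Disconnect can stop it later.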
func (t *KafkaTransporter) subscribeInternal(subscriber subscriber) {
	topic := t.topicName(subscriber.command, subscriber.nodeID)
	doneChannel := make(chan bool)
	autoDelete := t.getQueueOptions(subscriber.command)

	if subscriber.nodeID == "" {
		go t.doConsume(topic, subscriber.handler, autoDelete, doneChannel)
	} else {
		queueName := t.prefix + "." + subscriber.command + "." + t.nodeID
		go t.doConsume(queueName, subscriber.handler, autoDelete, doneChannel)
	}
	t.subscriptions = append(t.subscriptions, &subscription{
		doneChannel: doneChannel,
	})
}

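// getQueueOptions maps a packet command to its consume mode: request,
// response and event packets are fetched without committing offsets, while
// transient housekeeping packets (heartbeats, discovery, ping/pong) are
// consumed with auto-commit. Unknown commands default to no auto-commit.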
func (t *KafkaTransporter) getQueueOptions(command string) (autoDelete bool) {
	switch command {
	// Requests and responses don't expire.
	case "REQ", "RES", "EVENT", "EVENTLB":
		autoDelete = false

	// Packet types meant for internal use
	case "HEARTBEAT", "DISCOVER", "DISCONNECT", "INFO", "PING", "PONG":
		autoDelete = true
	}
	return
}

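// doConsume creates a reader for the given topic and pumps messages to the
// handler from a background goroutine until a value arrives on doneChannel.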
func (t *KafkaTransporter) doConsume(
	queueName string, handler transit.TransportHandler, autoDelete bool, doneChannel chan bool) {
	reader := kafka.NewReader(kafka.ReaderConfig{
		Brokers:         []string{t.opts.Addr},
		Topic:           queueName,
		GroupID:         t.nodeID,
		Partition:       t.opts.partition,
		ReadLagInterval: -1,
	})
	defer t.closeReader(reader)

	messageChannel := make(chan []byte)
	errorChannel := make(chan error)

	// Cancelling this context unblocks a pending ReadMessage/FetchMessage so
	// the reader goroutine can exit when the subscription is stopped.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	go func() {
		for {
			var msg kafka.Message
			var err error
			if autoDelete {
				// ReadMessage commits offsets automatically.
				msg, err = reader.ReadMessage(ctx)
			} else {
				// FetchMessage reads without committing the offset.
				msg, err = reader.FetchMessage(ctx)
			}
			if err != nil {
				if ctx.Err() != nil {
					return
				}
				select {
				case errorChannel <- err:
				case <-ctx.Done():
					return
				}
				continue
			}
			select {
			case messageChannel <- msg.Value:
			case <-ctx.Done():
				return
			}
		}
	}()

	for {
		select {
		case err := <-errorChannel:
			t.logger.Error("failed to read messages: ", err)
		case msg := <-messageChannel:
			payload := t.serializer.BytesToPayload(&msg)
			handler(payload)
		case <-doneChannel:
			return
		}
	}
}

func (t *KafkaTransporter) closeReader(reader *kafka.Reader) {
	if err := reader.Close(); err != nil {
		t.logger.Error("Could not close topic reader: ", err)
	}
}

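// Disconnect stops all consumers, closes the cached topic writers and marks
// the transporter as disconnected. Completion is reported on the returned
// channel.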
func (t *KafkaTransporter) Disconnect() chan error {
	errChan := make(chan error)
	go func() {
		for _, subscription := range t.subscriptions {
			subscription.doneChannel <- true
		}
		// Drop the stopped subscriptions; Connect() re-creates them from
		// t.subscribers on reconnect.
		t.subscriptions = nil

		t.publishersMutex.Lock()
		for _, publisher := range t.publishers {
			t.closeWriter(publisher)
		}
		// Closed writers must not be reused, so reset the cache.
		t.publishers = make(map[string]*kafka.Writer)
		t.publishersMutex.Unlock()

		t.connectionEnable = false
		errChan <- nil
	}()

	return errChan
}

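// Publish serializes the payload and writes it to the topic derived from the
// command and target node. It panics when the transporter is not connected.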
func (t *KafkaTransporter) Publish(command, nodeID string, message moleculer.Payload) {
	if !t.connectionEnable {
		panic("KafkaTransporter disconnected")
	}
	topic := t.topicName(command, nodeID)

	data := t.serializer.PayloadToBytes(message)
	t.publishMessage(data, topic)
}

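// publishMessage lazily creates and caches one kafka.Writer per topic, then
// writes the message through it.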
func (t *KafkaTransporter) publishMessage(message []byte, topic string) {
	t.publishersMutex.Lock()
	writer := t.publishers[topic]
	if writer == nil {
		writer = &kafka.Writer{
			Addr:         kafka.TCP(t.opts.Addr),
			Topic:        topic,
			Balancer:     &kafka.LeastBytes{},
			BatchTimeout: 10 * time.Millisecond,
		}
		t.publishers[topic] = writer
	}
	t.publishersMutex.Unlock()

	err := writer.WriteMessages(context.Background(),
		kafka.Message{
			Value: message,
		},
	)

	if err != nil {
		t.logger.Error("failed to write messages: ", err)
	}
}

func (t *KafkaTransporter) closeWriter(writer *kafka.Writer) {
	if err := writer.Close(); err != nil {
		t.logger.Error("Could not close topic writer: ", err)
	}
}

func (t *KafkaTransporter) SetPrefix(prefix string) {
	t.prefix = prefix
}

func (t *KafkaTransporter) SetNodeID(nodeID string) {
	t.nodeID = nodeID
}

func (t *KafkaTransporter) SetSerializer(serializer serializer.Serializer) {
	t.serializer = serializer
}

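// topicName joins prefix, command and (when present) nodeID with dots,
// e.g. "MOL.REQ.node-1".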
func (t *KafkaTransporter) topicName(command string, nodeID string) string {
	parts := []string{t.prefix, command}
	if nodeID != "" {
		parts = append(parts, nodeID)
	}
	return strings.Join(parts, ".")
}