github.com/kaydxh/golang@v0.0.131/pkg/mq/kafka/kafka.go

/*
 *Copyright (c) 2023, kaydxh
 *
 *Permission is hereby granted, free of charge, to any person obtaining a copy
 *of this software and associated documentation files (the "Software"), to deal
 *in the Software without restriction, including without limitation the rights
 *to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 *copies of the Software, and to permit persons to whom the Software is
 *furnished to do so, subject to the following conditions:
 *
 *The above copyright notice and this permission notice shall be included in all
 *copies or substantial portions of the Software.
 *
 *THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 *IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 *FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 *AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 *LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 *OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 *SOFTWARE.
 */
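
// Package kafka wraps github.com/segmentio/kafka-go with per-topic producer and
// consumer management, exponential-backoff connection retries, and simple
// Send/ReadStream helpers.
//
// A minimal usage sketch (illustrative only: the broker address and topic are
// placeholders, error handling is elided, and NewProducer, NewConsumer, and
// MQOption are defined elsewhere in this package):
//
//	ctx := context.Background()
//	q := NewMQ(MQConfig{Brokers: []string{"localhost:9092"}})
//	if _, err := q.InstallMQ(ctx, 5*time.Second, time.Minute); err != nil {
//		// handle connection failure
//	}
//	defer q.Close()
//
//	if _, err := q.AsProducers(ctx, "demo-topic"); err != nil {
//		// handle producer setup failure
//	}
//	_ = q.Send(ctx, "demo-topic", kafka.Message{Value: []byte("hello")})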
package kafka

import (
	"context"
	"fmt"
	"net"
	"strconv"
	"sync"
	"time"

	errors_ "github.com/kaydxh/golang/go/errors"
	time_ "github.com/kaydxh/golang/go/time"
	mq_ "github.com/kaydxh/golang/pkg/mq"
	kafka "github.com/segmentio/kafka-go"
)

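// MQConfig carries the static configuration for an MQ instance: the list of
// Kafka broker addresses ("host:port") to connect to.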
type MQConfig struct {
	Brokers []string
}

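// MQOptions holds the tunable settings applied to an MQ via MQOption values:
// optional SASL credentials, dial and reconnect timing, and the per-producer
// and per-consumer options used when topics are attached.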
type MQOptions struct {
	SaslUsername string
	SaslPassword string

	dialTimeout         time.Duration
	reconnectBackOff    time.Duration
	reconnectBackOffMax time.Duration

	producerOpts ProducerOptions
	consumerOpts ConsumerOptions
}

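// ProducerOptions groups the batching settings (target batch size, maximum
// request bytes, and flush interval) for producers.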
type ProducerOptions struct {
	// The default is to use a target batch size of 100 messages.
	batchSize int

	// Limit the maximum size of a request in bytes before being sent to
	// a partition.
	//
	// The default is to use a kafka default value of 1048576.
	batchBytes int

	// Time limit on how often incomplete message batches will be flushed
	// to kafka.
	//
	// The default is to flush at least every second.
	batchTimeout time.Duration
}

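// ConsumerOptions groups the consumer group, partition, and fetch limits used
// for consumers created by AsConsumers.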
type ConsumerOptions struct {
	groupID   string
	partition int

	// MinBytes indicates to the broker the minimum batch size that the consumer
	// will accept. Setting a high minimum when consuming from a low-volume topic
	// may result in delayed delivery when the broker does not have enough data to
	// satisfy the defined minimum.
	//
	// Default: 1
	minBytes int

	// MaxBytes indicates to the broker the maximum batch size that the consumer
	// will accept. The broker will truncate a message to satisfy this maximum, so
	// choose a value that is high enough for your largest message size.
	//
	// Default: 1MB
	maxBytes int

	// Maximum amount of time to wait for new data to come when fetching batches
	// of messages from kafka.
	//
	// Default: 10s
	maxWait time.Duration
}

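// MQ is a Kafka message-queue client that holds one controller connection plus
// per-topic producers and consumers, each map guarded by its own mutex.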
type MQ struct {
	*kafka.Conn
	producers map[string]*Producer // keyed by topic
	consumers map[string]*Consumer // keyed by topic

	producerLock sync.Mutex
	consumerLock sync.Mutex

	Conf MQConfig
	opts MQOptions
}

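// NewMQ creates an MQ with the given broker configuration and applies any
// MQOption values. Call InstallMQ to establish the controller connection.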
func NewMQ(conf MQConfig, opts ...MQOption) *MQ {
	c := &MQ{
		Conf:      conf,
		producers: make(map[string]*Producer),
		consumers: make(map[string]*Consumer),
	}
	c.ApplyOptions(opts...)

	return c
}

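// newController dials the given broker, discovers the cluster controller, and
// returns a connection to that controller. The bootstrap connection is closed
// before returning.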
func newController(broker string) (*kafka.Conn, error) {
	conn, err := kafka.Dial("tcp", broker)
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	controller, err := conn.Controller()
	if err != nil {
		return nil, err
	}
	controllerConn, err := kafka.Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
	if err != nil {
		return nil, err
	}

	return controllerConn, nil
}

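// InstallMQ connects to the Kafka cluster controller, trying each configured
// broker and retrying with exponential backoff (intervals capped at
// maxWaitInterval) until a connection succeeds or failAfter elapses.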
func (q *MQ) InstallMQ(
	ctx context.Context,
	maxWaitInterval time.Duration,
	failAfter time.Duration,
) (*MQ, error) {
	exp := time_.NewExponentialBackOff(
		time_.WithExponentialBackOffOptionMaxInterval(maxWaitInterval),
		time_.WithExponentialBackOffOptionMaxElapsedTime(failAfter),
	)

	var (
		errs []error
		conn *kafka.Conn
	)
	err := time_.BackOffUntilWithContext(ctx, func(ctx context.Context) (err_ error) {
		for _, broker := range q.Conf.Brokers {
			conn, err_ = newController(broker)
			if err_ != nil {
				errs = append(errs, err_)
				continue
			}
			return nil
		}
		return fmt.Errorf("failed to connect to kafka brokers %v, err: %v", q.Conf.Brokers, errors_.NewAggregate(errs))
	}, exp, true, false)
	if err != nil {
		return nil, fmt.Errorf("failed to get kafka connection after %v: %v", failAfter, err)
	}

	q.Conn = conn

	return q, nil
}

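// AsProducers creates and registers a producer for each of the given topics,
// retrying each creation with exponential backoff up to reconnectBackOffMax.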
func (q *MQ) AsProducers(ctx context.Context, topics ...string) (producers []*Producer, err error) {
	for _, topic := range topics {

		fn := func() (*Producer, error) {

			dialer := &kafka.Dialer{
				Timeout:   q.opts.dialTimeout,
				DualStack: true,
			}
			producer, err := NewProducer(kafka.WriterConfig{
				Brokers:  q.Conf.Brokers,
				Topic:    topic,
				Balancer: &kafka.Hash{},
				Dialer:   dialer,
			})
			if err != nil {
				return nil, err
			}

			q.producerLock.Lock()
			defer q.producerLock.Unlock()
			q.producers[topic] = producer
			return producer, nil
		}

		exp := time_.NewExponentialBackOff(
			time_.WithExponentialBackOffOptionMaxInterval(q.opts.reconnectBackOff),
			time_.WithExponentialBackOffOptionMaxElapsedTime(q.opts.reconnectBackOffMax),
		)
		err := time_.BackOffUntilWithContext(ctx, func(ctx context.Context) (err_ error) {
			producer, err_ := fn()
			if err_ != nil {
				return err_
			}
			producers = append(producers, producer)
			return nil
		}, exp, true, false)
		if err != nil {
			return nil, fmt.Errorf("failed to create producer for %v after %v: %v", topic, q.opts.reconnectBackOffMax, err)
		}

	}

	return producers, nil
}

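// GetProducer returns the registered producer for topic, or an error if no
// producer has been created for it.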
func (q *MQ) GetProducer(topic string) (*Producer, error) {
	q.producerLock.Lock()
	defer q.producerLock.Unlock()
	producer, ok := q.producers[topic]
	if ok {
		return producer, nil
	}

	return nil, fmt.Errorf("producer for topic %v does not exist", topic)
}

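// Send publishes msgs to topic using the producer previously registered via
// AsProducers.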
func (q *MQ) Send(ctx context.Context, topic string, msgs ...kafka.Message) error {
	p, err := q.GetProducer(topic)
	if err != nil {
		return err
	}

	return p.Send(ctx, msgs...)
}

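// AsConsumers creates and registers a consumer for each of the given topics,
// skipping topics that already have one and retrying each creation with
// exponential backoff up to reconnectBackOffMax.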
func (q *MQ) AsConsumers(ctx context.Context, topics ...string) (consumers []*Consumer, err error) {
	for _, topic := range topics {

		checkFn := func() bool {
			q.consumerLock.Lock()
			defer q.consumerLock.Unlock()
			_, ok := q.consumers[topic]
			return ok
		}

		exist := checkFn()
		if exist {
			continue
		}

		fn := func() (*Consumer, error) {

			dialer := &kafka.Dialer{
				Timeout:   q.opts.dialTimeout,
				DualStack: true,
			}
			consumer, err := NewConsumer(kafka.ReaderConfig{
				Brokers:  q.Conf.Brokers,
				GroupID:  q.opts.consumerOpts.groupID,
				Topic:    topic,
				Dialer:   dialer,
				MinBytes: q.opts.consumerOpts.minBytes,
				MaxBytes: q.opts.consumerOpts.maxBytes,
				MaxWait:  q.opts.consumerOpts.maxWait,
				//	CommitInterval: time.Second, // flushes commits to Kafka every second
			})
			if err != nil {
				return nil, err
			}

			q.consumerLock.Lock()
			defer q.consumerLock.Unlock()
			q.consumers[topic] = consumer
			return consumer, nil
		}

		exp := time_.NewExponentialBackOff(
			time_.WithExponentialBackOffOptionMaxInterval(q.opts.reconnectBackOff),
			time_.WithExponentialBackOffOptionMaxElapsedTime(q.opts.reconnectBackOffMax),
		)
		err := time_.BackOffUntilWithContext(ctx, func(ctx context.Context) (err_ error) {
			consumer, err_ := fn()
			if err_ != nil {
				return err_
			}
			consumers = append(consumers, consumer)
			return nil
		}, exp, true, false)
		if err != nil {
			return nil, fmt.Errorf("failed to create consumer for %v after %v: %v", topic, q.opts.reconnectBackOffMax, err)
		}

	}

	return consumers, nil
}

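// GetConsumer returns the registered consumer for topic, or an error if no
// consumer has been created for it.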
func (q *MQ) GetConsumer(topic string) (*Consumer, error) {
	q.consumerLock.Lock()
	defer q.consumerLock.Unlock()
	consumer, ok := q.consumers[topic]
	if ok {
		return consumer, nil
	}

	return nil, fmt.Errorf("consumer for topic %v does not exist", topic)
}

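// ReadStream returns a channel of messages consumed from topic. If no consumer
// has been registered for the topic, it returns a nil channel.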
func (q *MQ) ReadStream(ctx context.Context, topic string) <-chan mq_.Message {
	c, err := q.GetConsumer(topic)
	if err != nil {
		return nil
	}

	return c.ReadStream(ctx)
}

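// Close closes all registered producers and consumers and the controller
// connection, if any.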
func (q *MQ) Close() {
	for _, producer := range q.producers {
		producer.Close()
	}
	for _, consumer := range q.consumers {
		consumer.Close()
	}
	if q.Conn != nil {
		q.Conn.Close()
	}
}