github.com/teng231/kafclient@v1.2.9/consumer.go

package kafclient

import (
	"context"
	"encoding/json"
	"errors"
	"log"
	"strings"
	"sync"
	"time"

	"github.com/Shopify/sarama"
)

func newConsumerGroup(consumerGroup string, reconnect chan bool, brokerURLs ...string) (sarama.ConsumerGroup, error) {
	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true
	config.Consumer.Group.Session.Timeout = 20 * time.Second
	config.Consumer.Group.Heartbeat.Interval = 6 * time.Second
	config.Consumer.MaxProcessingTime = 500 * time.Millisecond
	version, err := sarama.ParseKafkaVersion(kafkaVersion)
	if err != nil {
		log.Printf("Error parsing Kafka version: %v", err)
		return nil, err
	}
	config.Version = version
	config.Consumer.Offsets.Initial = sarama.OffsetOldest

	// Start with a client.
	client, err := sarama.NewClient(brokerURLs, config)
	if err != nil {
		return nil, err
	}

	// Start a new consumer group on top of the client.
	group, err := sarama.NewConsumerGroupFromClient(consumerGroup, client)
	if err != nil {
		client.Close()
		return nil, err
	}
	// Drain the group's error channel and signal the reconnect channel
	// (best effort, non-blocking) so the caller can decide to rebuild the group.
	// Recreating the group here and discarding it would only leak resources.
	go func() {
		for err := range group.Errors() {
			log.Println("ERROR:", err)
			select {
			case reconnect <- true:
			default:
			}
		}
	}()
	return group, nil
}

func newConsumer(brokerURLs ...string) (sarama.Consumer, error) {
	config := sarama.NewConfig()
	version, err := sarama.ParseKafkaVersion(kafkaVersion)
	if err != nil {
		log.Printf("Error parsing Kafka version: %v", err)
		return nil, err
	}
	config.Version = version
	config.Consumer.Return.Errors = true
	config.Consumer.Offsets.AutoCommit.Enable = true
	config.Consumer.Offsets.AutoCommit.Interval = 500 * time.Millisecond
	consumer, err := sarama.NewConsumer(brokerURLs, config)
	if err != nil {
		return nil, err
	}
	return consumer, nil
}

func (ps *Client) createTopic(topic string) error {
	config := sarama.NewConfig()
	config.Version = ps.kafkaVersion
	admin, err := sarama.NewClusterAdmin(ps.brokerURLs, config)
	if err != nil {
		log.Println("[warning]:", err, ps.brokerURLs)
		return err
	}
	defer admin.Close()
	detail := &sarama.TopicDetail{
		NumPartitions:     int32(NUM_PARTITION),
		ReplicationFactor: int16(REPLICATION_FACTOR),
	}
	err = admin.CreateTopic(topic, detail, false)
	if err != nil {
		log.Println("[psub]:", err)
	}
	log.Print(detail)
	return err
}

func (ps *Client) InitConsumerGroup(consumerGroup string, brokerURLs ...string) error {
	client, err := newConsumerGroup(consumerGroup, ps.reconnect, brokerURLs...)
	if err != nil {
		return err
	}
	ps.group = client
	ps.brokerURLs = brokerURLs
	ps.consumerGroup = consumerGroup
	return nil
}

func (ps *Client) InitConsumer(brokerURLs ...string) error {
	client, err := newConsumer(brokerURLs...)
	if err != nil {
		return err
	}
	ps.consumer = client
	ps.brokerURLs = brokerURLs
	return nil
}

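// Usage sketch for the Init functions (a minimal example; the group name and
// broker address are placeholders, not defaults of this package):
//
//	ps := &Client{}
//	if err := ps.InitConsumerGroup("my-group", "localhost:9092"); err != nil {
//		log.Fatal(err)
//	}
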
func (ps *Client) OnScanMessages(topics []string, bufMessage chan Message) error {
	done := make(chan bool)
	for _, topic := range topics {
		if strings.Contains(topic, "__consumer_offsets") {
			continue
		}
		partitions, _ := ps.consumer.Partitions(topic)
		log.Printf("number of partitions: %v", len(partitions))

		// Consume every partition of the topic, each in its own goroutine,
		// starting from the oldest available offset.
		for _, partition := range partitions {
			partitionConsumer, err := ps.consumer.ConsumePartition(topic, partition, sarama.OffsetOldest)
			if err != nil {
				log.Printf("topic %v partition %v: %v", topic, partition, err)
				continue
			}
			defer func() {
				if err := partitionConsumer.Close(); err != nil {
					log.Print(err)
				}
			}()
			go func(topic string, partitionConsumer sarama.PartitionConsumer) {
				for {
					select {
					case consumerError := <-partitionConsumer.Errors():
						log.Print(consumerError.Err)
						done <- true
					case msg := <-partitionConsumer.Messages():
						messageHandler(msg, bufMessage)
					}
				}
			}(topic, partitionConsumer)
		}
	}
	<-done
	return nil
}

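// Usage sketch for OnScanMessages (assumes InitConsumer was called first;
// topic name and buffer size are placeholders):
//
//	buf := make(chan Message, 100)
//	go func() {
//		for msg := range buf {
//			log.Printf("%s/%d: %s", msg.Topic, msg.Partition, msg.Body)
//		}
//	}()
//	ps.OnScanMessages([]string{"my-topic"}, buf) // blocks until a partition errors
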
// BodyParse unmarshals a JSON message body into p.
func BodyParse(bin []byte, p interface{}) error {
	return json.Unmarshal(bin, p)
}

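// Example for BodyParse (the Order type is illustrative, not part of this package):
//
//	type Order struct {
//		ID string `json:"id"`
//	}
//	var o Order
//	if err := BodyParse(msg.Body, &o); err != nil {
//		log.Print(err)
//	}
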
func (ps *Client) ListTopics(brokers ...string) ([]string, error) {
	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true
	cluster, err := sarama.NewConsumer(brokers, config)
	if err != nil {
		return nil, err
	}
	defer cluster.Close()
	return cluster.Topics()
}
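
// Usage sketch for ListTopics (broker address is a placeholder):
//
//	topics, err := ps.ListTopics("localhost:9092")
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Print(topics)
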
// OnAsyncSubscribe consumes the given topics forever, rebuilding the consumer
// group and retrying after 5 seconds whenever a session ends.
func (ps *Client) OnAsyncSubscribe(topics []*Topic, numberPuller int, buf chan Message) error {
	var err error
	for {
		err = ps.onAsyncSubscribe(topics, numberPuller, buf)
		if err != nil {
			log.Print(err)
		}
		time.Sleep(5 * time.Second)
		log.Print("trying to reconnect...")
		ps.InitConsumerGroup(ps.consumerGroup, ps.brokerURLs...)
	}
}

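// Usage sketch for OnAsyncSubscribe (names and sizes are placeholders; with
// AutoCommit=false the receiver must call msg.Commit, with AutoCommit=true
// Commit is a no-op):
//
//	buf := make(chan Message, 100)
//	go func() {
//		for msg := range buf {
//			log.Printf("%s: %s", msg.Topic, msg.Body)
//			msg.Commit()
//		}
//	}()
//	ps.OnAsyncSubscribe([]*Topic{{Name: "my-topic", AutoCommit: false}}, 2, buf)
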
// onAsyncSubscribe runs one consumer-group session with numberPuller workers
// and blocks until the session ends.
func (ps *Client) onAsyncSubscribe(topics []*Topic, numberPuller int, buf chan Message) error {
	if len(topics) == 0 {
		log.Print("no topics given")
		return nil
	}
	txtTopics := []string{}
	autoCommit := map[string]bool{}
	allTopics, err := ps.ListTopics(ps.brokerURLs...)
	if err != nil {
		log.Print("cannot list existing topics")
		return err
	}
	mTopic := make(map[string]bool)
	for _, topic := range allTopics {
		mTopic[topic] = true
	}
	for _, topic := range topics {
		if strings.Contains(topic.Name, "__consumer_offsets") {
			continue
		}
		// Create the topic only when it does not exist yet.
		if topic.IsNeedManualCreateTopic {
			if _, has := mTopic[topic.Name]; !has {
				ps.createTopic(topic.Name)
			}
		}

		txtTopics = append(txtTopics, topic.Name)
		autoCommit[topic.Name] = topic.AutoCommit
		if !topic.AutoCommit {
			log.Print("don't forget to commit topic: ", topic.Name)
		}
	}
	consumer := &ConsumerGroupHandle{
		wg:         &sync.WaitGroup{},
		bufMessage: buf,
		lock:       make(chan bool),
		autoCommit: autoCommit,
	}
	ctx, cancel := context.WithCancel(context.Background())
	consumer.wg.Add(numberPuller)
	for i := 0; i < numberPuller; i++ {
		go func() {
			defer consumer.wg.Done()
			for {
				// `Consume` should be called inside an infinite loop: when a
				// server-side rebalance happens, the consumer session needs to
				// be recreated to get the new claims.
				err := ps.group.Consume(ctx, txtTopics, consumer)
				if err != nil {
					log.Printf("[psub]: %v", err)
					// Non-blocking send, so several failing workers cannot
					// deadlock on the single receive below.
					select {
					case consumer.lock <- true:
					default:
					}
					break
				}
				// The context was cancelled; the worker should stop.
				if ctx.Err() != nil {
					return
				}
			}
		}()
	}

	log.Print("[kafka] started all workers")
	<-consumer.lock
	cancel()
	consumer.wg.Wait()
	if err := ps.group.Close(); err != nil {
		log.Printf("Error closing client: %v", err)
		return err
	}
	log.Print("[kafka] consumer group session ended")
	return nil
}

func messageHandler(m *sarama.ConsumerMessage, bufMessage chan Message) error {
	if len(m.Value) == 0 {
		// An empty payload is treated as a broken message and skipped.
		return errors.New("message error")
	}
	msg := Message{
		Topic:     m.Topic,
		Body:      m.Value,
		Offset:    m.Offset,
		Partition: int(m.Partition),
		Timestamp: m.Timestamp.Unix(),
	}
	if len(m.Headers) != 0 {
		headers := map[string]string{}
		for _, header := range m.Headers {
			headers[string(header.Key)] = string(header.Value)
		}
		msg.Headers = headers
	}
	bufMessage <- msg
	return nil
}

// Setup is run at the beginning of a new session, before ConsumeClaim.
func (consumer *ConsumerGroupHandle) Setup(ss sarama.ConsumerGroupSession) error {
	log.Print("session ready, member: ", ss.MemberID())
	return nil
}

// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited.
func (consumer *ConsumerGroupHandle) Cleanup(ss sarama.ConsumerGroupSession) error {
	log.Print("sarama cleanup, member: ", ss.MemberID())
	return nil
}

// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
func (consumer *ConsumerGroupHandle) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	// NOTE:
	// Do not move the code below to a goroutine.
	// `ConsumeClaim` itself is called within a goroutine, see:
	// https://github.com/Shopify/sarama/blob/master/consumer_group.go#L27-L29
	for {
		select {
		case m, ok := <-claim.Messages():
			// The message channel is closed on rebalance; end this claim cleanly.
			if !ok {
				return nil
			}
			if len(m.Value) == 0 {
				// An empty payload is treated as a broken message; end the claim with an error.
				return errors.New("message error")
			}
			msg := Message{
				Topic:     m.Topic,
				Body:      m.Value,
				Offset:    m.Offset,
				Partition: int(m.Partition),
				Timestamp: m.Timestamp.Unix(),
			}
			if len(m.Headers) != 0 {
				headers := map[string]string{}
				for _, header := range m.Headers {
					headers[string(header.Key)] = string(header.Value)
				}
				msg.Headers = headers
			}
			if consumer.autoCommit[m.Topic] {
				session.MarkOffset(m.Topic, m.Partition, m.Offset, "")
				session.MarkMessage(m, "")
				// Commit must be set before the message is sent: Message is
				// passed by value, so a later assignment would not be seen.
				msg.Commit = func() {}
				consumer.bufMessage <- msg
				continue
			}
			msg.Commit = func() {
				session.MarkOffset(m.Topic, m.Partition, m.Offset, "")
				session.MarkMessage(m, "")
			}
			consumer.bufMessage <- msg

		// Should return when `session.Context()` is done. If not, sarama will
		// raise `ErrRebalanceInProgress` or `read tcp <ip>:<port>: i/o timeout`
		// when Kafka rebalances. See:
		// https://github.com/Shopify/sarama/issues/1192
		case <-session.Context().Done():
			return nil
		}
	}
}

func (ps *Client) Close() error {
	if ps.consumer != nil {
		if err := ps.consumer.Close(); err != nil {
			return err
		}
	}
	return nil
}