github.com/gogf/gkafka@v1.0.1-0.20190702070843-033a14468069/gkafka.go

// Copyright 2018 gf Author(https://github.com/gogf/gf). All Rights Reserved.
//
// This Source Code Form is subject to the terms of the MIT License.
// If a copy of the MIT was not distributed with this file,
// You can obtain one at https://github.com/gogf/gf.

// Package gkafka provides producer and consumer clients for kafka server.
package gkafka

import (
    "fmt"
    "github.com/gogf/gkafka/third/github.com/Shopify/sarama"
    "github.com/gogf/gkafka/third/github.com/johngcn/sarama-cluster"
    "os"
    "strings"
    "time"
)

var (
    // Topics that are filtered out and ignored when all topics are retrieved via the Topics method.
    ignoreTopics = map[string]bool {
        "__consumer_offsets" : true,
    }
)

// Kafka client config based on sarama.Config.
type Config struct {
    sarama.Config
    GroupId        string // group id for consumer.
    Servers        string // server list, multiple servers joined by ','.
    Topics         string // topic list, multiple topics joined by ','.
    AutoMarkOffset bool   // automatically mark the message offset as read after it is consumed from the server.
}

// Kafka Client (Consumer/SyncProducer/AsyncProducer).
type Client struct {
    Config        *Config
    consumer      *cluster.Consumer
    rawConsumer   sarama.Consumer
    syncProducer  sarama.SyncProducer
    asyncProducer sarama.AsyncProducer
}

// Kafka Message.
type Message struct {
    Value          []byte                  // message content.
    Key            []byte                  // message key.
    Topic          string                  // topic the message belongs to.
    Partition      int                     // partition the message was read from.
    Offset         int                     // offset of the message within its partition.
    client         *Client
    consumerMsg    *sarama.ConsumerMessage
}

// NewClient creates and returns a new kafka client with given config.
func NewClient(config *Config) *Client {
    return &Client {
        Config : config,
    }
}

// NewConfig creates and returns a default configuration object.
func NewConfig() *Config {
    config       := &Config{}
    config.Config = *sarama.NewConfig()

    // default config for consumer
    config.Consumer.Return.Errors          = true
    config.Consumer.Offsets.Initial        = sarama.OffsetOldest
    config.Consumer.Offsets.CommitInterval = 1 * time.Second

    // default config for producer
    config.Producer.Return.Errors          = true
    config.Producer.Return.Successes       = true
    config.Producer.Timeout                = 5 * time.Second

    config.AutoMarkOffset                  = true
    return config
}
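
// A minimal construction sketch; the broker address "localhost:9092", topic
// "test" and group id "test-group" below are illustrative assumptions, not
// defaults of this package:
//
//     config        := gkafka.NewConfig()
//     config.Servers = "localhost:9092"
//     config.Topics  = "test"
//     config.GroupId = "test-group"
//     client        := gkafka.NewClient(config)
//     defer client.Close()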

// Close closes the client.
func (client *Client) Close() {
    if client.rawConsumer != nil {
        client.rawConsumer.Close()
    }
    if client.consumer != nil {
        client.consumer.Close()
    }
    if client.syncProducer != nil {
        client.syncProducer.Close()
    }
    if client.asyncProducer != nil {
        client.asyncProducer.Close()
    }
}

// Topics returns all topics from kafka server.
func (client *Client) Topics() ([]string, error) {
    c, err := sarama.NewConsumer(strings.Split(client.Config.Servers, ","), &client.Config.Config)
    if err != nil {
        return nil, err
    }
    defer c.Close()
    topics, err := c.Topics()
    if err != nil {
        return nil, err
    }
    // Filter out the ignored topics. Deleting from a slice while ranging over
    // it skips elements, so the result is collected into a new slice instead.
    result := make([]string, 0, len(topics))
    for _, topic := range topics {
        if !ignoreTopics[topic] {
            result = append(result, topic)
        }
    }
    return result, nil
}
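
// Listing topics, as a short sketch (assumes a client built as shown above;
// error handling shortened for brevity):
//
//     topics, err := client.Topics()
//     if err == nil {
//         fmt.Println(topics)
//     }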

// initConsumer lazily initializes the cluster consumer of the client.
func (client *Client) initConsumer() error {
    if client.consumer == nil {
        config       := cluster.NewConfig()
        config.Config = client.Config.Config
        config.Group.Return.Notifications = false
        c, err := cluster.NewConsumer(strings.Split(client.Config.Servers, ","), client.Config.GroupId, strings.Split(client.Config.Topics, ","), config)
        if err != nil {
            return err
        }
        client.consumer = c
    }
    return nil
}

// MarkOffset marks the given offset of the topic/partition as consumed for the consumer group.
func (client *Client) MarkOffset(topic string, partition int, offset int, metadata ...string) error {
    if err := client.initConsumer(); err != nil {
        return err
    }
    meta := ""
    if len(metadata) > 0 {
        meta = metadata[0]
    }
    client.consumer.MarkPartitionOffset(topic, int32(partition), int64(offset), meta)
    return nil
}
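
// When Config.AutoMarkOffset is disabled, offsets can be marked manually, as
// sketched below; topic "test", partition 0 and offset 100 are assumed
// placeholder values:
//
//     if err := client.MarkOffset("test", 0, 100); err != nil {
//         // handle error
//     }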

// Receive receives the next message from kafka from the topics specified in config, in BLOCKING way.
// It marks the message offset automatically if Config.AutoMarkOffset is enabled.
func (client *Client) Receive() (*Message, error) {
    if err := client.initConsumer(); err != nil {
        return nil, err
    }
    errorsChan  := client.consumer.Errors()
    notifyChan  := client.consumer.Notifications()
    messageChan := client.consumer.Messages()
    for {
        select {
            case msg := <- messageChan:
                if client.Config.AutoMarkOffset {
                    client.consumer.MarkOffset(msg, "")
                }
                return &Message {
                    Value       : msg.Value,
                    Key         : msg.Key,
                    Topic       : msg.Topic,
                    Partition   : int(msg.Partition),
                    Offset      : int(msg.Offset),
                    client      : client,
                    consumerMsg : msg,
                }, nil

            case err := <-errorsChan:
                if err != nil {
                    return nil, err
                }

            // Rebalance notifications are drained and discarded.
            case <-notifyChan:
        }
    }
}
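
// A typical blocking consumption loop (a sketch; what to do with each message
// is up to the caller):
//
//     for {
//         msg, err := client.Receive()
//         if err != nil {
//             break
//         }
//         fmt.Println(msg.Topic, msg.Partition, msg.Offset, string(msg.Value))
//     }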

// SyncSend sends data to kafka in a synchronous way.
func (client *Client) SyncSend(message *Message) error {
    if client.syncProducer == nil {
        if p, err := sarama.NewSyncProducer(strings.Split(client.Config.Servers, ","), &client.Config.Config); err != nil {
            return err
        } else {
            client.syncProducer = p
        }
    }
    for _, topic := range strings.Split(client.Config.Topics, ",") {
        msg      := messageToProducerMessage(message)
        msg.Topic = topic
        if _, _, err := client.syncProducer.SendMessage(msg); err != nil {
            return err
        }
    }
    return nil
}
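
// Synchronous sending, sketched with an assumed payload; Message.Topic may be
// left empty here because SyncSend writes to every topic in Config.Topics:
//
//     if err := client.SyncSend(&gkafka.Message{Value: []byte("hello")}); err != nil {
//         // handle error
//     }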

// AsyncSend sends data to kafka in an asynchronous way (concurrent safe).
func (client *Client) AsyncSend(message *Message) error {
    if client.asyncProducer == nil {
        if p, err := sarama.NewAsyncProducer(strings.Split(client.Config.Servers, ","), &client.Config.Config); err != nil {
            return err
        } else {
            client.asyncProducer = p
            // Drain the error and success channels in the background so the
            // producer never blocks. The goroutine exits when the producer is
            // closed and its channels are closed.
            go func(p sarama.AsyncProducer) {
               errors  := p.Errors()
               success := p.Successes()
               for {
                   select {
                       case err, ok := <-errors:
                           if !ok {
                               return
                           }
                           if err != nil {
                               fmt.Fprintln(os.Stderr, err)
                           }
                       case _, ok := <-success:
                           if !ok {
                               return
                           }
                   }
               }
            }(client.asyncProducer)
        }
    }

    for _, topic := range strings.Split(client.Config.Topics, ",") {
        msg      := messageToProducerMessage(message)
        msg.Topic = topic
        client.asyncProducer.Input() <- msg
    }
    return nil
}
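
// Asynchronous sending follows the same shape (a sketch; note that per-message
// delivery errors are only written to stderr by the background goroutine, not
// returned to the caller):
//
//     if err := client.AsyncSend(&gkafka.Message{Value: []byte("hello")}); err != nil {
//         // handle producer construction error
//     }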

// messageToProducerMessage converts *gkafka.Message to *sarama.ProducerMessage.
func messageToProducerMessage(message *Message) *sarama.ProducerMessage {
    return &sarama.ProducerMessage {
        Topic     : message.Topic,
        Key       : sarama.ByteEncoder(message.Key),
        Value     : sarama.ByteEncoder(message.Value),
        Partition : int32(message.Partition),
        Offset    : int64(message.Offset),
    }
}