github.com/sereiner/library@v0.0.0-20200518095232-1fa3e640cc5f/mq/kafka/kafka.producer.go (about)

     1  package kafka
     2  
     3  import (
     4  	"errors"
     5  	"strings"
     6  	"sync"
     7  	"time"
     8  
     9  	"fmt"
    10  
    11  	"github.com/Shopify/sarama"
    12  	"github.com/sereiner/library/concurrent/cmap"
    13  	logger "github.com/sereiner/library/log"
    14  	"github.com/sereiner/library/mq"
    15  )
    16  
// KafkaProducer asynchronously publishes messages to kafka topics through a
// background send loop (see Connect/sendLoop), keeping one cached
// sarama.SyncProducer per queue and a bounded backup channel for messages
// that could not be delivered.
type KafkaProducer struct {
	address    string                   // comma-separated kafka broker addresses
	messages   chan *mq.ProcuderMessage // primary outgoing buffer (cap 10000, see NewKafkaProducer)
	backupMsg  chan *mq.ProcuderMessage // failed/undeliverable messages (cap 100)
	queues     cmap.ConcurrentMap       // queue name -> *kafkaProducer cache
	connecting bool                     // NOTE(review): never read or written in this file
	closeCh    chan struct{}            // closed once by Close(); never selected on in this file
	done       bool                     // set by Close(); read unsynchronized by Send/sendLoop — data race
	once       sync.Once                // guards the channel closes in Close()
	lk         sync.Mutex               // NOTE(review): never used in this file
	header     []string                 // NOTE(review): never used in this file
	*mq.OptionConf                      // embedded options, incl. Logger and Retry
}
// kafkaProducer is the per-queue connection entry cached in
// KafkaProducer.queues.
type kafkaProducer struct {
	producer sarama.SyncProducer          // synchronous kafka client used by sendLoop
	msgQueue chan *sarama.ProducerMessage // NOTE(review): allocated in Send's init callback but never used
}
    35  
    36  //NewKafkaProducer 创建新的producer
    37  func NewKafkaProducer(address string, opts ...mq.Option) (producer *KafkaProducer, err error) {
    38  	producer = &KafkaProducer{address: address}
    39  	producer.queues = cmap.New(2)
    40  	producer.OptionConf = &mq.OptionConf{}
    41  	producer.messages = make(chan *mq.ProcuderMessage, 10000)
    42  	producer.backupMsg = make(chan *mq.ProcuderMessage, 100)
    43  	producer.closeCh = make(chan struct{})
    44  	for _, opt := range opts {
    45  		opt(producer.OptionConf)
    46  	}
    47  	if producer.Logger == nil {
    48  		producer.Logger = logger.GetSession("mq.producer", logger.CreateSession())
    49  	}
    50  	return
    51  }
    52  
// Connect starts the background send loop and returns immediately; it never
// fails. Actual broker connections are established lazily per queue inside
// Send, so connection problems surface later via the logger and the backup
// queue rather than here.
func (producer *KafkaProducer) Connect() error {
	go producer.sendLoop()
	return nil
}
    58  
    59  //sendLoop 循环发送消息
    60  func (producer *KafkaProducer) sendLoop() {
    61  	if producer.done {
    62  		producer.disconnect()
    63  		return
    64  	}
    65  	if producer.Retry {
    66  	Loop1:
    67  		for {
    68  			select {
    69  			case msg, ok := <-producer.backupMsg:
    70  				if !ok {
    71  					break Loop1
    72  				}
    73  				pd, ok := producer.queues.Get(msg.Queue)
    74  				if !ok {
    75  					select {
    76  					case producer.backupMsg <- msg:
    77  					default:
    78  						producer.Logger.Errorf("重试发送失败,备份队列已满无法放入队列(%s):%s", msg.Queue, msg.Data)
    79  					}
    80  					continue
    81  				}
    82  				producerConn := pd.(*kafkaProducer)
    83  				_, _, err := producerConn.producer.SendMessage(&sarama.ProducerMessage{Topic: msg.Queue, Partition: 0, Value: sarama.StringEncoder(msg.Data)})
    84  				if err != nil {
    85  					select {
    86  					case producer.backupMsg <- msg:
    87  					default:
    88  						producer.Logger.Errorf("发送失败,备份队列已满无法放入队列(%s):%s", msg.Queue, msg.Data)
    89  					}
    90  				}
    91  			case msg, ok := <-producer.messages:
    92  				if !ok {
    93  					break Loop1
    94  				}
    95  				pd, ok := producer.queues.Get(msg.Queue)
    96  				if !ok {
    97  					select {
    98  					case producer.backupMsg <- msg:
    99  					default:
   100  						producer.Logger.Errorf("消息无法放入备份队列(%s):%s", msg.Queue, msg.Data)
   101  					}
   102  					producer.Logger.Errorf("消息无法从缓存中获取producer:%s,%s", msg.Queue, msg.Data)
   103  					continue
   104  				}
   105  				producerConn := pd.(*kafkaProducer)
   106  				_, _, err := producerConn.producer.SendMessage(&sarama.ProducerMessage{Topic: msg.Queue, Partition: 0, Value: sarama.StringEncoder(msg.Data)})
   107  				if err != nil {
   108  					select {
   109  					case producer.backupMsg <- msg:
   110  					default:
   111  						producer.Logger.Errorf("消息无法放入备份队列(%s):%s", msg.Queue, msg.Data)
   112  					}
   113  				}
   114  			}
   115  		}
   116  	} else {
   117  	Loop2:
   118  		for {
   119  			select {
   120  			case msg, ok := <-producer.messages:
   121  				fmt.Println("send.msg:", msg)
   122  				if !ok {
   123  					break Loop2
   124  				}
   125  				pd, ok := producer.queues.Get(msg.Queue)
   126  				if !ok {
   127  					select {
   128  					case producer.backupMsg <- msg:
   129  					default:
   130  						producer.Logger.Errorf("消息无法放入备份队列(%s):%s", msg.Queue, msg.Data)
   131  					}
   132  					producer.Logger.Errorf("消息无法从缓存中获取producer:%s,%s", msg.Queue, msg.Data)
   133  					continue
   134  				}
   135  				producerConn := pd.(*kafkaProducer)
   136  				//, Timestamp: msg.Timeout
   137  				_, _, err := producerConn.producer.SendMessage(&sarama.ProducerMessage{Topic: msg.Queue, Partition: 0, Value: sarama.StringEncoder(msg.Data)})
   138  				if err != nil {
   139  					select {
   140  					case producer.backupMsg <- msg:
   141  					default:
   142  						producer.Logger.Errorf("消息无法放入备份队列(%s):%s", msg.Queue, msg.Data)
   143  					}
   144  				}
   145  			}
   146  		}
   147  	}
   148  	if producer.done { //关闭连接
   149  		producer.disconnect()
   150  		return
   151  	}
   152  }
// disconnect is called by sendLoop once the producer is done.
// NOTE(review): currently a no-op — the sarama.SyncProducers cached in
// producer.queues are never Close()d, so broker connections leak on
// shutdown.
func (producer *KafkaProducer) disconnect() {

}
   156  
// GetBackupMessage exposes the channel holding messages that could not be
// delivered, so callers can drain and re-handle them.
func (producer *KafkaProducer) GetBackupMessage() chan *mq.ProcuderMessage {
	return producer.backupMsg
}
   161  
// Send queues a message for asynchronous delivery to the given kafka
// topic/queue. It returns an error when the producer has been closed or when
// the outgoing buffer is full; it never blocks. timeout is stored on the
// message but is not applied to the kafka send itself (see sendLoop).
func (producer *KafkaProducer) Send(queue string, msg string, timeout time.Duration) (err error) {
	// NOTE(review): unsynchronized read of done — Close() may race with this
	// check, after which the channel send below can panic on a closed channel.
	if producer.done {
		return errors.New("mq producer 已关闭")
	}
	// Lazily create and cache one SyncProducer per queue on first use.
	// NOTE(review): the callback's error (e.g. when sarama.NewSyncProducer
	// cannot reach the brokers) is discarded here, so a dead broker is only
	// noticed later when sendLoop fails to find/send — confirm whether
	// SetIfAbsentCb skips caching when the callback errors.
	producer.queues.SetIfAbsentCb(queue, func(i ...interface{}) (interface{}, error) {
		var err error
		c := &kafkaProducer{}
		config := sarama.NewConfig()
		config.Producer.RequiredAcks = sarama.WaitForAll
		config.Producer.Partitioner = sarama.NewManualPartitioner

		c.producer, err = sarama.NewSyncProducer(strings.Split(producer.address, ","), config)
		// msgQueue is allocated but never consumed anywhere in this file.
		c.msgQueue = make(chan *sarama.ProducerMessage, 10)

		//&sarama.ProducerMessage{Topic: *topic, Partition: int32(*partition)}
		return c, err
	})

	pm := &mq.ProcuderMessage{Queue: queue, Data: msg, Timeout: timeout}
	// Non-blocking enqueue: fail fast instead of blocking the caller when the
	// 10000-slot buffer is full.
	select {
	case producer.messages <- pm:
		return nil
	default:
		return errors.New("producer无法连接到MQ服务器,消息队列已满无法发送")
	}
}
   189  
// Close marks the producer as done and — exactly once — closes the control
// and message channels so sendLoop drains and exits.
// NOTE(review): done is a plain bool read by Send/sendLoop on other
// goroutines, which is a data race; closing producer.messages here while a
// concurrent Send is selecting on it can panic with "send on closed channel".
func (producer *KafkaProducer) Close() {
	producer.done = true
	producer.once.Do(func() {
		close(producer.closeCh)
		close(producer.messages)
		close(producer.backupMsg)
	})

}
   200  
// kafkaProducerResolver adapts NewKafkaProducer to the mq producer-resolver
// registry; it is registered under the "kafka" scheme in init.
type kafkaProducerResolver struct {
}
   203  
   204  func (s *kafkaProducerResolver) Resolve(address string, opts ...mq.Option) (mq.MQProducer, error) {
   205  	return NewKafkaProducer(address, opts...)
   206  }
// init registers this adapter so the mq package can resolve the "kafka"
// scheme to a KafkaProducer.
func init() {
	mq.RegisterProducer("kafka", &kafkaProducerResolver{})
}