github.com/qxnw/lib4go@v0.0.0-20180426074627-c80c7e84b925/mq/kafka/kafka.producer.go (about)

     1  package kafka
     2  
     3  /*
     4  import (
     5  	"errors"
     6  	"strings"
     7  	"sync"
     8  	"time"
     9  
    10  	"fmt"
    11  
    12  	"github.com/Shopify/sarama"
    13  	"github.com/qxnw/lib4go/concurrent/cmap"
    14  	"github.com/qxnw/lib4go/logger"
    15  	"github.com/qxnw/lib4go/mq"
    16  )
    17  
// KafkaProducer publishes messages to kafka topics. Send enqueues onto an
// in-memory channel; the sendLoop goroutine (started from Connect) performs
// the actual delivery asynchronously.
type KafkaProducer struct {
	address    string                   // comma-separated kafka broker list
	messages   chan *mq.ProcuderMessage // primary outgoing queue (cap 10000)
	backupMsg  chan *mq.ProcuderMessage // messages whose delivery failed (cap 100)
	queues     cmap.ConcurrentMap       // topic name -> *kafkaProducer cache
	connecting bool                     // NOTE(review): never read or written in this file — confirm needed
	closeCh    chan struct{}            // closed once by Close to signal shutdown
	done       bool                     // set by Close; polled without synchronization by sendLoop and Send
	once       sync.Once                // guards the channel-close sequence in Close
	lk         sync.Mutex               // NOTE(review): never used in this file — confirm needed
	header     []string                 // NOTE(review): never used in this file — confirm needed
	*mq.OptionConf                      // embedded options (Retry, Logger, ...)
}
// kafkaProducer bundles one synchronous sarama producer; one instance is
// created lazily per topic by Send and cached in KafkaProducer.queues.
type kafkaProducer struct {
	producer sarama.SyncProducer
	msgQueue chan *sarama.ProducerMessage // NOTE(review): allocated in Send but never written or drained — confirm it is needed
}
    36  
    37  //NewKafkaProducer 创建新的producer
    38  func NewKafkaProducer(address string, opts ...mq.Option) (producer *KafkaProducer, err error) {
    39  	producer = &KafkaProducer{address: address}
    40  	producer.queues = cmap.New(2)
    41  	producer.OptionConf = &mq.OptionConf{}
    42  	producer.messages = make(chan *mq.ProcuderMessage, 10000)
    43  	producer.backupMsg = make(chan *mq.ProcuderMessage, 100)
    44  	producer.closeCh = make(chan struct{})
    45  	for _, opt := range opts {
    46  		opt(producer.OptionConf)
    47  	}
    48  	if producer.Logger == nil {
    49  		producer.Logger = logger.GetSession("mq.producer", logger.CreateSession())
    50  	}
    51  	return
    52  }
    53  
// Connect starts the asynchronous send loop and always reports success;
// broker connections are established lazily per topic inside Send.
// NOTE(review): the goroutine cannot be waited on — it only exits after
// Close closes the message channels; confirm callers always pair
// Connect with Close.
func (producer *KafkaProducer) Connect() error {
	go producer.sendLoop()
	return nil
}
    59  
    60  //sendLoop 循环发送消息
    61  func (producer *KafkaProducer) sendLoop() {
    62  	if producer.done {
    63  		producer.disconnect()
    64  		return
    65  	}
    66  	if producer.Retry {
    67  	Loop1:
    68  		for {
    69  			select {
    70  			case msg, ok := <-producer.backupMsg:
    71  				if !ok {
    72  					break Loop1
    73  				}
    74  				pd, ok := producer.queues.Get(msg.Queue)
    75  				if !ok {
    76  					select {
    77  					case producer.backupMsg <- msg:
    78  					default:
    79  						producer.Logger.Errorf("重试发送失败,备份队列已满无法放入队列(%s):%s", msg.Queue, msg.Data)
    80  					}
    81  					continue
    82  				}
    83  				producerConn := pd.(*kafkaProducer)
    84  				_, _, err := producerConn.producer.SendMessage(&sarama.ProducerMessage{Topic: msg.Queue, Partition: 0, Value: sarama.StringEncoder(msg.Data)})
    85  				if err != nil {
    86  					select {
    87  					case producer.backupMsg <- msg:
    88  					default:
    89  						producer.Logger.Errorf("发送失败,备份队列已满无法放入队列(%s):%s", msg.Queue, msg.Data)
    90  					}
    91  				}
    92  			case msg, ok := <-producer.messages:
    93  				if !ok {
    94  					break Loop1
    95  				}
    96  				pd, ok := producer.queues.Get(msg.Queue)
    97  				if !ok {
    98  					select {
    99  					case producer.backupMsg <- msg:
   100  					default:
   101  						producer.Logger.Errorf("消息无法放入备份队列(%s):%s", msg.Queue, msg.Data)
   102  					}
   103  					producer.Logger.Errorf("消息无法从缓存中获取producer:%s,%s", msg.Queue, msg.Data)
   104  					continue
   105  				}
   106  				producerConn := pd.(*kafkaProducer)
   107  				_, _, err := producerConn.producer.SendMessage(&sarama.ProducerMessage{Topic: msg.Queue, Partition: 0, Value: sarama.StringEncoder(msg.Data)})
   108  				if err != nil {
   109  					select {
   110  					case producer.backupMsg <- msg:
   111  					default:
   112  						producer.Logger.Errorf("消息无法放入备份队列(%s):%s", msg.Queue, msg.Data)
   113  					}
   114  				}
   115  			}
   116  		}
   117  	} else {
   118  	Loop2:
   119  		for {
   120  			select {
   121  			case msg, ok := <-producer.messages:
   122  				fmt.Println("send.msg:", msg)
   123  				if !ok {
   124  					break Loop2
   125  				}
   126  				pd, ok := producer.queues.Get(msg.Queue)
   127  				if !ok {
   128  					select {
   129  					case producer.backupMsg <- msg:
   130  					default:
   131  						producer.Logger.Errorf("消息无法放入备份队列(%s):%s", msg.Queue, msg.Data)
   132  					}
   133  					producer.Logger.Errorf("消息无法从缓存中获取producer:%s,%s", msg.Queue, msg.Data)
   134  					continue
   135  				}
   136  				producerConn := pd.(*kafkaProducer)
   137  				//, Timestamp: msg.Timeout
   138  				_, _, err := producerConn.producer.SendMessage(&sarama.ProducerMessage{Topic: msg.Queue, Partition: 0, Value: sarama.StringEncoder(msg.Data)})
   139  				if err != nil {
   140  					select {
   141  					case producer.backupMsg <- msg:
   142  					default:
   143  						producer.Logger.Errorf("消息无法放入备份队列(%s):%s", msg.Queue, msg.Data)
   144  					}
   145  				}
   146  			}
   147  		}
   148  	}
   149  	if producer.done { //关闭连接
   150  		producer.disconnect()
   151  		return
   152  	}
   153  }
// disconnect is invoked by sendLoop once done has been set.
// NOTE(review): the body is empty — the sarama producers cached in
// queues are never Close()d, so broker connections leak on shutdown;
// confirm and implement the teardown.
func (producer *KafkaProducer) disconnect() {

}
   157  
   158  //GetBackupMessage 获取备份数据
   159  func (producer *KafkaProducer) GetBackupMessage() chan *mq.ProcuderMessage {
   160  	return producer.backupMsg
   161  }
   162  
// Send queues one message for asynchronous delivery to the given queue
// (kafka topic). On first use of a topic it lazily creates and caches a
// synchronous sarama producer for it. The call never blocks: when the
// outgoing channel is full an error is returned immediately.
// NOTE(review): timeout is stored on the message but is not applied as a
// delivery deadline anywhere in this file — confirm intended use.
func (producer *KafkaProducer) Send(queue string, msg string, timeout time.Duration) (err error) {
	if producer.done {
		return errors.New("mq producer 已关闭")
	}
	// Lazily build the per-topic producer.
	// NOTE(review): the return value of SetIfAbsentCb is discarded, so an
	// error from sarama.NewSyncProducer is silently dropped and the message
	// is queued anyway (sendLoop will later shunt it to the backup queue).
	producer.queues.SetIfAbsentCb(queue, func(i ...interface{}) (interface{}, error) {
		var err error
		c := &kafkaProducer{}
		config := sarama.NewConfig()
		config.Producer.RequiredAcks = sarama.WaitForAll        // wait for all in-sync replicas to ack
		config.Producer.Partitioner = sarama.NewManualPartitioner // partition is chosen by the message (0 in sendLoop)

		c.producer, err = sarama.NewSyncProducer(strings.Split(producer.address, ","), config)
		// NOTE(review): msgQueue is allocated here but never written or drained.
		c.msgQueue = make(chan *sarama.ProducerMessage, 10)

		//&sarama.ProducerMessage{Topic: *topic, Partition: int32(*partition)}
		return c, err
	})

	pm := &mq.ProcuderMessage{Queue: queue, Data: msg, Timeout: timeout}
	// Non-blocking enqueue: fail fast instead of stalling the caller.
	select {
	case producer.messages <- pm:
		return nil
	default:
		return errors.New("producer无法连接到MQ服务器,消息队列已满无法发送")
	}
}
   190  
// Close marks the producer as done and — exactly once, guarded by
// producer.once — closes the shutdown signal and both message channels so
// that sendLoop drains and exits.
// NOTE(review): done is a plain bool read concurrently by Send and
// sendLoop without synchronization, and a Send racing with Close can
// write to a just-closed channel and panic ("send on closed channel") —
// confirm callers serialize Close against in-flight Sends.
func (producer *KafkaProducer) Close() {
	producer.done = true
	producer.once.Do(func() {
		close(producer.closeCh)
		close(producer.messages)
		close(producer.backupMsg)
	})

}
   201  
// kafkaProducerResolver adapts NewKafkaProducer to the mq producer
// resolver interface; registered under the "kafka" scheme in init.
type kafkaProducerResolver struct {
}
   204  
   205  func (s *kafkaProducerResolver) Resolve(address string, opts ...mq.Option) (mq.MQProducer, error) {
   206  	return NewKafkaProducer(address, opts...)
   207  }
// init registers the kafka resolver with the mq package under the
// "kafka" scheme so the producer can be constructed by name.
func init() {
	mq.RegisterProducer("kafka", &kafkaProducerResolver{})
}
   211  */