github.com/xwi88/log4go@v0.0.6/kafka_writer.go (about)

package log4go

import (
	"encoding/json"
	"log"
	"time"

	"github.com/Shopify/sarama"
)

// KafKaMSGFields kafka msg fields
type KafKaMSGFields struct {
	ESIndex   string `json:"es_index" mapstructure:"es_index"` // optional, init field; set it if the data should also be sent to ES
	Level     string `json:"level"`                            // dynamic, set by the logger; marks the record level
	File      string `json:"file"`                             // source code file:line_number
	Message   string `json:"message"`                          // required, dynamic
	ServerIP  string `json:"server_ip"`                        // required, init field, set by the app
	Timestamp string `json:"timestamp"`                        // required, dynamic, set by the logger
	Now       int64  `json:"now"`                              // optional

	ExtraFields map[string]interface{} `json:"extra_fields" mapstructure:"extra_fields"` // extra fields to be added to the record
}

// KafKaWriterOptions kafka writer options
type KafKaWriterOptions struct {
	Enable                  bool `json:"enable" mapstructure:"enable"`
	Debug                   bool `json:"debug" mapstructure:"debug"`                     // if true, the sent messages are also logged
	SpecifyVersion          bool `json:"specify_version" mapstructure:"specify_version"` // if true, use the configured version; default false
	ProducerReturnSuccesses bool `json:"producer_return_successes" mapstructure:"producer_return_successes"`
	BufferSize              int  `json:"buffer_size" mapstructure:"buffer_size"`

	Level      string `json:"level" mapstructure:"level"`
	VersionStr string `json:"version" mapstructure:"version"` // specifies the kafka version, e.g. 0.10.0.1 or 1.1.1

	Key string `json:"key" mapstructure:"key"` // kafka producer key, optional

	ProducerTopic   string        `json:"producer_topic" mapstructure:"producer_topic"`
	ProducerTimeout time.Duration `json:"producer_timeout" mapstructure:"producer_timeout"`
	Brokers         []string      `json:"brokers" mapstructure:"brokers"`

	MSG KafKaMSGFields `json:"msg"`
}

// KafKaWriter kafka writer
type KafKaWriter struct {
	level    int
	producer sarama.SyncProducer
	messages chan *sarama.ProducerMessage
	options  KafKaWriterOptions

	run  bool // guards against blocking when the writer is not running
	quit chan struct{}
	stop chan struct{}
}

// NewKafKaWriter creates a new kafka writer
func NewKafKaWriter(options KafKaWriterOptions) *KafKaWriter {
	defaultLevel := DEBUG
	if len(options.Level) > 0 {
		defaultLevel = getLevelDefault(options.Level, defaultLevel, "")
	}

	return &KafKaWriter{
		options: options,
		quit:    make(chan struct{}),
		stop:    make(chan struct{}),
		level:   defaultLevel,
	}
}
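
// A minimal usage sketch, kept as a comment; the broker address, topic, and
// level below are illustrative placeholders, not package defaults:
//
//	w := NewKafKaWriter(KafKaWriterOptions{
//		Enable:        true,
//		Level:         "INFO",
//		Brokers:       []string{"127.0.0.1:9092"},
//		ProducerTopic: "log4go",
//	})
//	if err := w.Init(); err != nil {
//		log.Fatal(err)
//	}
//	defer w.Stop()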

// Init service for Record
func (k *KafKaWriter) Init() error {
	return k.Start()
}

// Write service for Record
func (k *KafKaWriter) Write(r *Record) error {
	if r.level > k.level {
		return nil
	}

	logMsg := r.msg
	if logMsg == "" {
		return nil
	}
	data := k.options.MSG
	// timestamp, level
	data.Level = LevelFlags[r.level]
	now := time.Now()
	data.Now = now.Unix()
	data.Timestamp = now.Format(timestampLayout)
	data.Message = logMsg
	data.File = r.file

	byteData, err := json.Marshal(data)
	if err != nil {
		return err
	}

	var structData map[string]interface{}
	err = json.Unmarshal(byteData, &structData)
	if err != nil {
		log.Printf("[log4go] kafka writer err: %v", err.Error())
		return err
	}
	// remove the nested extra_fields object; its entries are merged below
	delete(structData, "extra_fields")

	// add extra fields that do not exist yet
	for k, v := range data.ExtraFields {
		if _, ok := structData[k]; !ok {
			structData[k] = v
		}
	}
	structDataByte, err := json.Marshal(structData)
	if err != nil {
		return err
	}

	jsonData := string(structDataByte)

	key := k.options.Key

	msg := &sarama.ProducerMessage{
		Topic: k.options.ProducerTopic,
		// the timestamp is autofilled; to set it explicitly, Version must be >= sarama.V0_10_0_1
		// Timestamp: time.Now(),
		Key:   sarama.ByteEncoder(key),
		Value: sarama.ByteEncoder(jsonData),
	}

	if k.options.Debug {
		log.Printf("[log4go] msg [topic: %v, timestamp: %v, brokers: %v]\nkey:   %v\nvalue: %v\n", msg.Topic,
			msg.Timestamp, k.options.Brokers, key, jsonData)
	}
	go k.asyncWriteMessages(msg)

	return nil
}
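
// For reference, a record logged at INFO level yields a JSON payload shaped
// roughly like this (values are illustrative; the exact timestamp format
// depends on timestampLayout):
//
//	{
//		"es_index": "app-log",
//		"level": "INFO",
//		"file": "main.go:42",
//		"message": "hello kafka",
//		"server_ip": "10.0.0.1",
//		"timestamp": "2020-05-01 12:00:00",
//		"now": 1588334400
//	}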

func (k *KafKaWriter) asyncWriteMessages(msg *sarama.ProducerMessage) {
	if msg == nil || !k.run {
		// drop the message rather than block while the writer is not running
		return
	}
	k.messages <- msg
}

// daemonProducer sends the buffered messages to kafka
func (k *KafKaWriter) daemonProducer() {
	k.run = true

next:
	for {
		select {
		case mes, ok := <-k.messages:
			if !ok {
				// the messages channel has been closed
				break next
			}
			partition, offset, err := k.producer.SendMessage(mes)
			if err != nil {
				log.Printf("[log4go] SendMessage(topic=%s, partition=%v, offset=%v, key=%s, value=%s, timestamp=%v) err=%s\n\n", mes.Topic,
					partition, offset, mes.Key, mes.Value, mes.Timestamp, err.Error())
				continue
			}
			if k.options.Debug {
				log.Printf("[log4go] SendMessage(topic=%s, partition=%v, offset=%v, key=%s, value=%s, timestamp=%v)\n\n", mes.Topic,
					partition, offset, mes.Key, mes.Value, mes.Timestamp)
			}
		case <-k.stop:
			break next
		}
	}
	k.quit <- struct{}{}
}

// Start starts the kafka writer
func (k *KafKaWriter) Start() (err error) {
	log.Printf("[log4go] kafka writer starting")
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = k.options.ProducerReturnSuccesses
	cfg.Producer.Timeout = k.options.ProducerTimeout

	// to set a timestamp on the data, the kafka version must be set
	versionStr := k.options.VersionStr
	// defaults to 2.5.0, ref https://kafka.apache.org/downloads#2.5.0
	// for an older kafka, specify e.g. versionStr=0.10.0.1 (V0_10_0_1) and set
	// k.options.SpecifyVersion=true
	kafkaVer := sarama.V2_5_0_0

	if k.options.SpecifyVersion {
		if versionStr != "" {
			if kafkaVersion, err := sarama.ParseKafkaVersion(versionStr); err == nil {
				// set the version carefully; a mismatched version may cause EOF errors
				kafkaVer = kafkaVersion
			}
		}
	}
	// if no version is specified, use sarama.V2_5_0_0 to guarantee the timestamp can be controlled
	cfg.Version = kafkaVer

	// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
	// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
	// modulus the number of partitions. This ensures that messages with the same key always end up on the
	// same partition.
	// cfg.Producer.Partitioner = sarama.NewHashPartitioner
	// cfg.Producer.Partitioner = sarama.NewRandomPartitioner
	cfg.Producer.Partitioner = sarama.NewRoundRobinPartitioner
	// cfg.Producer.Partitioner = sarama.NewReferenceHashPartitioner

	k.producer, err = sarama.NewSyncProducer(k.options.Brokers, cfg)
	if err != nil {
		log.Printf("[log4go] sarama.NewSyncProducer err, message=%s", err.Error())
		return err
	}
	size := k.options.BufferSize
	if size <= 1 {
		size = 1024
	}
	k.messages = make(chan *sarama.ProducerMessage, size)

	go k.daemonProducer()
	log.Printf("[log4go] kafka writer started")
	return err
}

// Stop stops the kafka writer
func (k *KafKaWriter) Stop() {
	if k.run {
		k.run = false
		close(k.stop) // signal daemonProducer to exit its loop
		<-k.quit      // wait for daemonProducer to finish
		err := k.producer.Close()
		if err != nil {
			log.Printf("[log4go] kafkaWriter stop error: %v", err.Error())
		}
	}
}