github.com/godaddy-x/freego@v1.0.156/zlog/zap_kafka_test.go (about)

     1  package zlog_test
     2  
     3  import (
     4  	"fmt"
     5  	"github.com/Shopify/sarama"
     6  	"go.uber.org/zap"
     7  	"go.uber.org/zap/zapcore"
     8  	"gopkg.in/natefinch/lumberjack.v2"
     9  	"os"
    10  )
    11  
// Logger is the package-level zap logger; it is nil until InitLogger runs.
var Logger *zap.Logger
    13  
// LogKafka adapts a sarama synchronous producer to io.Writer so zap can
// ship log entries to a Kafka topic.
type LogKafka struct {
	Producer sarama.SyncProducer // blocking producer; each Write is one acked message
	Topic    string              // destination Kafka topic
}
    18  
    19  func (lk *LogKafka) Write(p []byte) (n int, err error) {
    20  	msg := &sarama.ProducerMessage{}
    21  	msg.Topic = lk.Topic
    22  	msg.Value = sarama.ByteEncoder(p)
    23  	_, _, err = lk.Producer.SendMessage(msg)
    24  	if err != nil {
    25  		return
    26  	}
    27  	return
    28  
    29  }
    30  func InitLogger(mode string, fileName string, maxSize, maxBackups, maxAge int, compress bool, enableKafka bool, kafkaAddress []string) {
    31  	// 打印错误级别的日志
    32  	highPriority := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool {
    33  		return lvl >= zapcore.ErrorLevel
    34  	})
    35  	// 打印所有级别的日志
    36  	lowPriority := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool {
    37  		return lvl >= zapcore.DebugLevel
    38  	})
    39  	var allCore []zapcore.Core
    40  
    41  	hook := lumberjack.Logger{
    42  		Filename:   fileName,
    43  		MaxSize:    maxSize, // megabytes
    44  		MaxBackups: maxBackups,
    45  		MaxAge:     maxAge,   //days
    46  		Compress:   compress, // disabled by default
    47  	}
    48  
    49  	fileWriter := zapcore.AddSync(&hook)
    50  
    51  	// High-priority output should also go to standard error, and low-priority
    52  	// output should also go to standard out.
    53  	consoleDebugging := zapcore.Lock(os.Stdout)
    54  
    55  	// for human operators.
    56  	consoleEncoder := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig())
    57  
    58  	// Join the outputs, encoders, and level-handling functions into
    59  	// zapcore.Cores, then tee the four cores together.
    60  	// kafka
    61  	if len(kafkaAddress) > 0 && enableKafka {
    62  		var (
    63  			kl  LogKafka
    64  			err error
    65  		)
    66  		kl.Topic = "go_framework_log"
    67  		// 设置日志输入到Kafka的配置
    68  		config := sarama.NewConfig()
    69  		//等待服务器所有副本都保存成功后的响应
    70  		config.Producer.RequiredAcks = sarama.WaitForAll
    71  		//随机的分区类型
    72  		config.Producer.Partitioner = sarama.NewRandomPartitioner
    73  		//是否等待成功和失败后的响应,只有上面的RequireAcks设置不是NoReponse这里才有用.
    74  		config.Producer.Return.Successes = true
    75  		config.Producer.Return.Errors = true
    76  
    77  		kl.Producer, err = sarama.NewSyncProducer(kafkaAddress, config)
    78  		if err != nil {
    79  			fmt.Printf("connect kafka failed: %+v\n", err)
    80  			os.Exit(-1)
    81  		}
    82  		topicErrors := zapcore.AddSync(&kl)
    83  		// 打印在kafka
    84  		kafkaEncoder := zapcore.NewJSONEncoder(zap.NewDevelopmentEncoderConfig())
    85  		var kafkaCore zapcore.Core
    86  		if mode == "debug" {
    87  			kafkaCore = zapcore.NewCore(kafkaEncoder, topicErrors, lowPriority)
    88  
    89  		} else {
    90  			kafkaCore = zapcore.NewCore(kafkaEncoder, topicErrors, highPriority)
    91  
    92  		}
    93  		allCore = append(allCore, kafkaCore)
    94  	}
    95  	if mode == "debug" {
    96  		allCore = append(allCore, zapcore.NewCore(consoleEncoder, consoleDebugging, lowPriority))
    97  	}
    98  	allCore = append(allCore, zapcore.NewCore(consoleEncoder, fileWriter, highPriority))
    99  
   100  	core := zapcore.NewTee(allCore...)
   101  
   102  	// From a zapcore.Core, it's easy to construct a Logger.
   103  	Logger = zap.New(core).WithOptions(zap.AddCaller())
   104  }