github.com/erda-project/erda-infra@v1.0.9/providers/kafkav2/producer.go

// Copyright (c) 2021 Terminus, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kafkav2

import (
	"context"
	"encoding/json"
	"strings"
	"time"

	"github.com/recallsong/go-utils/reflectx"
	"github.com/segmentio/kafka-go"

	"github.com/erda-project/erda-infra/base/logs"
	writer "github.com/erda-project/erda-infra/pkg/parallel-writer"
)

// Message is a single record to be produced, carrying the payload together
// with an optional per-message topic and an optional partitioning key.
type Message struct {
	Topic string
	Data  []byte
	Key   []byte
}

// ProducerConfig declares the producer settings, loaded from the config file
// (see the file tags) or from the corresponding environment variables.
type ProducerConfig struct {
	Topic       string        `file:"topic"`
	Parallelism uint64        `file:"parallelism" default:"3" env:"PROVIDER_KAFKA_V2_PRODUCER_PARALLELISM"`
	Async       bool          `file:"async" default:"true" env:"PROVIDER_KAFKA_V2_PRODUCER_ASYNC"`
	Timeout     time.Duration `file:"timeout" default:"30s" env:"PROVIDER_KAFKA_V2_PRODUCER_TIMEOUT"`
	Batch       struct {
		Size      int           `file:"size" default:"100" env:"PROVIDER_KAFKA_V2_PRODUCER_BATCH_SIZE"`
		SizeBytes int64         `file:"size_bytes" default:"1048576" env:"PROVIDER_KAFKA_V2_PRODUCER_BATCH_SIZE_BYTES"`
		Timeout   time.Duration `file:"timeout" default:"800ms" env:"PROVIDER_KAFKA_V2_PRODUCER_BATCH_TIMEOUT"`
	} `file:"batch"`
}

// ProducerOption customizes the writer returned by NewProducer.
type ProducerOption interface {
	errHandler() func(error) error
}

type producerOption struct{ _eh func(error) error }

func (p *producerOption) errHandler() func(error) error { return p._eh }
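// exampleProducerConfig is an illustrative sketch, not part of the original
// provider: it builds a ProducerConfig by hand using the defaults declared in
// the struct tags above. In practice these values come from the provider's
// config file or the PROVIDER_KAFKA_V2_PRODUCER_* environment variables; the
// topic name here is hypothetical.
func exampleProducerConfig() ProducerConfig {
	cfg := ProducerConfig{
		Topic:       "example-topic", // hypothetical; leave empty to choose the topic per message
		Parallelism: 3,
		Async:       true,
		Timeout:     30 * time.Second,
	}
	cfg.Batch.Size = 100          // messages per batch
	cfg.Batch.SizeBytes = 1048576 // 1 MiB per batch
	cfg.Batch.Timeout = 800 * time.Millisecond
	return cfg
}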
// WithAsyncWriteErrorHandler sets the handler invoked when an asynchronous
// write fails, replacing the default log-and-drop behavior of producerError.
func WithAsyncWriteErrorHandler(eh func(error) error) ProducerOption {
	return &producerOption{_eh: eh}
}

func newProducer(servers string, cfg ProducerConfig, log logs.Logger) (*producer, error) {
	prod := &producer{
		logger: log,
	}
	pw := &kafka.Writer{
		Addr:                   kafka.TCP(strings.Split(servers, ",")...),
		Balancer:               kafka.CRC32Balancer{}, // messages with the same key go to the same partition
		Async:                  cfg.Async,
		AllowAutoTopicCreation: true,
		WriteTimeout:           cfg.Timeout,
		BatchSize:              cfg.Batch.Size,
		BatchTimeout:           cfg.Batch.Timeout,
		BatchBytes:             cfg.Batch.SizeBytes,
	}
	if cfg.Topic != "" {
		pw.Topic = cfg.Topic
	}
	prod.pw = pw
	return prod, nil
}

type producer struct {
	logger logs.Logger
	pw     *kafka.Writer
}

func (p *producer) Write(data interface{}) error {
	return p.publish(data)
}

// WriteN publishes the given items one by one and returns how many were
// written successfully before the first error.
func (p *producer) WriteN(data ...interface{}) (int, error) {
	for i, item := range data {
		err := p.publish(item)
		if err != nil {
			return i, err
		}
	}
	return len(data), nil
}

func (p *producer) publish(data interface{}) error {
	var (
		value []byte
		key   []byte
	)
	topic := ""

	switch val := data.(type) {
	case Message:
		if val.Topic != "" {
			topic = val.Topic
		}
		value = val.Data
		key = val.Key
	case []byte:
		value = val
	case string:
		value = reflectx.StringToBytes(val)
	default:
		buf, err := json.Marshal(data)
		if err != nil {
			return err
		}
		value = buf
	}
	// kafka-go rejects messages that carry their own Topic when the writer already
	// has a default Topic, so the per-message topic is only set when the writer has none.
	if p.pw.Topic == "" {
		err := p.pw.WriteMessages(context.TODO(), kafka.Message{
			Topic: topic,
			Key:   key,
			Value: value,
		})
		if err != nil {
			return err
		}
	} else {
		err := p.pw.WriteMessages(context.TODO(), kafka.Message{
			Key:   key,
			Value: value,
		})
		if err != nil {
			return err
		}
	}

	return nil
}

func (p *producer) Close() error {
	return p.pw.Close()
}

// NewProducer builds a kafka producer and wraps it in a parallel batch writer.
// The last non-nil error handler passed via options wins; otherwise write
// errors are logged and dropped by producerError.
func (s *service) NewProducer(cfg ProducerConfig, options ...ProducerOption) (writer.Writer, error) {
	var eh writer.ErrorHandler = s.producerError
	for _, item := range options {
		if item != nil && item.errHandler() != nil {
			eh = item.errHandler()
		}
	}
	prod, err := newProducer(s.p.Cfg.Servers, cfg, s.log)
	if err != nil {
		return nil, err
	}
	return writer.ParallelBatch(func(uint64) writer.Writer {
		return prod
	}, cfg.Parallelism, 1, 0, eh), nil
}

func (s *service) producerError(err error) error {
	s.log.Errorf("failed to write to kafka: %s", err)
	return nil // skip error
}
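// exampleProduce is an illustrative sketch, not part of the original provider:
// the service value, topic name, key and payload below are assumptions showing
// how a caller might obtain a writer from NewProducer and publish a Message.
// It assumes the parallel writer.Writer interface exposes Close, which the
// producer implements above.
func exampleProduce(s *service) error {
	w, err := s.NewProducer(
		ProducerConfig{Topic: "example-topic"}, // hypothetical topic
		WithAsyncWriteErrorHandler(func(err error) error {
			// Return the error instead of dropping it; the default producerError
			// returns nil to skip write errors.
			return err
		}),
	)
	if err != nil {
		return err
	}
	defer w.Close()

	// Message, []byte, string and arbitrary JSON-marshalable values are all accepted
	// by Write; a Message additionally carries a partitioning key (and a topic when
	// the writer was created without a default one).
	return w.Write(Message{
		Key:  []byte("example-key"),
		Data: []byte(`{"hello":"kafka"}`),
	})
}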