github.com/galamsiva2020/kubernetes-heapster-monitoring@v0.0.0-20210823134957-3c1baa7c1e70/common/kafka/kafka.go

// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kafka

import (
	"crypto/tls"
	"crypto/x509"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/url"
	"strconv"
	"time"

	kafka "github.com/Shopify/sarama"
	"github.com/golang/glog"
)

const (
	brokerClientID         = "kafka-sink"
	brokerDialTimeout      = 10 * time.Second
	brokerDialRetryLimit   = 1
	brokerDialRetryWait    = 0
	brokerLeaderRetryLimit = 1
	brokerLeaderRetryWait  = 0
	metricsTopic           = "heapster-metrics"
	eventsTopic            = "heapster-events"
)

// TimeSeriesTopic and EventsTopic are the query-parameter names used on the
// sink URI to override the default metrics and events topics.
const (
	TimeSeriesTopic = "timeseriestopic"
	EventsTopic     = "eventstopic"
)

// KafkaClient publishes heapster data to a single Kafka topic.
type KafkaClient interface {
	Name() string
	Stop()
	ProduceKafkaMessage(msgData interface{}) error
}

type kafkaSink struct {
	producer  kafka.SyncProducer
	dataTopic string
}

// ProduceKafkaMessage marshals msgData to JSON and sends it synchronously to
// the sink's topic.
func (sink *kafkaSink) ProduceKafkaMessage(msgData interface{}) error {
	start := time.Now()
	msgJSON, err := json.Marshal(msgData)
	if err != nil {
		return fmt.Errorf("failed to marshal items to JSON: %s", err)
	}

	_, _, err = sink.producer.SendMessage(&kafka.ProducerMessage{
		Topic: sink.dataTopic,
		Key:   nil,
		Value: kafka.ByteEncoder(msgJSON),
	})
	if err != nil {
		return fmt.Errorf("failed to produce message to %s: %s", sink.dataTopic, err)
	}
	end := time.Now()
	glog.V(4).Infof("Exported %d bytes to kafka in %s", len(msgJSON), end.Sub(start))
	return nil
}

func (sink *kafkaSink) Name() string {
	return "Apache Kafka Sink"
}

func (sink *kafkaSink) Stop() {
	if err := sink.producer.Close(); err != nil {
		glog.Errorf("failed to close kafka producer: %v", err)
	}
}

// getTopic resolves the topic for the given topic type, preferring an
// override from the sink URI's query options over the built-in default.
func getTopic(opts url.Values, topicType string) (string, error) {
	var topic string
	switch topicType {
	case TimeSeriesTopic:
		topic = metricsTopic
	case EventsTopic:
		topic = eventsTopic
	default:
		return "", fmt.Errorf("topic type '%s' is illegal", topicType)
	}

	if len(opts[topicType]) > 0 {
		topic = opts[topicType][0]
	}

	return topic, nil
}

// getCompression maps the 'compression' query option onto a sarama codec,
// defaulting to no compression.
func getCompression(opts url.Values) (kafka.CompressionCodec, error) {
	if len(opts["compression"]) == 0 {
		return kafka.CompressionNone, nil
	}
	comp := opts["compression"][0]
	switch comp {
	case "none":
		return kafka.CompressionNone, nil
	case "gzip":
		return kafka.CompressionGZIP, nil
	case "snappy":
		return kafka.CompressionSnappy, nil
	case "lz4":
		return kafka.CompressionLZ4, nil
	default:
		return kafka.CompressionNone, fmt.Errorf("compression '%s' is illegal; use none, gzip, snappy or lz4", comp)
	}
}
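// For reference, a sink URI exercising the topic and compression options
// above might look like the following (a minimal sketch; the broker address
// and topic name are illustrative values, not defaults of this package):
//
//	kafka:?brokers=localhost:9092&timeseriestopic=custom-metrics&compression=gzip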
// getTlsConfiguration builds a *tls.Config from the 'cacert', 'cert', 'key'
// and 'insecuressl' query options. The second return value reports whether
// TLS should be enabled at all.
func getTlsConfiguration(opts url.Values) (*tls.Config, bool, error) {
	if len(opts["cacert"]) == 0 &&
		(len(opts["cert"]) == 0 || len(opts["key"]) == 0) {
		return nil, false, nil
	}
	t := &tls.Config{}
	if len(opts["cacert"]) != 0 {
		caFile := opts["cacert"][0]
		caCert, err := ioutil.ReadFile(caFile)
		if err != nil {
			return nil, false, err
		}
		caCertPool := x509.NewCertPool()
		caCertPool.AppendCertsFromPEM(caCert)
		t.RootCAs = caCertPool
	}

	if len(opts["cert"]) != 0 && len(opts["key"]) != 0 {
		certFile := opts["cert"][0]
		keyFile := opts["key"][0]
		cert, err := tls.LoadX509KeyPair(certFile, keyFile)
		if err != nil {
			return nil, false, err
		}
		t.Certificates = []tls.Certificate{cert}
	}
	if len(opts["insecuressl"]) != 0 {
		insecuressl := opts["insecuressl"][0]
		insecure, err := strconv.ParseBool(insecuressl)
		if err != nil {
			return nil, false, err
		}
		t.InsecureSkipVerify = insecure
	}

	return t, true, nil
}

// getSASLConfiguration reads the 'user' and 'password' query options. SASL
// is only enabled when both are present.
func getSASLConfiguration(opts url.Values) (string, string, bool, error) {
	if len(opts["user"]) == 0 {
		return "", "", false, nil
	}
	user := opts["user"][0]
	if len(opts["password"]) == 0 {
		return "", "", false, nil
	}
	password := opts["password"][0]
	return user, password, true, nil
}

// getOptionsWithoutSecrets renders the sink options for logging, masking the
// password and restoring it once the string has been built.
func getOptionsWithoutSecrets(values url.Values) string {
	var password []string
	if len(values["password"]) != 0 {
		password = values["password"]
		values["password"] = []string{"***"}
		defer func() { values["password"] = password }()
	}
	options := fmt.Sprintf("kafka sink option: %v", values)
	return options
}
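// A sink URI enabling both TLS and SASL through the helpers above might look
// like this (a sketch only; every path and credential is a placeholder):
//
//	kafka:?brokers=broker1:9093&cacert=/etc/ssl/ca.pem&cert=/etc/ssl/client.pem&key=/etc/ssl/client.key&user=alice&password=secret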
// NewKafkaClient parses the sink URI's query options and returns a
// KafkaClient backed by a sarama synchronous producer.
func NewKafkaClient(uri *url.URL, topicType string) (KafkaClient, error) {
	opts, err := url.ParseQuery(uri.RawQuery)
	if err != nil {
		return nil, fmt.Errorf("failed to parse url's query string: %s", err)
	}
	glog.V(3).Info(getOptionsWithoutSecrets(opts))

	topic, err := getTopic(opts, topicType)
	if err != nil {
		return nil, err
	}

	compression, err := getCompression(opts)
	if err != nil {
		return nil, err
	}

	var kafkaBrokers []string
	if len(opts["brokers"]) < 1 {
		return nil, fmt.Errorf("there is no broker assigned for connecting to kafka")
	}
	kafkaBrokers = append(kafkaBrokers, opts["brokers"]...)
	glog.V(2).Infof("initializing kafka sink with brokers - %v", kafkaBrokers)

	// Route sarama's own logging through glog.
	kafka.Logger = GologAdapterLogger{}

	// Configure the broker connection and producer behaviour.
	config := kafka.NewConfig()
	config.ClientID = brokerClientID
	config.Net.DialTimeout = brokerDialTimeout
	config.Metadata.Retry.Max = brokerDialRetryLimit
	config.Metadata.Retry.Backoff = brokerDialRetryWait
	config.Producer.Retry.Max = brokerLeaderRetryLimit
	config.Producer.Retry.Backoff = brokerLeaderRetryWait
	config.Producer.Compression = compression
	config.Producer.Partitioner = kafka.NewRoundRobinPartitioner
	config.Producer.RequiredAcks = kafka.WaitForLocal
	// sarama's SyncProducer requires both Return.Errors and Return.Successes.
	config.Producer.Return.Errors = true
	config.Producer.Return.Successes = true

	config.Net.TLS.Config, config.Net.TLS.Enable, err = getTlsConfiguration(opts)
	if err != nil {
		return nil, err
	}

	config.Net.SASL.User, config.Net.SASL.Password, config.Net.SASL.Enable, err = getSASLConfiguration(opts)
	if err != nil {
		return nil, err
	}

	// Set up the synchronous producer against the Kafka brokers.
	glog.V(3).Info("attempting to set up kafka sink")
	sinkProducer, err := kafka.NewSyncProducer(kafkaBrokers, config)
	if err != nil {
		return nil, fmt.Errorf("failed to set up producer: %v", err)
	}

	glog.V(3).Info("kafka sink set up successfully")
	return &kafkaSink{
		producer:  sinkProducer,
		dataTopic: topic,
	}, nil
}
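// Example usage (a minimal sketch; the broker address below is illustrative
// and error handling is elided to comments):
//
//	uri, _ := url.Parse("kafka:?brokers=localhost:9092")
//	client, err := NewKafkaClient(uri, TimeSeriesTopic)
//	if err != nil {
//		// broker unreachable or options invalid
//	}
//	defer client.Stop()
//	if err := client.ProduceKafkaMessage(map[string]string{"metric": "42"}); err != nil {
//		// message could not be produced
//	}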