github.com/1aal/kubeblocks@v0.0.0-20231107070852-e1c03e598921/pkg/lorry/engines/kafka/kafka.go

/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kafka

import (
	"context"
	"sync"
	"time"

	"github.com/1aal/kubeblocks/pkg/lorry/engines/kafka/thirdparty"

	"github.com/go-logr/logr"

	"github.com/Shopify/sarama"
)

// Kafka allows reading/writing to a Kafka consumer group.
type Kafka struct {
	Producer        sarama.SyncProducer
	broker          *sarama.Broker
	consumerGroup   string
	brokers         []string
	logger          logr.Logger
	authType        string
	saslUsername    string
	saslPassword    string
	initialOffset   int64
	cg              sarama.ConsumerGroup
	consumer        consumer
	config          *sarama.Config
	subscribeTopics TopicHandlerConfig
	subscribeLock   sync.Mutex

	backOffConfig thirdparty.Config

	// DefaultConsumeRetryEnabled should be true for the kafka pubsub component
	// and false for the kafka binding component. It can be overridden by the
	// consumeRetryEnabled metadata field.
	DefaultConsumeRetryEnabled bool
	consumeRetryEnabled        bool
	consumeRetryInterval       time.Duration
}

func NewKafka(logger logr.Logger) *Kafka {
	return &Kafka{
		logger:          logger,
		subscribeTopics: make(TopicHandlerConfig),
		subscribeLock:   sync.Mutex{},
	}
}

// Init parses the metadata and establishes the connection to Kafka.
func (k *Kafka) Init(_ context.Context, metadata map[string]string) error {
	upgradedMetadata, err := k.upgradeMetadata(metadata)
	if err != nil {
		return err
	}

	meta, err := k.getKafkaMetadata(upgradedMetadata)
	if err != nil {
		return err
	}

	k.brokers = meta.Brokers
	k.consumerGroup = meta.ConsumerGroup
	k.initialOffset = meta.InitialOffset
	k.authType = meta.AuthType

	k.broker = sarama.NewBroker(k.brokers[0])

	config := sarama.NewConfig()
	config.Version = meta.Version
	config.Consumer.Offsets.Initial = k.initialOffset

	if meta.ClientID != "" {
		config.ClientID = meta.ClientID
	}

	err = updateTLSConfig(config, meta)
	if err != nil {
		return err
	}

	switch k.authType {
	case oidcAuthType:
		k.logger.Info("Configuring SASL OAuth2/OIDC authentication")
		err = updateOidcAuthInfo(config, meta)
		if err != nil {
			return err
		}
	case passwordAuthType:
		k.logger.Info("Configuring SASL password authentication")
		k.saslUsername = meta.SaslUsername
		k.saslPassword = meta.SaslPassword
		updatePasswordAuthInfo(config, meta, k.saslUsername, k.saslPassword)
	case mtlsAuthType:
		k.logger.Info("Configuring mTLS authentication")
		err = updateMTLSAuthInfo(config, meta)
		if err != nil {
			return err
		}
	}

	k.config = config
	sarama.Logger = SaramaLogBridge{logger: k.logger}

	k.Producer, err = getSyncProducer(*k.config, k.brokers, meta.MaxMessageBytes)
	if err != nil {
		return err
	}

	// The default retry configuration is used if no backOff properties are set.
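	// DecodeConfigWithPrefix fills backOffConfig from metadata keys carrying
	// the "backOff" prefix (for example "backOffMaxRetries"); the sample key
	// is illustrative, the real key set is defined by thirdparty.Config.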
	if err := thirdparty.DecodeConfigWithPrefix(
		&k.backOffConfig,
		metadata,
		"backOff"); err != nil {
		return err
	}
	k.consumeRetryEnabled = meta.ConsumeRetryEnabled
	k.consumeRetryInterval = meta.ConsumeRetryInterval

	k.logger.Info("Kafka message bus initialization complete")

	return nil
}

func (k *Kafka) Close() (err error) {
	k.closeSubscriptionResources()

	if k.Producer != nil {
		err = k.Producer.Close()
		k.Producer = nil
	}

	return err
}

// EventHandler is the handler used to handle the subscribed event.
type EventHandler func(ctx context.Context, msg *NewEvent) error

// BulkEventHandler is the handler used to handle the subscribed bulk event.
// type BulkEventHandler func(ctx context.Context, msg *KafkaBulkMessage) ([]pubsub.BulkSubscribeResponseEntry, error)

// SubscriptionHandlerConfig holds the handler and configuration for a subscription.
type SubscriptionHandlerConfig struct {
	IsBulkSubscribe bool
	Handler         EventHandler
}

// NewEvent is an event arriving from a message bus instance.
type NewEvent struct {
	Data        []byte            `json:"data"`
	Topic       string            `json:"topic"`
	Metadata    map[string]string `json:"metadata"`
	ContentType *string           `json:"contentType,omitempty"`
}

// KafkaBulkMessage is a bulk event arriving from a message bus instance.
type KafkaBulkMessage struct {
	Entries  []KafkaBulkMessageEntry `json:"entries"`
	Topic    string                  `json:"topic"`
	Metadata map[string]string       `json:"metadata"`
}

// KafkaBulkMessageEntry is an item contained inside a bulk event arriving from a message bus instance.
type KafkaBulkMessageEntry struct {
	EntryID     string            `json:"entryId"` //nolint:stylecheck
	Event       []byte            `json:"event"`
	ContentType string            `json:"contentType,omitempty"`
	Metadata    map[string]string `json:"metadata"`
}

// BrokerOpen ensures the broker connection is open, dialing it if necessary.
func (k *Kafka) BrokerOpen() error {
	connected, err := k.broker.Connected()
	if err != nil {
		k.logger.Error(err, "failed to check broker connection state")
		return err
	}
	if !connected {
		err = k.broker.Open(k.config)
		if err != nil {
			k.logger.Error(err, "failed to open broker connection")
			return err
		}
	}

	return nil
}

// BrokerClose closes the broker connection, discarding any error.
func (k *Kafka) BrokerClose() {
	_ = k.broker.Close()
}

// BrokerCreateTopics creates the given topic; -1 for NumPartitions and
// ReplicationFactor lets the broker apply its default settings.
func (k *Kafka) BrokerCreateTopics(topic string) error {
	req := &sarama.CreateTopicsRequest{
		Version: 1,
		TopicDetails: map[string]*sarama.TopicDetail{
			topic: {
				NumPartitions:     -1,
				ReplicationFactor: -1,
			},
		},
		Timeout:      time.Second,
		ValidateOnly: false,
	}

	resp, err := k.broker.CreateTopics(req)
	if err != nil {
		k.logger.Error(err, "CreateTopics request failed")
		return err
	}

	// Error code details: https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
	respErr := resp.TopicErrors[topic]
	if respErr.Err != sarama.ErrNoError {
		k.logger.Error(respErr, "CreateTopics returned a topic error", "errNo", int16(respErr.Err))
		return respErr
	}
	return nil
}
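
// Usage sketch (illustrative, not part of this package's API): how a caller
// might wire NewKafka, Init, and the broker helpers together. The metadata
// keys below ("brokers", "consumerGroup", "clientID") are assumptions inferred
// from the fields parsed in Init; getKafkaMetadata defines the real key set.
//
//	k := NewKafka(logr.Discard())
//	if err := k.Init(context.Background(), map[string]string{
//		"brokers":       "localhost:9092",
//		"consumerGroup": "demo-group",
//		"clientID":      "demo-client",
//	}); err != nil {
//		// handle initialization failure
//	}
//	defer k.Close()
//
//	if err := k.BrokerOpen(); err == nil {
//		_ = k.BrokerCreateTopics("demo-topic")
//		k.BrokerClose()
//	}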