github.com/rudderlabs/rudder-go-kit@v0.30.0/testhelper/docker/resource/kafka/kafka.go

package kafka

import (
	"fmt"
	"strconv"

	"github.com/ory/dockertest/v3"
	dc "github.com/ory/dockertest/v3/docker"
	"golang.org/x/sync/errgroup"

	kithelper "github.com/rudderlabs/rudder-go-kit/testhelper"
	"github.com/rudderlabs/rudder-go-kit/testhelper/docker/resource"
)

// scramHashGenerator selects the SASL mechanism used by the brokers.
type scramHashGenerator uint8

const (
	scramPlainText scramHashGenerator = iota
	scramSHA256
	scramSHA512

	kafkaClientPort = "9092"
)

// User holds the credentials of a SASL user.
type User struct {
	Username, Password string
}

// Option customises the Kafka cluster created by Setup.
type Option interface {
	apply(*config)
}

type withOption struct{ setup func(*config) }

func (w withOption) apply(c *config) { w.setup(c) }

// SASLConfig describes the SASL users and the JKS key material used when the
// cluster is started with one of the WithSASL* options.
type SASLConfig struct {
	BrokerUser                   User
	Users                        []User
	CertificatePassword          string
	KeyStorePath, TrustStorePath string

	hashType scramHashGenerator
}

type config struct {
	brokers                    uint
	saslConfig                 *SASLConfig
	network                    *dc.Network
	dontUseDockerHostListeners bool
	customAdvertisedListener   string
	useSchemaRegistry          bool
}

func (c *config) defaults() {
	if c.brokers < 1 {
		c.brokers = 1
	}
}

// WithBrokers sets the number of brokers in the cluster (default: 1).
func WithBrokers(noOfBrokers uint) Option {
	return withOption{setup: func(c *config) {
		c.brokers = noOfBrokers
	}}
}

// WithSASLPlain configures SASL authentication with the PLAIN mechanism.
func WithSASLPlain(conf *SASLConfig) Option {
	return withSASL(scramPlainText, conf)
}

// WithSASLScramSHA256 configures SASL authentication with SCRAM-SHA-256.
func WithSASLScramSHA256(conf *SASLConfig) Option {
	return withSASL(scramSHA256, conf)
}

// WithSASLScramSHA512 configures SASL authentication with SCRAM-SHA-512.
func WithSASLScramSHA512(conf *SASLConfig) Option {
	return withSASL(scramSHA512, conf)
}

func withSASL(hashType scramHashGenerator, conf *SASLConfig) Option {
	conf.hashType = hashType
	return withOption{setup: func(c *config) {
		c.saslConfig = conf
	}}
}

// WithNetwork sets the Docker network to use for the cluster. If not
// provided, Setup creates a dedicated "kafka_network".
func WithNetwork(network *dc.Network) Option {
	return withOption{setup: func(c *config) {
		c.network = network
	}}
}

// WithoutDockerHostListeners advertises the broker's in-network address
// instead of the host-mapped port.
func WithoutDockerHostListeners() Option {
	return withOption{setup: func(c *config) {
		c.dontUseDockerHostListeners = true
	}}
}

// WithCustomAdvertisedListener sets a custom advertised listener for the
// CLIENT listener.
func WithCustomAdvertisedListener(listener string) Option {
	return withOption{setup: func(c *config) {
		c.customAdvertisedListener = listener
	}}
}

// WithSchemaRegistry starts a Schema Registry container alongside the
// cluster.
func WithSchemaRegistry() Option {
	return withOption{setup: func(c *config) {
		c.useSchemaRegistry = true
	}}
}

// Resource is a running Kafka cluster, with one host-mapped port per broker.
type Resource struct {
	Ports             []string
	SchemaRegistryURL string

	pool       *dockertest.Pool
	containers []*dockertest.Resource
}

// Destroy purges all containers of the cluster concurrently.
func (k *Resource) Destroy() error {
	g := errgroup.Group{}
	for i := range k.containers {
		i := i
		g.Go(func() error {
			return k.pool.Purge(k.containers[i])
		})
	}
	return g.Wait()
}
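// Setup starts one Zookeeper container and the configured number of Kafka
// brokers (plus, optionally, a Schema Registry) on the given or a newly
// created Docker network, registering cleanup of every container with cln.
//
// A minimal usage sketch, assuming a *testing.T (which satisfies the
// resource.Cleaner interface used here) and a running Docker daemon:
//
//	pool, err := dockertest.NewPool("")
//	if err != nil {
//		t.Fatal(err)
//	}
//	res, err := kafka.Setup(pool, t,
//		kafka.WithBrokers(3),
//		kafka.WithSchemaRegistry(),
//	)
//	if err != nil {
//		t.Fatal(err)
//	}
//	broker := "localhost:" + res.Ports[0] // first broker, host-mapped port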
func Setup(pool *dockertest.Pool, cln resource.Cleaner, opts ...Option) (*Resource, error) {
	var c config
	for _, opt := range opts {
		opt.apply(&c)
	}
	c.defaults()

	network := c.network
	if c.network == nil {
		var err error
		network, err = pool.Client.CreateNetwork(dc.CreateNetworkOptions{Name: "kafka_network"})
		if err != nil {
			return nil, fmt.Errorf("could not create docker network: %w", err)
		}
		cln.Cleanup(func() {
			if err := pool.Client.RemoveNetwork(network.ID); err != nil {
				cln.Log(fmt.Errorf("could not remove kafka network: %w", err))
			}
		})
	}

	zookeeperPortInt, err := kithelper.GetFreePort()
	if err != nil {
		return nil, err
	}
	zookeeperPort := fmt.Sprintf("%d/tcp", zookeeperPortInt)
	zookeeperContainer, err := pool.RunWithOptions(&dockertest.RunOptions{
		Repository: "bitnami/zookeeper",
		Tag:        "3.9-debian-11",
		NetworkID:  network.ID,
		Hostname:   "zookeeper",
		PortBindings: map[dc.Port][]dc.PortBinding{
			"2181/tcp": {{HostIP: "zookeeper", HostPort: zookeeperPort}},
		},
		Env: []string{"ALLOW_ANONYMOUS_LOGIN=yes"},
	})
	if err != nil {
		return nil, err
	}
	cln.Cleanup(func() {
		if err := pool.Purge(zookeeperContainer); err != nil {
			cln.Log("Could not purge resource", err)
		}
	})

	cln.Log("Zookeeper localhost port", zookeeperContainer.GetPort("2181/tcp"))

	envVariables := []string{
		"KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181",
		"KAFKA_CFG_INTER_BROKER_LISTENER_NAME=INTERNAL",
		"ALLOW_PLAINTEXT_LISTENER=yes",
	}

	var schemaRegistryURL string
	if c.useSchemaRegistry {
		bootstrapServers := ""
		for i := uint(1); i <= c.brokers; i++ {
			bootstrapServers += fmt.Sprintf("PLAINTEXT://kafka%d:9090,", i)
		}
		src, err := pool.RunWithOptions(&dockertest.RunOptions{
			Repository:   "bitnami/schema-registry",
			Tag:          "7.5-debian-11",
			NetworkID:    network.ID,
			Hostname:     "schemaregistry",
			ExposedPorts: []string{"8081"},
			Env: []string{
				"SCHEMA_REGISTRY_DEBUG=true",
				"SCHEMA_REGISTRY_KAFKA_BROKERS=" + bootstrapServers[:len(bootstrapServers)-1], // removing trailing comma
				"SCHEMA_REGISTRY_ADVERTISED_HOSTNAME=schemaregistry",
				"SCHEMA_REGISTRY_CLIENT_AUTHENTICATION=NONE",
			},
		})
		if err != nil {
			return nil, err
		}
		cln.Cleanup(func() {
			if err := pool.Purge(src); err != nil {
				cln.Log("Could not purge resource", err)
			}
		})
		var srPort int
		for p, bindings := range src.Container.NetworkSettings.Ports {
			if p.Port() == "8081" {
				srPort, err = strconv.Atoi(bindings[0].HostPort)
				if err != nil {
					panic(fmt.Errorf("cannot convert port to int: %w", err))
				}
				break
			}
		}

		envVariables = append(envVariables, "KAFKA_SCHEMA_REGISTRY_URL=schemaregistry:8081")
		schemaRegistryURL = fmt.Sprintf("http://localhost:%d", srPort)
		cln.Log("Schema Registry on", schemaRegistryURL)
	}

	bootstrapServers := ""
	for i := uint(1); i <= c.brokers; i++ {
		bootstrapServers += fmt.Sprintf("kafka%d:9090,", i)
	}
	bootstrapServers = bootstrapServers[:len(bootstrapServers)-1] // removing trailing comma
	envVariables = append(envVariables, "BOOTSTRAP_SERVERS="+bootstrapServers)

	var mounts []string
	if c.saslConfig != nil {
		if c.saslConfig.BrokerUser.Username == "" {
			return nil, fmt.Errorf("SASL broker user must be provided")
		}
		if len(c.saslConfig.Users) < 1 {
			return nil, fmt.Errorf("SASL users must be provided")
		}
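		// SASL here implies SASL_SSL: the bitnami/kafka image reads its TLS
		// key material from JKS stores, so the certificate password and both
		// store paths are required and are mounted into the container below.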
		if c.saslConfig.CertificatePassword == "" {
			return nil, fmt.Errorf("SASL certificate password cannot be empty")
		}
		if c.saslConfig.KeyStorePath == "" {
			return nil, fmt.Errorf("SASL keystore path cannot be empty")
		}
		if c.saslConfig.TrustStorePath == "" {
			return nil, fmt.Errorf("SASL truststore path cannot be empty")
		}

		mounts = []string{
			c.saslConfig.KeyStorePath + ":/opt/bitnami/kafka/config/certs/kafka.keystore.jks",
			c.saslConfig.TrustStorePath + ":/opt/bitnami/kafka/config/certs/kafka.truststore.jks",
		}

		var users, passwords string
		for _, user := range c.saslConfig.Users {
			users += user.Username + ","
			passwords += user.Password + ","
		}

		switch c.saslConfig.hashType {
		case scramPlainText:
			envVariables = append(envVariables,
				"KAFKA_CFG_SASL_ENABLED_MECHANISMS=PLAIN",
				"KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL=PLAIN",
			)
		case scramSHA256:
			envVariables = append(envVariables,
				"KAFKA_CFG_SASL_ENABLED_MECHANISMS=SCRAM-SHA-256",
				"KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL=SCRAM-SHA-256",
			)
		case scramSHA512:
			envVariables = append(envVariables,
				"KAFKA_CFG_SASL_ENABLED_MECHANISMS=SCRAM-SHA-512",
				"KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL=SCRAM-SHA-512",
			)
		default:
			return nil, fmt.Errorf("unknown SCRAM hash type: %d", c.saslConfig.hashType)
		}

		envVariables = append(envVariables,
			"KAFKA_CLIENT_USERS="+users[:len(users)-1],             // removing trailing comma
			"KAFKA_CLIENT_PASSWORDS="+passwords[:len(passwords)-1], // removing trailing comma
			"KAFKA_INTER_BROKER_USER="+c.saslConfig.BrokerUser.Username,
			"KAFKA_INTER_BROKER_PASSWORD="+c.saslConfig.BrokerUser.Password,
			"KAFKA_CERTIFICATE_PASSWORD="+c.saslConfig.CertificatePassword,
			"KAFKA_CFG_TLS_TYPE=JKS",
			"KAFKA_CFG_TLS_CLIENT_AUTH=none",
			"KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM=",
			"KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL:SASL_SSL,CLIENT:SASL_SSL",
			"KAFKA_ZOOKEEPER_TLS_VERIFY_HOSTNAME=false",
		)
	} else {
		envVariables = append(envVariables,
			"KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL:PLAINTEXT,CLIENT:PLAINTEXT",
		)
	}

	containers := make([]*dockertest.Resource, c.brokers)
	for i := uint(0); i < c.brokers; i++ {
		i := i
		localhostPortInt, err := kithelper.GetFreePort()
		if err != nil {
			return nil, err
		}
		localhostPort := fmt.Sprintf("%d/tcp", localhostPortInt)
		cln.Log("Kafka broker localhost port", i+1, localhostPort)

		nodeID := fmt.Sprintf("%d", i+1)
		hostname := "kafka" + nodeID
		nodeEnvVars := append(envVariables, []string{ // skipcq: CRT-D0001
			"KAFKA_BROKER_ID=" + nodeID,
			"KAFKA_CFG_LISTENERS=" + fmt.Sprintf("INTERNAL://%s:9090,CLIENT://:%s", hostname, kafkaClientPort),
		}...)
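		// Choose what the CLIENT listener advertises: the in-network
		// hostname (WithoutDockerHostListeners), a caller-supplied address
		// (WithCustomAdvertisedListener), or by default the host-mapped
		// localhost port so clients outside Docker can connect.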
		if c.dontUseDockerHostListeners {
			nodeEnvVars = append(nodeEnvVars, "KAFKA_CFG_ADVERTISED_LISTENERS="+fmt.Sprintf(
				"INTERNAL://%s:9090,CLIENT://%s:%s", hostname, hostname, kafkaClientPort,
			))
		} else if c.customAdvertisedListener != "" {
			nodeEnvVars = append(nodeEnvVars, "KAFKA_CFG_ADVERTISED_LISTENERS="+fmt.Sprintf(
				"INTERNAL://%s:9090,CLIENT://%s", hostname, c.customAdvertisedListener,
			))
		} else {
			nodeEnvVars = append(nodeEnvVars, "KAFKA_CFG_ADVERTISED_LISTENERS="+fmt.Sprintf(
				"INTERNAL://%s:9090,CLIENT://localhost:%d", hostname, localhostPortInt,
			))
		}
		containers[i], err = pool.RunWithOptions(&dockertest.RunOptions{
			Repository: "bitnami/kafka",
			Tag:        "3.6.0",
			NetworkID:  network.ID,
			Hostname:   hostname,
			PortBindings: map[dc.Port][]dc.PortBinding{
				kafkaClientPort + "/tcp": {{HostIP: "localhost", HostPort: localhostPort}},
			},
			Mounts: mounts,
			Env:    nodeEnvVars,
		})
		if err != nil {
			return nil, err
		}
		cln.Cleanup(func() {
			if err := pool.Purge(containers[i]); err != nil {
				cln.Log(fmt.Errorf("could not purge Kafka resource: %w", err))
			}
		})
	}

	res := &Resource{
		Ports:             make([]string, 0, len(containers)),
		SchemaRegistryURL: schemaRegistryURL,
		pool:              pool,
		containers:        containers,
	}
	for i := 0; i < len(containers); i++ {
		res.Ports = append(res.Ports, containers[i].GetPort(kafkaClientPort+"/tcp"))
	}

	return res, nil
}
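// Setup returns as soon as the containers are running; it does not wait for
// the brokers to accept connections. A minimal readiness check, assuming the
// caller imports the standard library "net" package:
//
//	err = pool.Retry(func() error {
//		conn, err := net.Dial("tcp", "localhost:"+res.Ports[0])
//		if err != nil {
//			return err
//		}
//		return conn.Close()
//	})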