github.com/hyperledger-labs/bdls@v2.1.1+incompatible/integration/runner/kafka.go

/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package runner

import (
	"context"
	"fmt"
	"io"
	"net"
	"os"
	"strconv"
	"sync"
	"time"

	docker "github.com/fsouza/go-dockerclient"
	"github.com/pkg/errors"
	"github.com/tedsuo/ifrit"
)

const KafkaDefaultImage = "confluentinc/cp-kafka:5.3.1"

// Kafka manages the execution of an instance of a dockerized Kafka broker
// for tests.
type Kafka struct {
	Client        *docker.Client
	Image         string
	HostIP        string
	HostPort      int
	ContainerPort docker.Port
	Name          string
	NetworkName   string
	StartTimeout  time.Duration

	MessageMaxBytes              int
	ReplicaFetchMaxBytes         int
	UncleanLeaderElectionEnable  bool
	DefaultReplicationFactor     int
	MinInsyncReplicas            int
	BrokerID                     int
	ZooKeeperConnect             string
	ReplicaFetchResponseMaxBytes int
	AdvertisedListeners          string

	ErrorStream  io.Writer
	OutputStream io.Writer

	ContainerID      string
	HostAddress      string
	ContainerAddress string
	Address          string

	mutex   sync.Mutex
	stopped bool
}

// Run runs a Kafka container. It implements the ifrit.Runner interface.
func (k *Kafka) Run(sigCh <-chan os.Signal, ready chan<- struct{}) error {
	if k.Image == "" {
		k.Image = KafkaDefaultImage
	}

	if k.Name == "" {
		k.Name = DefaultNamer()
	}

	if k.HostIP == "" {
		k.HostIP = "127.0.0.1"
	}

	if k.ContainerPort == docker.Port("") {
		k.ContainerPort = docker.Port("9092/tcp")
	}

	if k.StartTimeout == 0 {
		k.StartTimeout = DefaultStartTimeout
	}

	if k.Client == nil {
		client, err := docker.NewClientFromEnv()
		if err != nil {
			return err
		}
		k.Client = client
	}

	if k.DefaultReplicationFactor == 0 {
		k.DefaultReplicationFactor = 1
	}

	if k.MinInsyncReplicas == 0 {
		k.MinInsyncReplicas = 1
	}

	if k.ZooKeeperConnect == "" {
		k.ZooKeeperConnect = "zookeeper:2181/kafka"
	}

	if k.MessageMaxBytes == 0 {
		k.MessageMaxBytes = 1000012
	}

	if k.ReplicaFetchMaxBytes == 0 {
		k.ReplicaFetchMaxBytes = 1048576
	}

	if k.ReplicaFetchResponseMaxBytes == 0 {
		k.ReplicaFetchResponseMaxBytes = 10485760
	}

	containerOptions := docker.CreateContainerOptions{
		Name: k.Name,
		Config: &docker.Config{
			Image: k.Image,
			Env:   k.buildEnv(),
		},
		HostConfig: &docker.HostConfig{
			AutoRemove: true,
			PortBindings: map[docker.Port][]docker.PortBinding{
				k.ContainerPort: {{
					HostIP:   k.HostIP,
					HostPort: strconv.Itoa(k.HostPort),
				}},
			},
		},
	}

	if k.NetworkName != "" {
		nw, err := k.Client.NetworkInfo(k.NetworkName)
		if err != nil {
			return err
		}

		containerOptions.NetworkingConfig = &docker.NetworkingConfig{
			EndpointsConfig: map[string]*docker.EndpointConfig{
				k.NetworkName: {
					NetworkID: nw.ID,
				},
			},
		}
	}

	container, err := k.Client.CreateContainer(containerOptions)
	if err != nil {
		return err
	}
	k.ContainerID = container.ID

	err = k.Client.StartContainer(container.ID, nil)
	if err != nil {
		return err
	}
	defer k.Stop()

	container, err = k.Client.InspectContainer(container.ID)
	if err != nil {
		return err
	}

	k.HostAddress = net.JoinHostPort(
		container.NetworkSettings.Ports[k.ContainerPort][0].HostIP,
		container.NetworkSettings.Ports[k.ContainerPort][0].HostPort,
	)
	k.ContainerAddress = net.JoinHostPort(
		container.NetworkSettings.Networks[k.NetworkName].IPAddress,
		k.ContainerPort.Port(),
	)

	logContext, cancelLogs := context.WithCancel(context.Background())
	defer cancelLogs()
	go k.streamLogs(logContext)

	containerExit := k.wait()
	ctx, cancel := context.WithTimeout(context.Background(), k.StartTimeout)
	defer cancel()

	select {
	case <-ctx.Done():
		return errors.Wrapf(ctx.Err(), "kafka broker in container %s did not start", k.ContainerID)
	case <-containerExit:
		return errors.New("container exited before ready")
	case <-k.ready(ctx, k.ContainerAddress):
		k.Address = k.ContainerAddress
	case <-k.ready(ctx, k.HostAddress):
		k.Address = k.HostAddress
	}

	cancel()
	close(ready)

	for {
		select {
		case err := <-containerExit:
			return err
		case <-sigCh:
			if err := k.Stop(); err != nil {
				return err
			}
		}
	}
}

// buildEnv assembles the broker configuration that is passed to the container
// as environment variables.
func (k *Kafka) buildEnv() []string {
	env := []string{
		"KAFKA_LOG_RETENTION_MS=-1",
		//"KAFKA_AUTO_CREATE_TOPICS_ENABLE=false",
		fmt.Sprintf("KAFKA_MESSAGE_MAX_BYTES=%d", k.MessageMaxBytes),
		fmt.Sprintf("KAFKA_REPLICA_FETCH_MAX_BYTES=%d", k.ReplicaFetchMaxBytes),
		fmt.Sprintf("KAFKA_UNCLEAN_LEADER_ELECTION_ENABLE=%s", strconv.FormatBool(k.UncleanLeaderElectionEnable)),
		fmt.Sprintf("KAFKA_DEFAULT_REPLICATION_FACTOR=%d", k.DefaultReplicationFactor),
		fmt.Sprintf("KAFKA_MIN_INSYNC_REPLICAS=%d", k.MinInsyncReplicas),
		fmt.Sprintf("KAFKA_BROKER_ID=%d", k.BrokerID),
		fmt.Sprintf("KAFKA_ZOOKEEPER_CONNECT=%s", k.ZooKeeperConnect),
		fmt.Sprintf("KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES=%d", k.ReplicaFetchResponseMaxBytes),
		fmt.Sprintf("KAFKA_ADVERTISED_LISTENERS=EXTERNAL://localhost:%d,%s://%s:9093", k.HostPort, k.NetworkName, k.Name),
		fmt.Sprintf("KAFKA_LISTENERS=EXTERNAL://0.0.0.0:9092,%s://0.0.0.0:9093", k.NetworkName),
		fmt.Sprintf("KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=EXTERNAL:PLAINTEXT,%s:PLAINTEXT", k.NetworkName),
		fmt.Sprintf("KAFKA_INTER_BROKER_LISTENER_NAME=%s", k.NetworkName),
	}
	return env
}
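// The function below is a hypothetical sketch, not part of the original file,
// added to illustrate the two-listener layout buildEnv produces: the EXTERNAL
// listener is bound on container port 9092 and published on the host at
// HostPort, while the NetworkName listener serves other containers on port
// 9093 inside the docker network. The function name and the sample values
// ("kafka0", "testnet", 32768) are illustrative assumptions only; note that
// Run normally fills in the numeric defaults before buildEnv is called.
func printSampleKafkaEnv() {
	k := &Kafka{
		Name:        "kafka0",
		NetworkName: "testnet",
		HostPort:    32768,
	}
	for _, e := range k.buildEnv() {
		fmt.Println(e)
	}
	// Among the printed values:
	//   KAFKA_ADVERTISED_LISTENERS=EXTERNAL://localhost:32768,testnet://kafka0:9093
	//   KAFKA_LISTENERS=EXTERNAL://0.0.0.0:9092,testnet://0.0.0.0:9093
	//   KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=EXTERNAL:PLAINTEXT,testnet:PLAINTEXT
}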
// ready returns a channel that is closed once a TCP connection to addr can be
// established; it keeps polling until the context is cancelled.
func (k *Kafka) ready(ctx context.Context, addr string) <-chan struct{} {
	readyCh := make(chan struct{})
	go func() {
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()

		for {
			conn, err := net.DialTimeout("tcp", addr, 50*time.Millisecond)
			if err == nil {
				conn.Close()
				close(readyCh)
				return
			}

			select {
			case <-ticker.C:
			case <-ctx.Done():
				return
			}
		}
	}()

	return readyCh
}

// wait returns a channel that receives an error when the container process exits.
func (k *Kafka) wait() <-chan error {
	exitCh := make(chan error)
	go func() {
		exitCode, err := k.Client.WaitContainer(k.ContainerID)
		if err == nil {
			err = fmt.Errorf("kafka: process exited with %d", exitCode)
		}
		exitCh <- err
	}()

	return exitCh
}

// streamLogs follows the container logs and copies them to the configured
// output and error streams.
func (k *Kafka) streamLogs(ctx context.Context) error {
	if k.ErrorStream == nil && k.OutputStream == nil {
		return nil
	}

	logOptions := docker.LogsOptions{
		Context:      ctx,
		Container:    k.ContainerID,
		ErrorStream:  k.ErrorStream,
		OutputStream: k.OutputStream,
		Stderr:       k.ErrorStream != nil,
		Stdout:       k.OutputStream != nil,
		Follow:       true,
	}
	return k.Client.Logs(logOptions)
}

// Start starts the Kafka container using an ifrit runner.
func (k *Kafka) Start() error {
	p := ifrit.Invoke(k)

	select {
	case <-p.Ready():
		return nil
	case err := <-p.Wait():
		return err
	}
}

// Stop stops and removes the Kafka container.
func (k *Kafka) Stop() error {
	k.mutex.Lock()
	if k.stopped {
		k.mutex.Unlock()
		return errors.Errorf("container %s already stopped", k.ContainerID)
	}
	k.stopped = true
	k.mutex.Unlock()

	return k.Client.StopContainer(k.ContainerID, 0)
}
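// The function below is a hypothetical usage sketch, not part of the original
// file: it shows how a test might drive this runner through the exported API,
// assuming a docker network named "testnet" already exists and that the sample
// container name and host port are free to use. Start blocks until the broker
// accepts TCP connections (or StartTimeout expires); the caller is responsible
// for calling Stop when the broker is no longer needed.
func runKafkaForTest() (*Kafka, error) {
	k := &Kafka{
		Name:         "kafka0",  // illustrative container name
		NetworkName:  "testnet", // assumed pre-existing docker network
		HostPort:     32768,     // assumed free host port for the EXTERNAL listener
		ErrorStream:  os.Stderr,
		OutputStream: os.Stdout,
	}
	if err := k.Start(); err != nil {
		return nil, err
	}
	// k.Address now holds the reachable broker address (container-side or
	// host-side, whichever answered first).
	return k, nil
}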