github.com/QuangHoangHao/kafka-go@v0.4.36/client.go

package kafka

import (
	"context"
	"errors"
	"fmt"
	"net"
	"time"

	"github.com/QuangHoangHao/kafka-go/protocol"
)

const (
	defaultCreateTopicsTimeout     = 2 * time.Second
	defaultDeleteTopicsTimeout     = 2 * time.Second
	defaultCreatePartitionsTimeout = 2 * time.Second
	defaultProduceTimeout          = 500 * time.Millisecond
	defaultMaxWait                 = 500 * time.Millisecond
)

// Client is a high-level API to interact with kafka brokers.
//
// All methods of the Client type accept a context as first argument, which may
// be used to asynchronously cancel the requests.
//
// Clients are safe to use concurrently from multiple goroutines, as long as
// their configuration is not changed after first use.
type Client struct {
	// Address of the kafka cluster (or specific broker) that the client will be
	// sending requests to.
	//
	// This field is optional, the address may be provided in each request
	// instead. The request address takes precedence if both were specified.
	Addr net.Addr

	// Time limit for requests sent by this client.
	//
	// If zero, no timeout is applied.
	Timeout time.Duration

	// A transport used to communicate with the kafka brokers.
	//
	// If nil, DefaultTransport is used.
	Transport RoundTripper
}
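// newClientExample is a minimal usage sketch showing how a Client might be
// constructed against a single broker. The broker address and timeout values
// are illustrative assumptions; any net.Addr value works for the Addr field.
func newClientExample() *Client {
	return &Client{
		// Point the client at a single broker; each request may also carry
		// its own address, which takes precedence over this one.
		Addr: &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 9092},
		// Bound every request issued through this client to 10 seconds.
		Timeout: 10 * time.Second,
	}
}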
// TopicAndGroup combines a consumer group id and a topic name; as both are
// plain strings, this type exists for clarity when passing them to the Client
// as a function argument.
//
// N.B. TopicAndGroup is currently experimental! Therefore, it is subject to
// change, including breaking changes between MINOR and PATCH releases.
//
// DEPRECATED: this type will be removed in version 1.0, programs should
// migrate to use kafka.(*Client).OffsetFetch instead.
type TopicAndGroup struct {
	Topic   string
	GroupId string
}

// ConsumerOffsets returns a map[int]int64 of partition to committed offset for
// a consumer group id and topic.
//
// DEPRECATED: this method will be removed in version 1.0, programs should
// migrate to use kafka.(*Client).OffsetFetch instead.
func (c *Client) ConsumerOffsets(ctx context.Context, tg TopicAndGroup) (map[int]int64, error) {
	metadata, err := c.Metadata(ctx, &MetadataRequest{
		Topics: []string{tg.Topic},
	})

	if err != nil {
		return nil, fmt.Errorf("failed to get topic metadata: %w", err)
	}

	topic := metadata.Topics[0]
	partitions := make([]int, len(topic.Partitions))

	for i := range topic.Partitions {
		partitions[i] = topic.Partitions[i].ID
	}

	offsets, err := c.OffsetFetch(ctx, &OffsetFetchRequest{
		GroupID: tg.GroupId,
		Topics: map[string][]int{
			tg.Topic: partitions,
		},
	})

	if err != nil {
		return nil, fmt.Errorf("failed to get offsets: %w", err)
	}

	topicOffsets := offsets.Topics[topic.Name]
	partitionOffsets := make(map[int]int64, len(topicOffsets))

	for _, off := range topicOffsets {
		partitionOffsets[off.Partition] = off.CommittedOffset
	}

	return partitionOffsets, nil
}

// roundTrip sends msg to the broker at addr (falling back to c.Addr when addr
// is nil) and returns the response, applying the client timeout to the
// context when one is configured.
func (c *Client) roundTrip(ctx context.Context, addr net.Addr, msg protocol.Message) (protocol.Message, error) {
	if c.Timeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, c.Timeout)
		defer cancel()
	}

	if addr == nil {
		if addr = c.Addr; addr == nil {
			return nil, errors.New("no address was given for the kafka cluster in the request or on the client")
		}
	}

	return c.transport().RoundTrip(ctx, addr, msg)
}

// transport returns the configured transport, or DefaultTransport when none
// was set.
func (c *Client) transport() RoundTripper {
	if c.Transport != nil {
		return c.Transport
	}
	return DefaultTransport
}

// timeout derives the request timeout communicated to kafka from the client
// timeout, capped by the context deadline; it falls back to defaultTimeout
// when no client timeout is set.
func (c *Client) timeout(ctx context.Context, defaultTimeout time.Duration) time.Duration {
	timeout := c.Timeout

	if deadline, ok := ctx.Deadline(); ok {
		if remain := time.Until(deadline); remain < timeout {
			timeout = remain
		}
	}

	if timeout > 0 {
		// Halve the timeout because it is communicated to kafka in multiple
		// requests (e.g. Fetch, Produce, etc.); this adds buffer to account
		// for network latency when waiting for the response from kafka.
		return timeout / 2
	}

	return defaultTimeout
}

// timeoutMs is like timeout but expresses the result in milliseconds, as
// expected by the kafka protocol.
func (c *Client) timeoutMs(ctx context.Context, defaultTimeout time.Duration) int32 {
	return milliseconds(c.timeout(ctx, defaultTimeout))
}
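// offsetFetchExample is a minimal sketch of calling OffsetFetch directly, the
// migration path suggested by the deprecation notice on ConsumerOffsets. The
// group id, topic name, and partition list are illustrative assumptions.
func offsetFetchExample(ctx context.Context, c *Client) (map[int]int64, error) {
	resp, err := c.OffsetFetch(ctx, &OffsetFetchRequest{
		GroupID: "example-group",
		Topics:  map[string][]int{"example-topic": {0, 1, 2}},
	})
	if err != nil {
		return nil, err
	}

	// Collect each partition's committed offset, mirroring the map returned
	// by the deprecated ConsumerOffsets helper.
	offsets := make(map[int]int64, len(resp.Topics["example-topic"]))
	for _, p := range resp.Topics["example-topic"] {
		offsets[p.Partition] = p.CommittedOffset
	}
	return offsets, nil
}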