github.com/hack0072008/kafka-go@v1.0.1/client.go

package kafka

import (
	"context"
	"errors"
	"net"
	"time"

	"github.com/hack0072008/kafka-go/protocol"
)

const (
	defaultCreateTopicsTimeout     = 2 * time.Second
	defaultDeleteTopicsTimeout     = 2 * time.Second
	defaultCreatePartitionsTimeout = 2 * time.Second
	defaultProduceTimeout          = 500 * time.Millisecond
	defaultMaxWait                 = 500 * time.Millisecond
)

// Client is a high-level API to interact with kafka brokers.
//
// All methods of the Client type accept a context as first argument, which may
// be used to asynchronously cancel the requests.
//
// Clients are safe to use concurrently from multiple goroutines, as long as
// their configuration is not changed after first use.
type Client struct {
	// Address of the kafka cluster (or specific broker) that the client will be
	// sending requests to.
	//
	// This field is optional; the address may be provided in each request
	// instead. The request address takes precedence if both were specified.
	Addr net.Addr

	// Time limit for requests sent by this client.
	//
	// If zero, no timeout is applied.
	Timeout time.Duration

	// A transport used to communicate with the kafka brokers.
	//
	// If nil, DefaultTransport is used.
	Transport RoundTripper
}

// TopicAndGroup is a pair of consumer group ID and topic name. As these are
// both strings, we define a type for clarity when passing them to the Client
// as a function argument.
//
// N.B. TopicAndGroup is currently experimental! Therefore, it is subject to
// change, including breaking changes between MINOR and PATCH releases.
//
// DEPRECATED: this type will be removed in version 1.0, programs should
// migrate to use kafka.(*Client).OffsetFetch instead.
type TopicAndGroup struct {
	Topic   string
	GroupId string
}
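// The function below is an illustrative sketch, not part of the upstream file:
// it shows one way a program might wire a Client to a broker address and read
// the committed offsets of a consumer group through the deprecated
// ConsumerOffsets helper defined further down. The broker address, topic name,
// and group ID are placeholders.
func exampleConsumerOffsets(ctx context.Context) (map[int]int64, error) {
	client := &Client{
		// Any net.Addr works here; kafka brokers speak TCP.
		Addr:    &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 9092},
		Timeout: 10 * time.Second,
	}
	// Equivalent to fetching the topic metadata and then calling OffsetFetch
	// for every partition of the topic.
	return client.ConsumerOffsets(ctx, TopicAndGroup{
		Topic:   "my-topic",
		GroupId: "my-group",
	})
}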
// ConsumerOffsets returns a map[int]int64 of partition to committed offset for
// a consumer group id and topic.
//
// DEPRECATED: this method will be removed in version 1.0, programs should
// migrate to use kafka.(*Client).OffsetFetch instead.
func (c *Client) ConsumerOffsets(ctx context.Context, tg TopicAndGroup) (map[int]int64, error) {
	metadata, err := c.Metadata(ctx, &MetadataRequest{
		Topics: []string{tg.Topic},
	})

	if err != nil {
		return nil, err
	}

	topic := metadata.Topics[0]
	partitions := make([]int, len(topic.Partitions))

	for i := range topic.Partitions {
		partitions[i] = topic.Partitions[i].ID
	}

	offsets, err := c.OffsetFetch(ctx, &OffsetFetchRequest{
		GroupID: tg.GroupId,
		Topics: map[string][]int{
			tg.Topic: partitions,
		},
	})

	if err != nil {
		return nil, err
	}

	topicOffsets := offsets.Topics[topic.Name]
	partitionOffsets := make(map[int]int64, len(topicOffsets))

	for _, off := range topicOffsets {
		partitionOffsets[off.Partition] = off.CommittedOffset
	}

	return partitionOffsets, nil
}

func (c *Client) roundTrip(ctx context.Context, addr net.Addr, msg protocol.Message) (protocol.Message, error) {
	if c.Timeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, c.Timeout)
		defer cancel()
	}

	if addr == nil {
		if addr = c.Addr; addr == nil {
			return nil, errors.New("no address was given for the kafka cluster in the request or on the client")
		}
	}

	return c.transport().RoundTrip(ctx, addr, msg)
}

func (c *Client) transport() RoundTripper {
	if c.Transport != nil {
		return c.Transport
	}
	return DefaultTransport
}

func (c *Client) timeout(ctx context.Context, defaultTimeout time.Duration) time.Duration {
	timeout := c.Timeout

	if deadline, ok := ctx.Deadline(); ok {
		if remain := time.Until(deadline); remain < timeout {
			timeout = remain
		}
	}

	if timeout > 0 {
		// Halve the timeout because it is communicated to kafka in multiple
		// requests (e.g. Fetch, Produce, etc.); this adds a buffer to account
		// for network latency when waiting for the response from kafka.
		return timeout / 2
	}

	return defaultTimeout
}

func (c *Client) timeoutMs(ctx context.Context, defaultTimeout time.Duration) int32 {
	return milliseconds(c.timeout(ctx, defaultTimeout))
}
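// The helper below is an illustrative sketch, not part of the upstream file:
// it makes the timeout negotiation above concrete. With Client.Timeout set to
// 2s and no tighter context deadline, the effective timeout reported to kafka
// should be half of that (1000ms), leaving headroom for network latency; with
// no timeout configured at all, the default passed by the caller (here the
// produce default) is used instead.
func exampleEffectiveTimeout(ctx context.Context) int32 {
	c := &Client{Timeout: 2 * time.Second}
	// Expected to be 1000 when ctx carries no earlier deadline.
	return c.timeoutMs(ctx, defaultProduceTimeout)
}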