// github.com/QuangHoangHao/kafka-go@v0.4.36/fetch.go

package kafka

import (
	"context"
	"fmt"
	"math"
	"net"
	"time"

	"github.com/QuangHoangHao/kafka-go/protocol"
	fetchAPI "github.com/QuangHoangHao/kafka-go/protocol/fetch"
)

// FetchRequest represents a request sent to a kafka broker to retrieve records
// from a topic partition.
type FetchRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// Topic, partition, and offset to retrieve records from. The offset may be
	// one of the special FirstOffset or LastOffset constants, in which case the
	// request will automatically discover the first or last offset of the
	// partition and submit the request for these.
	Topic     string
	Partition int
	Offset    int64

	// Size and time limits of the response returned by the broker.
	MinBytes int64
	MaxBytes int64
	MaxWait  time.Duration

	// The isolation level for the request.
	//
	// Defaults to ReadUncommitted.
	//
	// This field requires the kafka broker to support the Fetch API in version
	// 4 or above (otherwise the value is ignored).
	IsolationLevel IsolationLevel
}

// FetchResponse represents a response from a kafka broker to a fetch request.
type FetchResponse struct {
	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// The topic and partition that the response is for (these will match the
	// values in the request).
	Topic     string
	Partition int

	// Information about the topic partition layout returned from the broker.
	//
	// LastStableOffset requires the kafka broker to support the Fetch API in
	// version 4 or above (otherwise the value is zero).
	//
	// LogStartOffset requires the kafka broker to support the Fetch API in
	// version 5 or above (otherwise the value is zero).
	HighWatermark    int64
	LastStableOffset int64
	LogStartOffset   int64

	// An error that may have occurred while attempting to fetch the records.
	//
	// The error contains both the kafka error code, and an error message
	// returned by the kafka broker. Programs may use the standard errors.Is
	// function to test the error against kafka error codes.
	Error error

	// The set of records returned in the response.
	//
	// The program is expected to call the RecordSet's Close method when it
	// has finished reading the records.
	//
	// Note that kafka may return record batches that start at an offset before
	// the one that was requested. It is the program's responsibility to skip
	// the offsets that it is not interested in.
	Records RecordReader
}

// Fetch sends a fetch request to a kafka broker and returns the response.
//
// If the broker returned an invalid response with no topics, an error wrapping
// protocol.ErrNoTopic is returned.
//
// If the broker returned an invalid response with no partitions, an error
// wrapping protocol.ErrNoPartition is returned.
//
// A usage sketch is included at the end of this file.
func (c *Client) Fetch(ctx context.Context, req *FetchRequest) (*FetchResponse, error) {
	timeout := c.timeout(ctx, math.MaxInt64)
	maxWait := req.maxWait()

	if maxWait < timeout {
		timeout = maxWait
	}

	offset := req.Offset
	switch offset {
	case FirstOffset, LastOffset:
		topic, partition := req.Topic, req.Partition

		r, err := c.ListOffsets(ctx, &ListOffsetsRequest{
			Addr: req.Addr,
			Topics: map[string][]OffsetRequest{
				topic: {{
					Partition: partition,
					Timestamp: offset,
				}},
			},
		})
		if err != nil {
			return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", err)
		}

		for _, p := range r.Topics[topic] {
			if p.Partition == partition {
				if p.Error != nil {
					return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", p.Error)
				}
				switch offset {
				case FirstOffset:
					offset = p.FirstOffset
				case LastOffset:
					offset = p.LastOffset
				}
				break
			}
		}
	}

	m, err := c.roundTrip(ctx, req.Addr, &fetchAPI.Request{
		ReplicaID:      -1,
		MaxWaitTime:    milliseconds(timeout),
		MinBytes:       int32(req.MinBytes),
		MaxBytes:       int32(req.MaxBytes),
		IsolationLevel: int8(req.IsolationLevel),
		SessionID:      -1,
		SessionEpoch:   -1,
		Topics: []fetchAPI.RequestTopic{{
			Topic: req.Topic,
			Partitions: []fetchAPI.RequestPartition{{
				Partition:          int32(req.Partition),
				CurrentLeaderEpoch: -1,
				FetchOffset:        offset,
				LogStartOffset:     -1,
				PartitionMaxBytes:  int32(req.MaxBytes),
			}},
		}},
	})

	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", err)
	}

	res := m.(*fetchAPI.Response)
	if len(res.Topics) == 0 {
		return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", protocol.ErrNoTopic)
	}
	topic := &res.Topics[0]
	if len(topic.Partitions) == 0 {
		return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", protocol.ErrNoPartition)
	}
	partition := &topic.Partitions[0]

	ret := &FetchResponse{
		Throttle:         makeDuration(res.ThrottleTimeMs),
		Topic:            topic.Topic,
		Partition:        int(partition.Partition),
		Error:            makeError(res.ErrorCode, ""),
		HighWatermark:    partition.HighWatermark,
		LastStableOffset: partition.LastStableOffset,
		LogStartOffset:   partition.LogStartOffset,
		Records:          partition.RecordSet.Records,
	}

	if partition.ErrorCode != 0 {
		ret.Error = makeError(partition.ErrorCode, "")
	}

	if ret.Records == nil {
		ret.Records = NewRecordReader()
	}

	return ret, nil
}

func (req *FetchRequest) maxWait() time.Duration {
	if req.MaxWait > 0 {
		return req.MaxWait
	}
	return defaultMaxWait
}

type fetchRequestV2 struct {
	ReplicaID   int32
	MaxWaitTime int32
	MinBytes    int32
	Topics      []fetchRequestTopicV2
}

func (r fetchRequestV2) size() int32 {
	return 4 + 4 + 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
}

func (r fetchRequestV2) writeTo(wb *writeBuffer) {
	wb.writeInt32(r.ReplicaID)
	wb.writeInt32(r.MaxWaitTime)
	wb.writeInt32(r.MinBytes)
	wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
}

type fetchRequestTopicV2 struct {
	TopicName  string
	Partitions []fetchRequestPartitionV2
}

func (t fetchRequestTopicV2) size() int32 {
	return sizeofString(t.TopicName) +
		sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
}

func (t fetchRequestTopicV2) writeTo(wb *writeBuffer) {
	wb.writeString(t.TopicName)
	wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
}

type fetchRequestPartitionV2 struct {
	Partition   int32
	FetchOffset int64
	MaxBytes    int32
}

func (p fetchRequestPartitionV2) size() int32 {
	return 4 + 8 + 4
}

func (p fetchRequestPartitionV2) writeTo(wb *writeBuffer) {
	wb.writeInt32(p.Partition)
	wb.writeInt64(p.FetchOffset)
	wb.writeInt32(p.MaxBytes)
}

type fetchResponseV2 struct {
	ThrottleTime int32
	Topics       []fetchResponseTopicV2
}

func (r fetchResponseV2) size() int32 {
	return 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
}

func (r fetchResponseV2) writeTo(wb *writeBuffer) {
	wb.writeInt32(r.ThrottleTime)
	wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
}

type fetchResponseTopicV2 struct {
	TopicName  string
	Partitions []fetchResponsePartitionV2
}

func (t fetchResponseTopicV2) size() int32 {
	return sizeofString(t.TopicName) +
		sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
}

func (t fetchResponseTopicV2) writeTo(wb *writeBuffer) {
	wb.writeString(t.TopicName)
	wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
}

type fetchResponsePartitionV2 struct {
	Partition           int32
	ErrorCode           int16
	HighwaterMarkOffset int64
	MessageSetSize      int32
	MessageSet          messageSet
}

func (p fetchResponsePartitionV2) size() int32 {
	return 4 + 2 + 8 + 4 + p.MessageSet.size()
}

func (p fetchResponsePartitionV2) writeTo(wb *writeBuffer) {
	wb.writeInt32(p.Partition)
	wb.writeInt16(p.ErrorCode)
	wb.writeInt64(p.HighwaterMarkOffset)
	wb.writeInt32(p.MessageSetSize)
	p.MessageSet.writeTo(wb)
}
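// The sketch below is not part of the original source. It is a minimal,
// illustrative example of how Client.Fetch (defined above) might be called
// from a program importing this package. The broker address
// ("localhost:9092") and topic name ("example-topic") are assumptions made
// for illustration only, and the record reader is assumed to report io.EOF
// once drained, as documented for protocol.RecordReader in upstream kafka-go.
//
//	package main
//
//	import (
//		"context"
//		"errors"
//		"fmt"
//		"io"
//		"log"
//		"time"
//
//		kafka "github.com/QuangHoangHao/kafka-go"
//	)
//
//	func main() {
//		// Assumed broker address; kafka.TCP builds a net.Addr for the client.
//		client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}
//
//		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//		defer cancel()
//
//		// Fetch from the first available offset of partition 0. Because the
//		// offset is FirstOffset, the client resolves it with a ListOffsets
//		// request before issuing the Fetch.
//		res, err := client.Fetch(ctx, &kafka.FetchRequest{
//			Topic:     "example-topic", // assumed topic name
//			Partition: 0,
//			Offset:    kafka.FirstOffset,
//			MinBytes:  1,
//			MaxBytes:  1024 * 1024,
//			MaxWait:   500 * time.Millisecond,
//		})
//		if err != nil {
//			log.Fatal(err)
//		}
//		if res.Error != nil {
//			// Broker-level error carried in the response body.
//			log.Fatal(res.Error)
//		}
//
//		// Iterate over the returned records. Note that kafka may return
//		// batches starting before the requested offset; a real program would
//		// skip records it is not interested in.
//		for {
//			rec, err := res.Records.ReadRecord()
//			if err != nil {
//				if errors.Is(err, io.EOF) {
//					break
//				}
//				log.Fatal(err)
//			}
//			fmt.Println("offset:", rec.Offset)
//		}
//	}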