package kafka

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"math"
	"net"
	"os"
	"path/filepath"
	"runtime"
	"sync"
	"sync/atomic"
	"time"
)

var (
	// Sentinel errors returned by message writes when the caller sets
	// fields on kafka.Message that are owned by the connection itself.
	errInvalidWriteTopic     = errors.New("writes must NOT set Topic on kafka.Message")
	errInvalidWritePartition = errors.New("writes must NOT set Partition on kafka.Message")
)

// Conn represents a connection to a kafka broker.
//
// Instances of Conn are safe to use concurrently from multiple goroutines.
type Conn struct {
	// base network connection
	conn net.Conn

	// number of inflight requests on the connection.
	inflight int32

	// offset management (synchronized on the mutex field)
	mutex  sync.Mutex
	offset int64

	// read buffer (synchronized on rlock)
	rlock sync.Mutex
	rbuf  bufio.Reader

	// write buffer (synchronized on wlock)
	wlock sync.Mutex
	wbuf  bufio.Writer
	wb    writeBuffer

	// deadline management
	wdeadline connDeadline
	rdeadline connDeadline

	// immutable values of the connection object
	clientID      string
	topic         string
	partition     int32
	fetchMaxBytes int32
	fetchMinSize  int32
	broker        int32
	rack          string

	// correlation ID generator (synchronized on wlock)
	correlationID int32

	// number of replica acks required when publishing to a partition
	requiredAcks int32

	// lazily loaded API versions used by this connection
	apiVersions atomic.Value // apiVersionMap

	transactionalID *string
}

// apiVersionMap maps an API key to the version range advertised by the broker.
type apiVersionMap map[apiKey]ApiVersion

// negotiate returns the highest entry of sortedSupportedVersions that the
// broker advertises support for on the given API key, or -1 when none match.
//
// NOTE(review): only the broker's MaxVersion is consulted; this assumes the
// broker's MinVersion never exceeds our lowest supported version — confirm.
func (v apiVersionMap) negotiate(key apiKey, sortedSupportedVersions ...apiVersion) apiVersion {
	x := v[key]

	// Walk from the highest supported version down to find the best match.
	for i := len(sortedSupportedVersions) - 1; i >= 0; i-- {
		s := sortedSupportedVersions[i]

		if apiVersion(x.MaxVersion) >= s {
			return s
		}
	}

	return -1
}

// ConnConfig is a configuration object used to create new instances of Conn.
type ConnConfig struct {
	ClientID  string
	Topic     string
	Partition int
	Broker    int
	Rack      string

	// The transactional id to use for transactional delivery. Idempotent
	// deliver should be enabled if transactional id is configured.
	// For more details look at transactional.id description here: http://kafka.apache.org/documentation.html#producerconfigs
	// Empty string means that this connection can't be transactional.
	TransactionalID string
}

// ReadBatchConfig is a configuration object used for reading batches of messages.
type ReadBatchConfig struct {
	// MinBytes indicates to the broker the minimum batch size that the consumer
	// will accept. Setting a high minimum when consuming from a low-volume topic
	// may result in delayed delivery when the broker does not have enough data to
	// satisfy the defined minimum.
	MinBytes int

	// MaxBytes indicates to the broker the maximum batch size that the consumer
	// will accept. The broker will truncate a message to satisfy this maximum, so
	// choose a value that is high enough for your largest message size.
	MaxBytes int

	// IsolationLevel controls the visibility of transactional records.
	// ReadUncommitted makes all records visible. With ReadCommitted only
	// non-transactional and committed records are visible.
	IsolationLevel IsolationLevel

	// MaxWait is the amount of time for the broker while waiting to hit the
	// min/max byte targets. This setting is independent of any network-level
	// timeouts or deadlines.
	//
	// For backward compatibility, when this field is left zero, kafka-go will
	// infer the max wait from the connection's read deadline.
	MaxWait time.Duration
}

// IsolationLevel controls the visibility of transactional records on reads.
type IsolationLevel int8

const (
	ReadUncommitted IsolationLevel = 0
	ReadCommitted   IsolationLevel = 1
)

var (
	// DefaultClientID is the default value used as ClientID of kafka
	// connections.
	DefaultClientID string
)

func init() {
	// Build a "progname@hostname (module)" identifier so brokers can
	// attribute requests to this process by default.
	progname := filepath.Base(os.Args[0])
	hostname, _ := os.Hostname()
	DefaultClientID = fmt.Sprintf("%s@%s (github.com/deanMdreon/kafka-go)", progname, hostname)
}

// NewConn returns a new kafka connection for the given topic and partition.
func NewConn(conn net.Conn, topic string, partition int) *Conn {
	return NewConnWith(conn, ConnConfig{
		Topic:     topic,
		Partition: partition,
	})
}

// emptyToNullable converts an empty transactional id to a nil pointer, the
// wire representation of "not transactional".
func emptyToNullable(transactionalID string) (result *string) {
	if transactionalID != "" {
		result = &transactionalID
	}
	return result
}

// NewConnWith returns a new kafka connection configured with config.
// The offset is initialized to FirstOffset.
func NewConnWith(conn net.Conn, config ConnConfig) *Conn {
	if len(config.ClientID) == 0 {
		config.ClientID = DefaultClientID
	}

	// Partition must fit in the protocol's int32 field.
	if config.Partition < 0 || config.Partition > math.MaxInt32 {
		panic(fmt.Sprintf("invalid partition number: %d", config.Partition))
	}

	c := &Conn{
		conn:            conn,
		rbuf:            *bufio.NewReader(conn),
		wbuf:            *bufio.NewWriter(conn),
		clientID:        config.ClientID,
		topic:           config.Topic,
		partition:       int32(config.Partition),
		broker:          int32(config.Broker),
		rack:            config.Rack,
		offset:          FirstOffset,
		requiredAcks:    -1,
		transactionalID: emptyToNullable(config.TransactionalID),
	}

	c.wb.w = &c.wbuf

	// The fetch request needs to ask for a MaxBytes value that is at least
	// enough to load the control data of the response. To avoid having to
	// recompute it on every read, it is cached here in the Conn value.
	c.fetchMinSize = (fetchResponseV2{
		Topics: []fetchResponseTopicV2{{
			TopicName: config.Topic,
			Partitions: []fetchResponsePartitionV2{{
				Partition:  int32(config.Partition),
				MessageSet: messageSet{{}},
			}},
		}},
	}).size()
	c.fetchMaxBytes = math.MaxInt32 - c.fetchMinSize
	return c
}

// negotiateVersion returns the highest version of the given API supported by
// both the client and the broker, or an error when no overlap exists.
func (c *Conn) negotiateVersion(key apiKey, sortedSupportedVersions ...apiVersion) (apiVersion, error) {
	v, err := c.loadVersions()
	if err != nil {
		return -1, err
	}
	a := v.negotiate(key, sortedSupportedVersions...)
	if a < 0 {
		return -1, fmt.Errorf("no matching versions were found between the client and the broker for API key %d", key)
	}
	return a, nil
}

// loadVersions returns the broker's API version map, fetching it on first use
// and caching it in c.apiVersions.
//
// NOTE(review): two goroutines racing past the nil check may both issue an
// ApiVersions request; the duplicate Store is harmless but redundant.
func (c *Conn) loadVersions() (apiVersionMap, error) {
	v, _ := c.apiVersions.Load().(apiVersionMap)
	if v != nil {
		return v, nil
	}

	brokerVersions, err := c.ApiVersions()
	if err != nil {
		return nil, err
	}

	v = make(apiVersionMap, len(brokerVersions))

	for _, a := range brokerVersions {
		v[apiKey(a.ApiKey)] = a
	}

	c.apiVersions.Store(v)
	return v, nil
}

// Broker returns a Broker value representing the kafka broker that this
// connection was established to.
241 func (c *Conn) Broker() Broker { 242 addr := c.conn.RemoteAddr() 243 host, port, _ := splitHostPortNumber(addr.String()) 244 return Broker{ 245 Host: host, 246 Port: port, 247 ID: int(c.broker), 248 Rack: c.rack, 249 } 250 } 251 252 // Controller requests kafka for the current controller and returns its URL 253 func (c *Conn) Controller() (broker Broker, err error) { 254 err = c.readOperation( 255 func(deadline time.Time, id int32) error { 256 return c.writeRequest(metadata, v1, id, topicMetadataRequestV1([]string{})) 257 }, 258 func(deadline time.Time, size int) error { 259 var res metadataResponseV1 260 261 if err := c.readResponse(size, &res); err != nil { 262 return err 263 } 264 for _, brokerMeta := range res.Brokers { 265 if brokerMeta.NodeID == res.ControllerID { 266 broker = Broker{ID: int(brokerMeta.NodeID), 267 Port: int(brokerMeta.Port), 268 Host: brokerMeta.Host, 269 Rack: brokerMeta.Rack} 270 break 271 } 272 } 273 return nil 274 }, 275 ) 276 return broker, err 277 } 278 279 // Brokers retrieve the broker list from the Kafka metadata 280 func (c *Conn) Brokers() ([]Broker, error) { 281 var brokers []Broker 282 err := c.readOperation( 283 func(deadline time.Time, id int32) error { 284 return c.writeRequest(metadata, v1, id, topicMetadataRequestV1([]string{})) 285 }, 286 func(deadline time.Time, size int) error { 287 var res metadataResponseV1 288 289 if err := c.readResponse(size, &res); err != nil { 290 return err 291 } 292 293 brokers = make([]Broker, len(res.Brokers)) 294 for i, brokerMeta := range res.Brokers { 295 brokers[i] = Broker{ 296 ID: int(brokerMeta.NodeID), 297 Port: int(brokerMeta.Port), 298 Host: brokerMeta.Host, 299 Rack: brokerMeta.Rack, 300 } 301 } 302 return nil 303 }, 304 ) 305 return brokers, err 306 } 307 308 // DeleteTopics deletes the specified topics. 
func (c *Conn) DeleteTopics(topics ...string) error {
	_, err := c.deleteTopics(deleteTopicsRequestV0{
		Topics: topics,
	})
	return err
}

// findCoordinator finds the coordinator for the specified group or transaction
//
// See http://kafka.apache.org/protocol.html#The_Messages_FindCoordinator
func (c *Conn) findCoordinator(request findCoordinatorRequestV0) (findCoordinatorResponseV0, error) {
	var response findCoordinatorResponseV0

	err := c.readOperation(
		func(deadline time.Time, id int32) error {
			return c.writeRequest(findCoordinator, v0, id, request)

		},
		func(deadline time.Time, size int) error {
			return expectZeroSize(func() (remain int, err error) {
				return (&response).readFrom(&c.rbuf, size)
			}())
		},
	)
	if err != nil {
		return findCoordinatorResponseV0{}, err
	}
	// Surface broker-side errors carried in the response body.
	if response.ErrorCode != 0 {
		return findCoordinatorResponseV0{}, Error(response.ErrorCode)
	}

	return response, nil
}

// heartbeat sends a heartbeat message required by consumer groups
//
// See http://kafka.apache.org/protocol.html#The_Messages_Heartbeat
func (c *Conn) heartbeat(request heartbeatRequestV0) (heartbeatResponseV0, error) {
	var response heartbeatResponseV0

	err := c.writeOperation(
		func(deadline time.Time, id int32) error {
			return c.writeRequest(heartbeat, v0, id, request)
		},
		func(deadline time.Time, size int) error {
			return expectZeroSize(func() (remain int, err error) {
				return (&response).readFrom(&c.rbuf, size)
			}())
		},
	)
	if err != nil {
		return heartbeatResponseV0{}, err
	}
	if response.ErrorCode != 0 {
		return heartbeatResponseV0{}, Error(response.ErrorCode)
	}

	return response, nil
}

// joinGroup attempts to join a consumer group
//
// See http://kafka.apache.org/protocol.html#The_Messages_JoinGroup
func (c *Conn) joinGroup(request joinGroupRequestV1) (joinGroupResponseV1, error) {
	var response joinGroupResponseV1

	err := c.writeOperation(
		func(deadline time.Time, id int32) error {
			return c.writeRequest(joinGroup, v1, id, request)
		},
		func(deadline time.Time, size int) error {
			return expectZeroSize(func() (remain int, err error) {
				return (&response).readFrom(&c.rbuf, size)
			}())
		},
	)
	if err != nil {
		return joinGroupResponseV1{}, err
	}
	if response.ErrorCode != 0 {
		return joinGroupResponseV1{}, Error(response.ErrorCode)
	}

	return response, nil
}

// leaveGroup leaves the consumer from the consumer group
//
// See http://kafka.apache.org/protocol.html#The_Messages_LeaveGroup
func (c *Conn) leaveGroup(request leaveGroupRequestV0) (leaveGroupResponseV0, error) {
	var response leaveGroupResponseV0

	err := c.writeOperation(
		func(deadline time.Time, id int32) error {
			return c.writeRequest(leaveGroup, v0, id, request)
		},
		func(deadline time.Time, size int) error {
			return expectZeroSize(func() (remain int, err error) {
				return (&response).readFrom(&c.rbuf, size)
			}())
		},
	)
	if err != nil {
		return leaveGroupResponseV0{}, err
	}
	if response.ErrorCode != 0 {
		return leaveGroupResponseV0{}, Error(response.ErrorCode)
	}

	return response, nil
}

// listGroups lists all the consumer groups
//
// See http://kafka.apache.org/protocol.html#The_Messages_ListGroups
func (c *Conn) listGroups(request listGroupsRequestV1) (listGroupsResponseV1, error) {
	var response listGroupsResponseV1

	err := c.readOperation(
		func(deadline time.Time, id int32) error {
			return c.writeRequest(listGroups, v1, id, request)
		},
		func(deadline time.Time, size int) error {
			return expectZeroSize(func() (remain int, err error) {
				return (&response).readFrom(&c.rbuf, size)
			}())
		},
	)
	if err != nil {
		return listGroupsResponseV1{}, err
	}
	if response.ErrorCode != 0 {
		return listGroupsResponseV1{}, Error(response.ErrorCode)
	}

	return response, nil
}

// offsetCommit commits the specified topic partition offsets
//
// See http://kafka.apache.org/protocol.html#The_Messages_OffsetCommit
func (c *Conn) offsetCommit(request offsetCommitRequestV2) (offsetCommitResponseV2, error) {
	var response offsetCommitResponseV2

	err := c.writeOperation(
		func(deadline time.Time, id int32) error {
			return c.writeRequest(offsetCommit, v2, id, request)
		},
		func(deadline time.Time, size int) error {
			return expectZeroSize(func() (remain int, err error) {
				return (&response).readFrom(&c.rbuf, size)
			}())
		},
	)
	if err != nil {
		return offsetCommitResponseV2{}, err
	}
	// The error code is reported per partition; fail on the first one found.
	for _, r := range response.Responses {
		for _, pr := range r.PartitionResponses {
			if pr.ErrorCode != 0 {
				return offsetCommitResponseV2{}, Error(pr.ErrorCode)
			}
		}
	}

	return response, nil
}

// offsetFetch fetches the offsets for the specified topic partitions.
// -1 indicates that there is no offset saved for the partition.
//
// See http://kafka.apache.org/protocol.html#The_Messages_OffsetFetch
func (c *Conn) offsetFetch(request offsetFetchRequestV1) (offsetFetchResponseV1, error) {
	var response offsetFetchResponseV1

	err := c.readOperation(
		func(deadline time.Time, id int32) error {
			return c.writeRequest(offsetFetch, v1, id, request)
		},
		func(deadline time.Time, size int) error {
			return expectZeroSize(func() (remain int, err error) {
				return (&response).readFrom(&c.rbuf, size)
			}())
		},
	)
	if err != nil {
		return offsetFetchResponseV1{}, err
	}
	// The error code is reported per partition; fail on the first one found.
	for _, r := range response.Responses {
		for _, pr := range r.PartitionResponses {
			if pr.ErrorCode != 0 {
				return offsetFetchResponseV1{}, Error(pr.ErrorCode)
			}
		}
	}

	return response, nil
}

// syncGroup completes the handshake to join a consumer group
//
// See http://kafka.apache.org/protocol.html#The_Messages_SyncGroup
func (c *Conn) syncGroup(request syncGroupRequestV0) (syncGroupResponseV0, error) {
	var response syncGroupResponseV0

	err := c.readOperation(
		func(deadline time.Time, id int32) error {
			return c.writeRequest(syncGroup, v0, id, request)
		},
		func(deadline time.Time, size int) error {
			return expectZeroSize(func() (remain int, err error) {
				return (&response).readFrom(&c.rbuf, size)
			}())
		},
	)
	if err != nil {
		return syncGroupResponseV0{}, err
	}
	if response.ErrorCode != 0 {
		return syncGroupResponseV0{}, Error(response.ErrorCode)
	}

	return response, nil
}

// Close closes the kafka connection.
func (c *Conn) Close() error {
	return c.conn.Close()
}

// LocalAddr returns the local network address.
func (c *Conn) LocalAddr() net.Addr {
	return c.conn.LocalAddr()
}

// RemoteAddr returns the remote network address.
func (c *Conn) RemoteAddr() net.Addr {
	return c.conn.RemoteAddr()
}

// SetDeadline sets the read and write deadlines associated with the connection.
// It is equivalent to calling both SetReadDeadline and SetWriteDeadline.
//
// A deadline is an absolute time after which I/O operations fail with a timeout
// (see type Error) instead of blocking. The deadline applies to all future and
// pending I/O, not just the immediately following call to Read or Write. After
// a deadline has been exceeded, the connection may be closed if it was found to
// be in an unrecoverable state.
//
// A zero value for t means I/O operations will not time out.
func (c *Conn) SetDeadline(t time.Time) error {
	c.rdeadline.setDeadline(t)
	c.wdeadline.setDeadline(t)
	return nil
}

// SetReadDeadline sets the deadline for future Read calls and any
// currently-blocked Read call.
// A zero value for t means Read will not time out.
func (c *Conn) SetReadDeadline(t time.Time) error {
	c.rdeadline.setDeadline(t)
	return nil
}

// SetWriteDeadline sets the deadline for future Write calls and any
// currently-blocked Write call.
// Even if write times out, it may return n > 0, indicating that some of the
// data was successfully written.
// A zero value for t means Write will not time out.
func (c *Conn) SetWriteDeadline(t time.Time) error {
	c.wdeadline.setDeadline(t)
	return nil
}

// Offset returns the current offset of the connection as pair of integers,
// where the first one is an offset value and the second one indicates how
// to interpret it.
//
// See Seek for more details about the offset and whence values.
func (c *Conn) Offset() (offset int64, whence int) {
	c.mutex.Lock()
	offset = c.offset
	c.mutex.Unlock()

	// Map the sentinel offsets onto (0, whence) pairs understood by Seek.
	switch offset {
	case FirstOffset:
		offset = 0
		whence = SeekStart
	case LastOffset:
		offset = 0
		whence = SeekEnd
	default:
		whence = SeekAbsolute
	}
	return
}

const (
	SeekStart    = 0 // Seek relative to the first offset available in the partition.
	SeekAbsolute = 1 // Seek to an absolute offset.
	SeekEnd      = 2 // Seek relative to the last offset available in the partition.
	SeekCurrent  = 3 // Seek relative to the current offset.

	// This flag may be combined to any of the SeekAbsolute and SeekCurrent
	// constants to skip the bound check that the connection would do otherwise.
	// Programs can use this flag to avoid making a metadata request to the kafka
	// broker to read the current first and last offsets of the partition.
	SeekDontCheck = 1 << 30
)

// Seek sets the offset for the next read or write operation according to whence, which
// should be one of SeekStart, SeekAbsolute, SeekEnd, or SeekCurrent.
// When seeking relative to the end, the offset is subtracted from the current offset.
// Note that for historical reasons, these do not align with the usual whence constants
// as in lseek(2) or os.Seek.
// The method returns the new absolute offset of the connection.
func (c *Conn) Seek(offset int64, whence int) (int64, error) {
	// Split the SeekDontCheck flag from the whence value before validation.
	seekDontCheck := (whence & SeekDontCheck) != 0
	whence &= ^SeekDontCheck

	switch whence {
	case SeekStart, SeekAbsolute, SeekEnd, SeekCurrent:
	default:
		return 0, fmt.Errorf("whence must be one of 0, 1, 2, or 3. (whence = %d)", whence)
	}

	// Fast paths that update the offset without contacting the broker; only
	// possible for absolute and current seeks since start/end need metadata.
	if seekDontCheck {
		if whence == SeekAbsolute {
			c.mutex.Lock()
			c.offset = offset
			c.mutex.Unlock()
			return offset, nil
		}

		if whence == SeekCurrent {
			c.mutex.Lock()
			c.offset += offset
			offset = c.offset
			c.mutex.Unlock()
			return offset, nil
		}
	}

	// Seeking to the offset we are already at requires no bound check.
	if whence == SeekAbsolute {
		c.mutex.Lock()
		unchanged := offset == c.offset
		c.mutex.Unlock()
		if unchanged {
			return offset, nil
		}
	}

	if whence == SeekCurrent {
		c.mutex.Lock()
		offset = c.offset + offset
		c.mutex.Unlock()
	}

	// Fetch the partition's first and last offsets to resolve relative seeks
	// and validate the target is in range.
	first, last, err := c.ReadOffsets()
	if err != nil {
		return 0, err
	}

	switch whence {
	case SeekStart:
		offset = first + offset
	case SeekEnd:
		offset = last - offset
	}

	if offset < first || offset > last {
		return 0, OffsetOutOfRange
	}

	c.mutex.Lock()
	c.offset = offset
	c.mutex.Unlock()
	return offset, nil
}

// Read reads the message at the current offset from the connection, advancing
// the offset on success so the next call to a read method will produce the next
// message.
// The method returns the number of bytes read, or an error if something went
// wrong.
//
// While it is safe to call Read concurrently from multiple goroutines it may
// be hard for the program to predict the results as the connection offset will
// be read and written by multiple goroutines, they could read duplicates, or
// messages may be seen by only some of the goroutines.
//
// The method fails with io.ErrShortBuffer if the buffer passed as argument is
// too small to hold the message value.
//
// This method is provided to satisfy the net.Conn interface but is much less
// efficient than using the more general purpose ReadBatch method.
705 func (c *Conn) Read(b []byte) (int, error) { 706 batch := c.ReadBatch(1, len(b)) 707 n, err := batch.Read(b) 708 return n, coalesceErrors(silentEOF(err), batch.Close()) 709 } 710 711 // ReadMessage reads the message at the current offset from the connection, 712 // advancing the offset on success so the next call to a read method will 713 // produce the next message. 714 // 715 // Because this method allocate memory buffers for the message key and value 716 // it is less memory-efficient than Read, but has the advantage of never 717 // failing with io.ErrShortBuffer. 718 // 719 // While it is safe to call Read concurrently from multiple goroutines it may 720 // be hard for the program to predict the results as the connection offset will 721 // be read and written by multiple goroutines, they could read duplicates, or 722 // messages may be seen by only some of the goroutines. 723 // 724 // This method is provided for convenience purposes but is much less efficient 725 // than using the more general purpose ReadBatch method. 726 func (c *Conn) ReadMessage(maxBytes int) (Message, error) { 727 batch := c.ReadBatch(1, maxBytes) 728 msg, err := batch.ReadMessage() 729 return msg, coalesceErrors(silentEOF(err), batch.Close()) 730 } 731 732 // ReadBatch reads a batch of messages from the kafka server. The method always 733 // returns a non-nil Batch value. If an error occurred, either sending the fetch 734 // request or reading the response, the error will be made available by the 735 // returned value of the batch's Close method. 736 // 737 // While it is safe to call ReadBatch concurrently from multiple goroutines it 738 // may be hard for the program to predict the results as the connection offset 739 // will be read and written by multiple goroutines, they could read duplicates, 740 // or messages may be seen by only some of the goroutines. 
//
// A program doesn't specify the number of messages it wants from a batch, but
// gives the minimum and maximum number of bytes that it wants to receive from
// the kafka server.
func (c *Conn) ReadBatch(minBytes, maxBytes int) *Batch {
	return c.ReadBatchWith(ReadBatchConfig{
		MinBytes: minBytes,
		MaxBytes: maxBytes,
	})
}

// ReadBatchWith in every way is similar to ReadBatch. ReadBatch is configured
// with the default values in ReadBatchConfig except for minBytes and maxBytes.
func (c *Conn) ReadBatchWith(cfg ReadBatchConfig) *Batch {

	var adjustedDeadline time.Time
	var maxFetch = int(c.fetchMaxBytes)

	// Validate the requested byte range against the protocol-imposed bounds.
	if cfg.MinBytes < 0 || cfg.MinBytes > maxFetch {
		return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: minBytes of %d out of [1,%d] bounds", cfg.MinBytes, maxFetch)}
	}
	if cfg.MaxBytes < 0 || cfg.MaxBytes > maxFetch {
		return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: maxBytes of %d out of [1,%d] bounds", cfg.MaxBytes, maxFetch)}
	}
	if cfg.MinBytes > cfg.MaxBytes {
		return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: minBytes (%d) > maxBytes (%d)", cfg.MinBytes, cfg.MaxBytes)}
	}

	// Resolve the connection's current offset; SeekDontCheck avoids a
	// metadata round-trip for the bound check.
	offset, whence := c.Offset()

	offset, err := c.Seek(offset, whence|SeekDontCheck)
	if err != nil {
		return &Batch{err: dontExpectEOF(err)}
	}

	fetchVersion, err := c.negotiateVersion(fetch, v2, v5, v10)
	if err != nil {
		return &Batch{err: dontExpectEOF(err)}
	}

	id, err := c.doRequest(&c.rdeadline, func(deadline time.Time, id int32) error {
		now := time.Now()
		var timeout time.Duration
		if cfg.MaxWait > 0 {
			// explicitly-configured case: no changes are made to the deadline,
			// and the timeout is sent exactly as specified.
			timeout = cfg.MaxWait
		} else {
			// default case: use the original logic to adjust the conn's
			// deadline.T
			deadline = adjustDeadlineForRTT(deadline, now, defaultRTT)
			timeout = deadlineToTimeout(deadline, now)
		}
		// save this variable outside of the closure for later use in detecting
		// truncated messages.
		adjustedDeadline = deadline
		// Emit the fetch request in the highest wire format both sides support.
		switch fetchVersion {
		case v10:
			return c.wb.writeFetchRequestV10(
				id,
				c.clientID,
				c.topic,
				c.partition,
				offset,
				cfg.MinBytes,
				cfg.MaxBytes+int(c.fetchMinSize),
				timeout,
				int8(cfg.IsolationLevel),
			)
		case v5:
			return c.wb.writeFetchRequestV5(
				id,
				c.clientID,
				c.topic,
				c.partition,
				offset,
				cfg.MinBytes,
				cfg.MaxBytes+int(c.fetchMinSize),
				timeout,
				int8(cfg.IsolationLevel),
			)
		default:
			return c.wb.writeFetchRequestV2(
				id,
				c.clientID,
				c.topic,
				c.partition,
				offset,
				cfg.MinBytes,
				cfg.MaxBytes+int(c.fetchMinSize),
				timeout,
			)
		}
	})
	if err != nil {
		return &Batch{err: dontExpectEOF(err)}
	}

	_, size, lock, err := c.waitResponse(&c.rdeadline, id)
	if err != nil {
		return &Batch{err: dontExpectEOF(err)}
	}

	var throttle int32
	var highWaterMark int64
	var remain int

	switch fetchVersion {
	case v10:
		throttle, highWaterMark, remain, err = readFetchResponseHeaderV10(&c.rbuf, size)
	case v5:
		throttle, highWaterMark, remain, err = readFetchResponseHeaderV5(&c.rbuf, size)
	default:
		throttle, highWaterMark, remain, err = readFetchResponseHeaderV2(&c.rbuf, size)
	}
	if err == errShortRead {
		err = checkTimeoutErr(adjustedDeadline)
	}

	var msgs *messageSetReader
	if err == nil {
		// At the high water mark there is nothing to read; return an empty
		// (but valid) batch instead of parsing a message set.
		if highWaterMark == offset {
			msgs = &messageSetReader{empty: true}
		} else {
			msgs, err = newMessageSetReader(&c.rbuf, remain)
		}
	}
	if err == errShortRead {
		err = checkTimeoutErr(adjustedDeadline)
	}
	return &Batch{
		conn:          c,
		msgs:          msgs,
		deadline:      adjustedDeadline,
		throttle:      makeDuration(throttle),
		lock:          lock,
		topic:         c.topic,          // topic is copied to Batch to prevent race with Batch.close
		partition:     int(c.partition), // partition is copied to Batch to prevent race with Batch.close
		offset:        offset,
		highWaterMark: highWaterMark,
		// there shouldn't be a short read on initially setting up the batch.
		// as such, any io.EOF is re-mapped to an io.ErrUnexpectedEOF so that we
		// don't accidentally signal that we successfully reached the end of the
		// batch.
		err: dontExpectEOF(err),
	}
}

// ReadOffset returns the offset of the first message with a timestamp equal or
// greater to t.
func (c *Conn) ReadOffset(t time.Time) (int64, error) {
	return c.readOffset(timestamp(t))
}

// ReadFirstOffset returns the first offset available on the connection.
func (c *Conn) ReadFirstOffset() (int64, error) {
	return c.readOffset(FirstOffset)
}

// ReadLastOffset returns the last offset available on the connection.
func (c *Conn) ReadLastOffset() (int64, error) {
	return c.readOffset(LastOffset)
}

// ReadOffsets returns the absolute first and last offsets of the topic used by
// the connection.
func (c *Conn) ReadOffsets() (first, last int64, err error) {
	// We have to submit two different requests to fetch the first and last
	// offsets because kafka refuses requests that ask for multiple offsets
	// on the same topic and partition.
	if first, err = c.ReadFirstOffset(); err != nil {
		return
	}
	if last, err = c.ReadLastOffset(); err != nil {
		first = 0 // don't leak the value on error
		return
	}
	return
}

// readOffset issues a ListOffsets request for the connection's topic/partition
// and returns the single offset the broker reports for timestamp t.
func (c *Conn) readOffset(t int64) (offset int64, err error) {
	err = c.readOperation(
		func(deadline time.Time, id int32) error {
			return c.wb.writeListOffsetRequestV1(id, c.clientID, c.topic, c.partition, t)
		},
		func(deadline time.Time, size int) error {
			return expectZeroSize(readArrayWith(&c.rbuf, size, func(r *bufio.Reader, size int) (int, error) {
				// We skip the topic name because we've made a request for
				// a single topic.
				size, err := discardString(r, size)
				if err != nil {
					return size, err
				}

				// Reading the array of partitions, there will be only one
				// partition which gives the offset we're looking for.
				return readArrayWith(r, size, func(r *bufio.Reader, size int) (int, error) {
					var p partitionOffsetV1
					size, err := p.readFrom(r, size)
					if err != nil {
						return size, err
					}
					if p.ErrorCode != 0 {
						return size, Error(p.ErrorCode)
					}
					offset = p.Offset
					return size, nil
				})
			}))
		},
	)
	return
}

// ReadPartitions returns the list of available partitions for the given list of
// topics.
//
// If the method is called with no topic, it uses the topic configured on the
// connection. If there are none, the method fetches all partitions of the kafka
// cluster.
func (c *Conn) ReadPartitions(topics ...string) (partitions []Partition, err error) {

	if len(topics) == 0 {
		if len(c.topic) != 0 {
			defaultTopics := [...]string{c.topic}
			topics = defaultTopics[:]
		} else {
			// topics needs to be explicitly nil-ed out or the broker will
			// interpret it as a request for 0 partitions instead of all.
			topics = nil
		}
	}

	err = c.readOperation(
		func(deadline time.Time, id int32) error {
			return c.writeRequest(metadata, v1, id, topicMetadataRequestV1(topics))
		},
		func(deadline time.Time, size int) error {
			var res metadataResponseV1

			if err := c.readResponse(size, &res); err != nil {
				return err
			}

			// Index brokers by node id so partition leader/replica/ISR ids
			// can be resolved to full Broker values below.
			brokers := make(map[int32]Broker, len(res.Brokers))
			for _, b := range res.Brokers {
				brokers[b.NodeID] = Broker{
					Host: b.Host,
					Port: int(b.Port),
					ID:   int(b.NodeID),
					Rack: b.Rack,
				}
			}

			for _, t := range res.Topics {
				if t.TopicErrorCode != 0 && (c.topic == "" || t.TopicName == c.topic) {
					// We only report errors if they happened for the topic of
					// the connection, otherwise the topic will simply have no
					// partitions in the result set.
					return Error(t.TopicErrorCode)
				}
				for _, p := range t.Partitions {
					partitions = append(partitions, Partition{
						Topic:    t.TopicName,
						Leader:   brokers[p.Leader],
						Replicas: makeBrokers(brokers, p.Replicas...),
						Isr:      makeBrokers(brokers, p.Isr...),
						ID:       int(p.PartitionID),
					})
				}
			}
			return nil
		},
	)
	return
}

// makeBrokers resolves a list of node ids against the broker index, silently
// dropping ids that are not present in the metadata response.
func makeBrokers(brokers map[int32]Broker, ids ...int32) []Broker {
	b := make([]Broker, 0, len(ids))
	for _, id := range ids {
		if br, ok := brokers[id]; ok {
			b = append(b, br)
		}
	}
	return b
}

// Write writes a message to the kafka broker that this connection was
// established to. The method returns the number of bytes written, or an error
// if something went wrong.
//
// The operation either succeeds or fail, it never partially writes the message.
//
// This method is exposed to satisfy the net.Conn interface but is less efficient
// than the more general purpose WriteMessages method.
func (c *Conn) Write(b []byte) (int, error) {
	return c.WriteCompressedMessages(nil, Message{Value: b})
}

// WriteMessages writes a batch of messages to the connection's topic and
// partition, returning the number of bytes written. The write is an atomic
// operation, it either fully succeeds or fails.
func (c *Conn) WriteMessages(msgs ...Message) (int, error) {
	return c.WriteCompressedMessages(nil, msgs...)
}

// WriteCompressedMessages writes a batch of messages to the connection's topic
// and partition, returning the number of bytes written. The write is an atomic
// operation, it either fully succeeds or fails.
//
// If the compression codec is not nil, the messages will be compressed.
func (c *Conn) WriteCompressedMessages(codec CompressionCodec, msgs ...Message) (nbytes int, err error) {
	nbytes, _, _, _, err = c.writeCompressedMessages(codec, msgs...)
	return
}

// WriteCompressedMessagesAt writes a batch of messages to the connection's topic
// and partition, returning the number of bytes written, partition and offset numbers
// and timestamp assigned by the kafka broker to the message set. The write is an atomic
// operation, it either fully succeeds or fails.
//
// If the compression codec is not nil, the messages will be compressed.
func (c *Conn) WriteCompressedMessagesAt(codec CompressionCodec, msgs ...Message) (nbytes int, partition int32, offset int64, appendTime time.Time, err error) {
	return c.writeCompressedMessages(codec, msgs...)
}

// writeCompressedMessages implements the produce request/response cycle shared
// by the exported Write* methods. It validates the messages, negotiates the
// produce API version with the broker, writes the request, and decodes the
// partition, offset and append time from the response.
func (c *Conn) writeCompressedMessages(codec CompressionCodec, msgs ...Message) (nbytes int, partition int32, offset int64, appendTime time.Time, err error) {
	if len(msgs) == 0 {
		return
	}

	writeTime := time.Now()
	for i, msg := range msgs {
		// users may believe they can set the Topic and/or Partition
		// on the kafka message.
		if msg.Topic != "" && msg.Topic != c.topic {
			err = errInvalidWriteTopic
			return
		}
		if msg.Partition != 0 {
			err = errInvalidWritePartition
			return
		}

		// Messages carrying no timestamp are stamped with the time the
		// batch was assembled.
		if msg.Time.IsZero() {
			msgs[i].Time = writeTime
		}

		nbytes += len(msg.Key) + len(msg.Value)
	}

	var produceVersion apiVersion
	if produceVersion, err = c.negotiateVersion(produce, v2, v3, v7); err != nil {
		return
	}

	err = c.writeOperation(
		func(deadline time.Time, id int32) error {
			now := time.Now()
			deadline = adjustDeadlineForRTT(deadline, now, defaultRTT)
			// v3 and v7 use the record-batch format; v2 encodes the
			// legacy message set directly from msgs.
			switch produceVersion {
			case v7:
				recordBatch, err := newRecordBatch(codec, msgs...)
				if err != nil {
					return err
				}
				return c.wb.writeProduceRequestV7(
					id,
					c.clientID,
					c.topic,
					c.partition,
					deadlineToTimeout(deadline, now),
					int16(atomic.LoadInt32(&c.requiredAcks)),
					c.transactionalID,
					recordBatch,
				)
			case v3:
				recordBatch, err := newRecordBatch(codec, msgs...)
				if err != nil {
					return err
				}
				return c.wb.writeProduceRequestV3(
					id,
					c.clientID,
					c.topic,
					c.partition,
					deadlineToTimeout(deadline, now),
					int16(atomic.LoadInt32(&c.requiredAcks)),
					c.transactionalID,
					recordBatch,
				)
			default:
				return c.wb.writeProduceRequestV2(
					codec,
					id,
					c.clientID,
					c.topic,
					c.partition,
					deadlineToTimeout(deadline, now),
					int16(atomic.LoadInt32(&c.requiredAcks)),
					msgs...,
				)
			}
		},
		func(deadline time.Time, size int) error {
			return expectZeroSize(readArrayWith(&c.rbuf, size, func(r *bufio.Reader, size int) (int, error) {
				// Skip the topic, we've produced the message to only one topic,
				// no need to waste resources loading it in memory.
				size, err := discardString(r, size)
				if err != nil {
					return size, err
				}

				// Read the list of partitions, there should be only one since
				// we've produced a message to a single partition.
				size, err = readArrayWith(r, size, func(r *bufio.Reader, size int) (int, error) {
					switch produceVersion {
					case v7:
						var p produceResponsePartitionV7
						size, err := p.readFrom(r, size)
						if err == nil && p.ErrorCode != 0 {
							err = Error(p.ErrorCode)
						}
						if err == nil {
							partition = p.Partition
							offset = p.Offset
							appendTime = time.Unix(0, p.Timestamp*int64(time.Millisecond))
						}
						return size, err
					default:
						var p produceResponsePartitionV2
						size, err := p.readFrom(r, size)
						if err == nil && p.ErrorCode != 0 {
							err = Error(p.ErrorCode)
						}
						if err == nil {
							partition = p.Partition
							offset = p.Offset
							appendTime = time.Unix(0, p.Timestamp*int64(time.Millisecond))
						}
						return size, err
					}

				})
				if err != nil {
					return size, err
				}

				// The response is trailed by the throttle time, also skipping
				// since it's not interesting here.
				return discardInt32(r, size)
			}))
		},
	)

	// The write is all-or-nothing: report zero bytes written on error.
	if err != nil {
		nbytes = 0
	}

	return
}

// SetRequiredAcks sets the number of acknowledges from replicas that the
// connection requests when producing messages.
1213 func (c *Conn) SetRequiredAcks(n int) error { 1214 switch n { 1215 case -1, 1: 1216 atomic.StoreInt32(&c.requiredAcks, int32(n)) 1217 return nil 1218 default: 1219 return InvalidRequiredAcks 1220 } 1221 } 1222 1223 func (c *Conn) writeRequestHeader(apiKey apiKey, apiVersion apiVersion, correlationID int32, size int32) { 1224 hdr := c.requestHeader(apiKey, apiVersion, correlationID) 1225 hdr.Size = (hdr.size() + size) - 4 1226 hdr.writeTo(&c.wb) 1227 } 1228 1229 func (c *Conn) writeRequest(apiKey apiKey, apiVersion apiVersion, correlationID int32, req request) error { 1230 hdr := c.requestHeader(apiKey, apiVersion, correlationID) 1231 hdr.Size = (hdr.size() + req.size()) - 4 1232 hdr.writeTo(&c.wb) 1233 req.writeTo(&c.wb) 1234 return c.wbuf.Flush() 1235 } 1236 1237 func (c *Conn) readResponse(size int, res interface{}) error { 1238 size, err := read(&c.rbuf, size, res) 1239 switch err.(type) { 1240 case Error: 1241 var e error 1242 if size, e = discardN(&c.rbuf, size, size); e != nil { 1243 err = e 1244 } 1245 } 1246 return expectZeroSize(size, err) 1247 } 1248 1249 func (c *Conn) peekResponseSizeAndID() (int32, int32, error) { 1250 b, err := c.rbuf.Peek(8) 1251 if err != nil { 1252 return 0, 0, err 1253 } 1254 size, id := makeInt32(b[:4]), makeInt32(b[4:]) 1255 return size, id, nil 1256 } 1257 1258 func (c *Conn) skipResponseSizeAndID() { 1259 c.rbuf.Discard(8) 1260 } 1261 1262 func (c *Conn) readDeadline() time.Time { 1263 return c.rdeadline.deadline() 1264 } 1265 1266 func (c *Conn) writeDeadline() time.Time { 1267 return c.wdeadline.deadline() 1268 } 1269 1270 func (c *Conn) readOperation(write func(time.Time, int32) error, read func(time.Time, int) error) error { 1271 return c.do(&c.rdeadline, write, read) 1272 } 1273 1274 func (c *Conn) writeOperation(write func(time.Time, int32) error, read func(time.Time, int) error) error { 1275 return c.do(&c.wdeadline, write, read) 1276 } 1277 1278 func (c *Conn) enter() { 1279 atomic.AddInt32(&c.inflight, +1) 1280 } 

// leave records that an in-flight request has completed.
func (c *Conn) leave() {
	atomic.AddInt32(&c.inflight, -1)
}

// concurrency returns the number of requests currently in flight on the
// connection.
func (c *Conn) concurrency() int {
	return int(atomic.LoadInt32(&c.inflight))
}

// do runs one request/response cycle against the broker: write sends the
// request, read decodes the response. The deadline applied to the underlying
// connection comes from d (the read or write deadline depending on the
// operation).
func (c *Conn) do(d *connDeadline, write func(time.Time, int32) error, read func(time.Time, int) error) error {
	id, err := c.doRequest(d, write)
	if err != nil {
		return err
	}

	// waitResponse returns with c.rlock held; it is released below after the
	// read callback has consumed the response.
	deadline, size, lock, err := c.waitResponse(d, id)
	if err != nil {
		return err
	}

	if err = read(deadline, size); err != nil {
		switch err.(type) {
		case Error:
			// Protocol-level errors are returned to the caller without
			// closing the connection; the stream itself is still intact.
		default:
			// Any other read error leaves the stream in an unknown state,
			// so the connection is closed.
			c.conn.Close()
		}
	}

	d.unsetConnReadDeadline()
	lock.Unlock()
	return err
}

// doRequest assigns the next correlation id and writes the request under the
// write lock, registering the operation as in-flight via enter. The returned
// id is used to match the broker's response.
func (c *Conn) doRequest(d *connDeadline, write func(time.Time, int32) error) (id int32, err error) {
	c.enter()
	c.wlock.Lock()
	c.correlationID++
	id = c.correlationID
	err = write(d.setConnWriteDeadline(c.conn), id)
	d.unsetConnWriteDeadline()

	if err != nil {
		// When an error occurs there's no way to know if the connection is in a
		// recoverable state so we're better off just giving up at this point to
		// avoid any risk of corrupting the following operations.
		c.conn.Close()
		c.leave()
	}

	c.wlock.Unlock()
	return
}

// waitResponse blocks until the response whose correlation id matches id is
// at the head of the read buffer. On success it returns with c.rlock still
// held (lock points at it): ownership of the read buffer passes to the
// caller, which must unlock it after consuming size bytes. Responses for
// other correlation ids cause the lock to be released and the goroutine to
// yield so the goroutine waiting on that response can make progress.
func (c *Conn) waitResponse(d *connDeadline, id int32) (deadline time.Time, size int, lock *sync.Mutex, err error) {
	for {
		var rsz int32
		var rid int32

		c.rlock.Lock()
		deadline = d.setConnReadDeadline(c.conn)
		rsz, rid, err = c.peekResponseSizeAndID()

		if err != nil {
			d.unsetConnReadDeadline()
			c.conn.Close()
			c.rlock.Unlock()
			break
		}

		if id == rid {
			c.skipResponseSizeAndID()
			// The reported size includes the 4-byte correlation id, which
			// has already been consumed.
			size, lock = int(rsz-4), &c.rlock
			// Don't unlock the read mutex to yield ownership to the caller.
			break
		}

		if c.concurrency() == 1 {
			// If the goroutine is the only one waiting on this connection it
			// should be impossible to read a correlation id different from the
			// one it expects. This is a sign that the data we are reading on
			// the wire is corrupted and the connection needs to be closed.
			err = io.ErrNoProgress
			c.rlock.Unlock()
			break
		}

		// Optimistically release the read lock if a response has already
		// been received but the current operation is not the target for it.
		c.rlock.Unlock()
		runtime.Gosched()
	}

	c.leave()
	return
}

// requestHeader constructs the common request header carrying the
// connection's client id.
func (c *Conn) requestHeader(apiKey apiKey, apiVersion apiVersion, correlationID int32) requestHeader {
	return requestHeader{
		ApiKey:        int16(apiKey),
		ApiVersion:    int16(apiVersion),
		CorrelationID: correlationID,
		ClientID:      c.clientID,
	}
}

// ApiVersions queries the broker for the range of API versions it supports
// for each API key. The result is what negotiateVersion uses to pick request
// versions for this connection.
func (c *Conn) ApiVersions() ([]ApiVersion, error) {
	deadline := &c.rdeadline

	if deadline.deadline().IsZero() {
		// ApiVersions is called automatically when API version negotiation
		// needs to happen, so we are not guaranteed that a read deadline has
		// been set yet. Fallback to use the write deadline in case it was
		// set, for example when version negotiation is initiated during a
		// produce request.
		deadline = &c.wdeadline
	}

	id, err := c.doRequest(deadline, func(_ time.Time, id int32) error {
		h := requestHeader{
			ApiKey:        int16(apiVersions),
			ApiVersion:    int16(v0),
			CorrelationID: id,
			ClientID:      c.clientID,
		}
		h.Size = (h.size() - 4)
		h.writeTo(&c.wb)
		return c.wbuf.Flush()
	})
	if err != nil {
		return nil, err
	}

	_, size, lock, err := c.waitResponse(deadline, id)
	if err != nil {
		return nil, err
	}
	defer lock.Unlock()

	// Decode the response by hand: an int16 error code followed by an array
	// of (ApiKey, MinVersion, MaxVersion) triples.
	var errorCode int16
	if size, err = readInt16(&c.rbuf, size, &errorCode); err != nil {
		return nil, err
	}
	var arrSize int32
	if size, err = readInt32(&c.rbuf, size, &arrSize); err != nil {
		return nil, err
	}
	r := make([]ApiVersion, arrSize)
	for i := 0; i < int(arrSize); i++ {
		if size, err = readInt16(&c.rbuf, size, &r[i].ApiKey); err != nil {
			return nil, err
		}
		if size, err = readInt16(&c.rbuf, size, &r[i].MinVersion); err != nil {
			return nil, err
		}
		if size, err = readInt16(&c.rbuf, size, &r[i].MaxVersion); err != nil {
			return nil, err
		}
	}

	if errorCode != 0 {
		return r, Error(errorCode)
	}

	return r, nil
}

// connDeadline is a helper type to implement read/write deadline management on
// the kafka connection.
1449 type connDeadline struct { 1450 mutex sync.Mutex 1451 value time.Time 1452 rconn net.Conn 1453 wconn net.Conn 1454 } 1455 1456 func (d *connDeadline) deadline() time.Time { 1457 d.mutex.Lock() 1458 t := d.value 1459 d.mutex.Unlock() 1460 return t 1461 } 1462 1463 func (d *connDeadline) setDeadline(t time.Time) { 1464 d.mutex.Lock() 1465 d.value = t 1466 1467 if d.rconn != nil { 1468 d.rconn.SetReadDeadline(t) 1469 } 1470 1471 if d.wconn != nil { 1472 d.wconn.SetWriteDeadline(t) 1473 } 1474 1475 d.mutex.Unlock() 1476 } 1477 1478 func (d *connDeadline) setConnReadDeadline(conn net.Conn) time.Time { 1479 d.mutex.Lock() 1480 deadline := d.value 1481 d.rconn = conn 1482 d.rconn.SetReadDeadline(deadline) 1483 d.mutex.Unlock() 1484 return deadline 1485 } 1486 1487 func (d *connDeadline) setConnWriteDeadline(conn net.Conn) time.Time { 1488 d.mutex.Lock() 1489 deadline := d.value 1490 d.wconn = conn 1491 d.wconn.SetWriteDeadline(deadline) 1492 d.mutex.Unlock() 1493 return deadline 1494 } 1495 1496 func (d *connDeadline) unsetConnReadDeadline() { 1497 d.mutex.Lock() 1498 d.rconn = nil 1499 d.mutex.Unlock() 1500 } 1501 1502 func (d *connDeadline) unsetConnWriteDeadline() { 1503 d.mutex.Lock() 1504 d.wconn = nil 1505 d.mutex.Unlock() 1506 } 1507 1508 // saslHandshake sends the SASL handshake message. This will determine whether 1509 // the Mechanism is supported by the cluster. If it's not, this function will 1510 // error out with UnsupportedSASLMechanism. 1511 // 1512 // If the mechanism is unsupported, the handshake request will reply with the 1513 // list of the cluster's configured mechanisms, which could potentially be used 1514 // to facilitate negotiation. At the moment, we are not negotiating the 1515 // mechanism as we believe that brokers are usually known to the client, and 1516 // therefore the client should already know which mechanisms are supported. 
1517 // 1518 // See http://kafka.apache.org/protocol.html#The_Messages_SaslHandshake 1519 func (c *Conn) saslHandshake(mechanism string) error { 1520 // The wire format for V0 and V1 is identical, but the version 1521 // number will affect how the SASL authentication 1522 // challenge/responses are sent 1523 var resp saslHandshakeResponseV0 1524 1525 version, err := c.negotiateVersion(saslHandshake, v0, v1) 1526 if err != nil { 1527 return err 1528 } 1529 1530 err = c.writeOperation( 1531 func(deadline time.Time, id int32) error { 1532 return c.writeRequest(saslHandshake, version, id, &saslHandshakeRequestV0{Mechanism: mechanism}) 1533 }, 1534 func(deadline time.Time, size int) error { 1535 return expectZeroSize(func() (int, error) { 1536 return (&resp).readFrom(&c.rbuf, size) 1537 }()) 1538 }, 1539 ) 1540 if err == nil && resp.ErrorCode != 0 { 1541 err = Error(resp.ErrorCode) 1542 } 1543 return err 1544 } 1545 1546 // saslAuthenticate sends the SASL authenticate message. This function must 1547 // be immediately preceded by a successful saslHandshake. 1548 // 1549 // See http://kafka.apache.org/protocol.html#The_Messages_SaslAuthenticate 1550 func (c *Conn) saslAuthenticate(data []byte) ([]byte, error) { 1551 // if we sent a v1 handshake, then we must encapsulate the authentication 1552 // request in a saslAuthenticateRequest. otherwise, we read and write raw 1553 // bytes. 
1554 version, err := c.negotiateVersion(saslHandshake, v0, v1) 1555 if err != nil { 1556 return nil, err 1557 } 1558 if version == v1 { 1559 var request = saslAuthenticateRequestV0{Data: data} 1560 var response saslAuthenticateResponseV0 1561 1562 err := c.writeOperation( 1563 func(deadline time.Time, id int32) error { 1564 return c.writeRequest(saslAuthenticate, v0, id, request) 1565 }, 1566 func(deadline time.Time, size int) error { 1567 return expectZeroSize(func() (remain int, err error) { 1568 return (&response).readFrom(&c.rbuf, size) 1569 }()) 1570 }, 1571 ) 1572 if err == nil && response.ErrorCode != 0 { 1573 err = Error(response.ErrorCode) 1574 } 1575 return response.Data, err 1576 } 1577 1578 // fall back to opaque bytes on the wire. the broker is expecting these if 1579 // it just processed a v0 sasl handshake. 1580 c.wb.writeInt32(int32(len(data))) 1581 if _, err := c.wb.Write(data); err != nil { 1582 return nil, err 1583 } 1584 if err := c.wb.Flush(); err != nil { 1585 return nil, err 1586 } 1587 1588 var respLen int32 1589 if _, err := readInt32(&c.rbuf, 4, &respLen); err != nil { 1590 return nil, err 1591 } 1592 1593 resp, _, err := readNewBytes(&c.rbuf, int(respLen), int(respLen)) 1594 return resp, err 1595 }