github.com/QuangHoangHao/kafka-go@v0.4.36/conn.go (about)

     1  package kafka
     2  
     3  import (
     4  	"bufio"
     5  	"errors"
     6  	"fmt"
     7  	"io"
     8  	"math"
     9  	"net"
    10  	"os"
    11  	"path/filepath"
    12  	"runtime"
    13  	"sync"
    14  	"sync/atomic"
    15  	"time"
    16  )
    17  
var (
	// Sentinel errors for the write path: a Conn is bound to a single topic
	// and partition when it is created, so kafka.Message values written
	// through it must leave their Topic and Partition fields unset.
	errInvalidWriteTopic     = errors.New("writes must NOT set Topic on kafka.Message")
	errInvalidWritePartition = errors.New("writes must NOT set Partition on kafka.Message")
)
    22  
// Conn represents a connection to a kafka broker.
//
// Instances of Conn are safe to use concurrently from multiple goroutines.
type Conn struct {
	// base network connection
	conn net.Conn

	// number of inflight requests on the connection.
	inflight int32

	// offset management (synchronized on the mutex field)
	mutex  sync.Mutex
	offset int64

	// read buffer (synchronized on rlock)
	rlock sync.Mutex
	rbuf  bufio.Reader

	// write buffer (synchronized on wlock)
	wlock sync.Mutex
	wbuf  bufio.Writer
	wb    writeBuffer

	// deadline management
	wdeadline connDeadline
	rdeadline connDeadline

	// immutable values of the connection object
	clientID      string
	topic         string
	partition     int32
	// fetchMaxBytes is the largest MaxBytes a fetch request may carry
	// (math.MaxInt32 - fetchMinSize, see NewConnWith); fetchMinSize is the
	// wire size of an empty fetch response, used to pad fetch MaxBytes so
	// the response control data always fits.
	fetchMaxBytes int32
	fetchMinSize  int32
	broker        int32
	rack          string

	// correlation ID generator (synchronized on wlock)
	correlationID int32

	// number of replica acks required when publishing to a partition
	requiredAcks int32

	// lazily loaded API versions used by this connection
	apiVersions atomic.Value // apiVersionMap

	// transactional id for transactional delivery; nil when the connection
	// is not transactional (see emptyToNullable).
	transactionalID *string
}
    70  
    71  type apiVersionMap map[apiKey]ApiVersion
    72  
    73  func (v apiVersionMap) negotiate(key apiKey, sortedSupportedVersions ...apiVersion) apiVersion {
    74  	x := v[key]
    75  
    76  	for i := len(sortedSupportedVersions) - 1; i >= 0; i-- {
    77  		s := sortedSupportedVersions[i]
    78  
    79  		if apiVersion(x.MaxVersion) >= s {
    80  			return s
    81  		}
    82  	}
    83  
    84  	return -1
    85  }
    86  
// ConnConfig is a configuration object used to create new instances of Conn.
type ConnConfig struct {
	// ClientID is reported to the broker; it defaults to DefaultClientID
	// when left empty (see NewConnWith).
	ClientID  string
	// Topic and Partition bind the connection to a single topic partition.
	Topic     string
	Partition int
	// Broker is the numeric ID of the broker, and Rack its rack identifier,
	// as surfaced by the Broker method.
	Broker    int
	Rack      string

	// The transactional id to use for transactional delivery. Idempotent
	// deliver should be enabled if transactional id is configured.
	// For more details look at transactional.id description here: http://kafka.apache.org/documentation.html#producerconfigs
	// Empty string means that this connection can't be transactional.
	TransactionalID string
}
   101  
// ReadBatchConfig is a configuration object used for reading batches of messages.
type ReadBatchConfig struct {
	// MinBytes indicates to the broker the minimum batch size that the consumer
	// will accept. Setting a high minimum when consuming from a low-volume topic
	// may result in delayed delivery when the broker does not have enough data to
	// satisfy the defined minimum.
	MinBytes int

	// MaxBytes indicates to the broker the maximum batch size that the consumer
	// will accept. The broker will truncate a message to satisfy this maximum, so
	// choose a value that is high enough for your largest message size.
	MaxBytes int

	// IsolationLevel controls the visibility of transactional records.
	// ReadUncommitted makes all records visible. With ReadCommitted only
	// non-transactional and committed records are visible.
	IsolationLevel IsolationLevel

	// MaxWait is the amount of time the broker is allowed to wait while trying
	// to hit the min/max byte targets. This setting is independent of any
	// network-level timeouts or deadlines.
	//
	// For backward compatibility, when this field is left zero, kafka-go will
	// infer the max wait from the connection's read deadline.
	MaxWait time.Duration
}
   128  
// IsolationLevel controls which records of transactional topics are visible
// to a consumer (see ReadBatchConfig.IsolationLevel).
type IsolationLevel int8

const (
	// ReadUncommitted makes all records visible.
	ReadUncommitted IsolationLevel = 0
	// ReadCommitted makes only non-transactional and committed records visible.
	ReadCommitted   IsolationLevel = 1
)
   135  
   136  var (
   137  	// DefaultClientID is the default value used as ClientID of kafka
   138  	// connections.
   139  	DefaultClientID string
   140  )
   141  
   142  func init() {
   143  	progname := filepath.Base(os.Args[0])
   144  	hostname, _ := os.Hostname()
   145  	DefaultClientID = fmt.Sprintf("%s@%s (github.com/QuangHoangHao/kafka-go)", progname, hostname)
   146  }
   147  
   148  // NewConn returns a new kafka connection for the given topic and partition.
   149  func NewConn(conn net.Conn, topic string, partition int) *Conn {
   150  	return NewConnWith(conn, ConnConfig{
   151  		Topic:     topic,
   152  		Partition: partition,
   153  	})
   154  }
   155  
   156  func emptyToNullable(transactionalID string) (result *string) {
   157  	if transactionalID != "" {
   158  		result = &transactionalID
   159  	}
   160  	return result
   161  }
   162  
// NewConnWith returns a new kafka connection configured with config.
// The offset is initialized to FirstOffset.
func NewConnWith(conn net.Conn, config ConnConfig) *Conn {
	// Fall back to the process-wide default client id when none is given.
	if len(config.ClientID) == 0 {
		config.ClientID = DefaultClientID
	}

	// The partition is stored as an int32, so any value outside that range is
	// a programmer error and fails loudly.
	if config.Partition < 0 || config.Partition > math.MaxInt32 {
		panic(fmt.Sprintf("invalid partition number: %d", config.Partition))
	}

	c := &Conn{
		conn:            conn,
		rbuf:            *bufio.NewReader(conn),
		wbuf:            *bufio.NewWriter(conn),
		clientID:        config.ClientID,
		topic:           config.Topic,
		partition:       int32(config.Partition),
		broker:          int32(config.Broker),
		rack:            config.Rack,
		offset:          FirstOffset,
		requiredAcks:    -1,
		transactionalID: emptyToNullable(config.TransactionalID),
	}

	// wb writes through the connection's buffered writer; the pointer must be
	// taken after c is allocated so it aliases the embedded wbuf field.
	c.wb.w = &c.wbuf

	// The fetch request needs to ask for a MaxBytes value that is at least
	// enough to load the control data of the response. To avoid having to
	// recompute it on every read, it is cached here in the Conn value.
	// fetchMinSize is the wire size of a fetch response carrying one empty
	// message for this topic/partition.
	c.fetchMinSize = (fetchResponseV2{
		Topics: []fetchResponseTopicV2{{
			TopicName: config.Topic,
			Partitions: []fetchResponsePartitionV2{{
				Partition:  int32(config.Partition),
				MessageSet: messageSet{{}},
			}},
		}},
	}).size()
	c.fetchMaxBytes = math.MaxInt32 - c.fetchMinSize
	return c
}
   205  
   206  func (c *Conn) negotiateVersion(key apiKey, sortedSupportedVersions ...apiVersion) (apiVersion, error) {
   207  	v, err := c.loadVersions()
   208  	if err != nil {
   209  		return -1, err
   210  	}
   211  	a := v.negotiate(key, sortedSupportedVersions...)
   212  	if a < 0 {
   213  		return -1, fmt.Errorf("no matching versions were found between the client and the broker for API key %d", key)
   214  	}
   215  	return a, nil
   216  }
   217  
   218  func (c *Conn) loadVersions() (apiVersionMap, error) {
   219  	v, _ := c.apiVersions.Load().(apiVersionMap)
   220  	if v != nil {
   221  		return v, nil
   222  	}
   223  
   224  	brokerVersions, err := c.ApiVersions()
   225  	if err != nil {
   226  		return nil, err
   227  	}
   228  
   229  	v = make(apiVersionMap, len(brokerVersions))
   230  
   231  	for _, a := range brokerVersions {
   232  		v[apiKey(a.ApiKey)] = a
   233  	}
   234  
   235  	c.apiVersions.Store(v)
   236  	return v, nil
   237  }
   238  
   239  // Broker returns a Broker value representing the kafka broker that this
   240  // connection was established to.
   241  func (c *Conn) Broker() Broker {
   242  	addr := c.conn.RemoteAddr()
   243  	host, port, _ := splitHostPortNumber(addr.String())
   244  	return Broker{
   245  		Host: host,
   246  		Port: port,
   247  		ID:   int(c.broker),
   248  		Rack: c.rack,
   249  	}
   250  }
   251  
   252  // Controller requests kafka for the current controller and returns its URL.
   253  func (c *Conn) Controller() (broker Broker, err error) {
   254  	err = c.readOperation(
   255  		func(deadline time.Time, id int32) error {
   256  			return c.writeRequest(metadata, v1, id, topicMetadataRequestV1([]string{}))
   257  		},
   258  		func(deadline time.Time, size int) error {
   259  			var res metadataResponseV1
   260  
   261  			if err := c.readResponse(size, &res); err != nil {
   262  				return err
   263  			}
   264  			for _, brokerMeta := range res.Brokers {
   265  				if brokerMeta.NodeID == res.ControllerID {
   266  					broker = Broker{ID: int(brokerMeta.NodeID),
   267  						Port: int(brokerMeta.Port),
   268  						Host: brokerMeta.Host,
   269  						Rack: brokerMeta.Rack}
   270  					break
   271  				}
   272  			}
   273  			return nil
   274  		},
   275  	)
   276  	return broker, err
   277  }
   278  
   279  // Brokers retrieve the broker list from the Kafka metadata.
   280  func (c *Conn) Brokers() ([]Broker, error) {
   281  	var brokers []Broker
   282  	err := c.readOperation(
   283  		func(deadline time.Time, id int32) error {
   284  			return c.writeRequest(metadata, v1, id, topicMetadataRequestV1([]string{}))
   285  		},
   286  		func(deadline time.Time, size int) error {
   287  			var res metadataResponseV1
   288  
   289  			if err := c.readResponse(size, &res); err != nil {
   290  				return err
   291  			}
   292  
   293  			brokers = make([]Broker, len(res.Brokers))
   294  			for i, brokerMeta := range res.Brokers {
   295  				brokers[i] = Broker{
   296  					ID:   int(brokerMeta.NodeID),
   297  					Port: int(brokerMeta.Port),
   298  					Host: brokerMeta.Host,
   299  					Rack: brokerMeta.Rack,
   300  				}
   301  			}
   302  			return nil
   303  		},
   304  	)
   305  	return brokers, err
   306  }
   307  
   308  // DeleteTopics deletes the specified topics.
   309  func (c *Conn) DeleteTopics(topics ...string) error {
   310  	_, err := c.deleteTopics(deleteTopicsRequestV0{
   311  		Topics: topics,
   312  	})
   313  	return err
   314  }
   315  
   316  // findCoordinator finds the coordinator for the specified group or transaction
   317  //
   318  // See http://kafka.apache.org/protocol.html#The_Messages_FindCoordinator
   319  func (c *Conn) findCoordinator(request findCoordinatorRequestV0) (findCoordinatorResponseV0, error) {
   320  	var response findCoordinatorResponseV0
   321  
   322  	err := c.readOperation(
   323  		func(deadline time.Time, id int32) error {
   324  			return c.writeRequest(findCoordinator, v0, id, request)
   325  
   326  		},
   327  		func(deadline time.Time, size int) error {
   328  			return expectZeroSize(func() (remain int, err error) {
   329  				return (&response).readFrom(&c.rbuf, size)
   330  			}())
   331  		},
   332  	)
   333  	if err != nil {
   334  		return findCoordinatorResponseV0{}, err
   335  	}
   336  	if response.ErrorCode != 0 {
   337  		return findCoordinatorResponseV0{}, Error(response.ErrorCode)
   338  	}
   339  
   340  	return response, nil
   341  }
   342  
   343  // heartbeat sends a heartbeat message required by consumer groups
   344  //
   345  // See http://kafka.apache.org/protocol.html#The_Messages_Heartbeat
   346  func (c *Conn) heartbeat(request heartbeatRequestV0) (heartbeatResponseV0, error) {
   347  	var response heartbeatResponseV0
   348  
   349  	err := c.writeOperation(
   350  		func(deadline time.Time, id int32) error {
   351  			return c.writeRequest(heartbeat, v0, id, request)
   352  		},
   353  		func(deadline time.Time, size int) error {
   354  			return expectZeroSize(func() (remain int, err error) {
   355  				return (&response).readFrom(&c.rbuf, size)
   356  			}())
   357  		},
   358  	)
   359  	if err != nil {
   360  		return heartbeatResponseV0{}, err
   361  	}
   362  	if response.ErrorCode != 0 {
   363  		return heartbeatResponseV0{}, Error(response.ErrorCode)
   364  	}
   365  
   366  	return response, nil
   367  }
   368  
   369  // joinGroup attempts to join a consumer group
   370  //
   371  // See http://kafka.apache.org/protocol.html#The_Messages_JoinGroup
   372  func (c *Conn) joinGroup(request joinGroupRequestV1) (joinGroupResponseV1, error) {
   373  	var response joinGroupResponseV1
   374  
   375  	err := c.writeOperation(
   376  		func(deadline time.Time, id int32) error {
   377  			return c.writeRequest(joinGroup, v1, id, request)
   378  		},
   379  		func(deadline time.Time, size int) error {
   380  			return expectZeroSize(func() (remain int, err error) {
   381  				return (&response).readFrom(&c.rbuf, size)
   382  			}())
   383  		},
   384  	)
   385  	if err != nil {
   386  		return joinGroupResponseV1{}, err
   387  	}
   388  	if response.ErrorCode != 0 {
   389  		return joinGroupResponseV1{}, Error(response.ErrorCode)
   390  	}
   391  
   392  	return response, nil
   393  }
   394  
   395  // leaveGroup leaves the consumer from the consumer group
   396  //
   397  // See http://kafka.apache.org/protocol.html#The_Messages_LeaveGroup
   398  func (c *Conn) leaveGroup(request leaveGroupRequestV0) (leaveGroupResponseV0, error) {
   399  	var response leaveGroupResponseV0
   400  
   401  	err := c.writeOperation(
   402  		func(deadline time.Time, id int32) error {
   403  			return c.writeRequest(leaveGroup, v0, id, request)
   404  		},
   405  		func(deadline time.Time, size int) error {
   406  			return expectZeroSize(func() (remain int, err error) {
   407  				return (&response).readFrom(&c.rbuf, size)
   408  			}())
   409  		},
   410  	)
   411  	if err != nil {
   412  		return leaveGroupResponseV0{}, err
   413  	}
   414  	if response.ErrorCode != 0 {
   415  		return leaveGroupResponseV0{}, Error(response.ErrorCode)
   416  	}
   417  
   418  	return response, nil
   419  }
   420  
   421  // listGroups lists all the consumer groups
   422  //
   423  // See http://kafka.apache.org/protocol.html#The_Messages_ListGroups
   424  func (c *Conn) listGroups(request listGroupsRequestV1) (listGroupsResponseV1, error) {
   425  	var response listGroupsResponseV1
   426  
   427  	err := c.readOperation(
   428  		func(deadline time.Time, id int32) error {
   429  			return c.writeRequest(listGroups, v1, id, request)
   430  		},
   431  		func(deadline time.Time, size int) error {
   432  			return expectZeroSize(func() (remain int, err error) {
   433  				return (&response).readFrom(&c.rbuf, size)
   434  			}())
   435  		},
   436  	)
   437  	if err != nil {
   438  		return listGroupsResponseV1{}, err
   439  	}
   440  	if response.ErrorCode != 0 {
   441  		return listGroupsResponseV1{}, Error(response.ErrorCode)
   442  	}
   443  
   444  	return response, nil
   445  }
   446  
   447  // offsetCommit commits the specified topic partition offsets
   448  //
   449  // See http://kafka.apache.org/protocol.html#The_Messages_OffsetCommit
   450  func (c *Conn) offsetCommit(request offsetCommitRequestV2) (offsetCommitResponseV2, error) {
   451  	var response offsetCommitResponseV2
   452  
   453  	err := c.writeOperation(
   454  		func(deadline time.Time, id int32) error {
   455  			return c.writeRequest(offsetCommit, v2, id, request)
   456  		},
   457  		func(deadline time.Time, size int) error {
   458  			return expectZeroSize(func() (remain int, err error) {
   459  				return (&response).readFrom(&c.rbuf, size)
   460  			}())
   461  		},
   462  	)
   463  	if err != nil {
   464  		return offsetCommitResponseV2{}, err
   465  	}
   466  	for _, r := range response.Responses {
   467  		for _, pr := range r.PartitionResponses {
   468  			if pr.ErrorCode != 0 {
   469  				return offsetCommitResponseV2{}, Error(pr.ErrorCode)
   470  			}
   471  		}
   472  	}
   473  
   474  	return response, nil
   475  }
   476  
   477  // offsetFetch fetches the offsets for the specified topic partitions.
   478  // -1 indicates that there is no offset saved for the partition.
   479  //
   480  // See http://kafka.apache.org/protocol.html#The_Messages_OffsetFetch
   481  func (c *Conn) offsetFetch(request offsetFetchRequestV1) (offsetFetchResponseV1, error) {
   482  	var response offsetFetchResponseV1
   483  
   484  	err := c.readOperation(
   485  		func(deadline time.Time, id int32) error {
   486  			return c.writeRequest(offsetFetch, v1, id, request)
   487  		},
   488  		func(deadline time.Time, size int) error {
   489  			return expectZeroSize(func() (remain int, err error) {
   490  				return (&response).readFrom(&c.rbuf, size)
   491  			}())
   492  		},
   493  	)
   494  	if err != nil {
   495  		return offsetFetchResponseV1{}, err
   496  	}
   497  	for _, r := range response.Responses {
   498  		for _, pr := range r.PartitionResponses {
   499  			if pr.ErrorCode != 0 {
   500  				return offsetFetchResponseV1{}, Error(pr.ErrorCode)
   501  			}
   502  		}
   503  	}
   504  
   505  	return response, nil
   506  }
   507  
   508  // syncGroup completes the handshake to join a consumer group
   509  //
   510  // See http://kafka.apache.org/protocol.html#The_Messages_SyncGroup
   511  func (c *Conn) syncGroup(request syncGroupRequestV0) (syncGroupResponseV0, error) {
   512  	var response syncGroupResponseV0
   513  
   514  	err := c.readOperation(
   515  		func(deadline time.Time, id int32) error {
   516  			return c.writeRequest(syncGroup, v0, id, request)
   517  		},
   518  		func(deadline time.Time, size int) error {
   519  			return expectZeroSize(func() (remain int, err error) {
   520  				return (&response).readFrom(&c.rbuf, size)
   521  			}())
   522  		},
   523  	)
   524  	if err != nil {
   525  		return syncGroupResponseV0{}, err
   526  	}
   527  	if response.ErrorCode != 0 {
   528  		return syncGroupResponseV0{}, Error(response.ErrorCode)
   529  	}
   530  
   531  	return response, nil
   532  }
   533  
// Close closes the kafka connection.
// Note that only the underlying transport is closed here; any data still
// sitting in the buffered writer is discarded rather than flushed.
func (c *Conn) Close() error {
	return c.conn.Close()
}
   538  
// LocalAddr returns the local network address.
// It delegates directly to the underlying net.Conn.
func (c *Conn) LocalAddr() net.Addr {
	return c.conn.LocalAddr()
}
   543  
// RemoteAddr returns the remote network address.
// It delegates directly to the underlying net.Conn.
func (c *Conn) RemoteAddr() net.Addr {
	return c.conn.RemoteAddr()
}
   548  
// SetDeadline sets the read and write deadlines associated with the connection.
// It is equivalent to calling both SetReadDeadline and SetWriteDeadline.
//
// A deadline is an absolute time after which I/O operations fail with a timeout
// (see type Error) instead of blocking. The deadline applies to all future and
// pending I/O, not just the immediately following call to Read or Write. After
// a deadline has been exceeded, the connection may be closed if it was found to
// be in an unrecoverable state.
//
// A zero value for t means I/O operations will not time out.
func (c *Conn) SetDeadline(t time.Time) error {
	// The deadlines are only recorded in the connDeadline trackers here; they
	// are not applied to the underlying net.Conn by this call.
	c.rdeadline.setDeadline(t)
	c.wdeadline.setDeadline(t)
	return nil
}
   564  
// SetReadDeadline sets the deadline for future Read calls and any
// currently-blocked Read call.
// A zero value for t means Read will not time out.
func (c *Conn) SetReadDeadline(t time.Time) error {
	// Only the read-side connDeadline tracker is updated; the write side is
	// left untouched.
	c.rdeadline.setDeadline(t)
	return nil
}
   572  
// SetWriteDeadline sets the deadline for future Write calls and any
// currently-blocked Write call.
// Even if write times out, it may return n > 0, indicating that some of the
// data was successfully written.
// A zero value for t means Write will not time out.
func (c *Conn) SetWriteDeadline(t time.Time) error {
	// Only the write-side connDeadline tracker is updated; the read side is
	// left untouched.
	c.wdeadline.setDeadline(t)
	return nil
}
   582  
   583  // Offset returns the current offset of the connection as pair of integers,
   584  // where the first one is an offset value and the second one indicates how
   585  // to interpret it.
   586  //
   587  // See Seek for more details about the offset and whence values.
   588  func (c *Conn) Offset() (offset int64, whence int) {
   589  	c.mutex.Lock()
   590  	offset = c.offset
   591  	c.mutex.Unlock()
   592  
   593  	switch offset {
   594  	case FirstOffset:
   595  		offset = 0
   596  		whence = SeekStart
   597  	case LastOffset:
   598  		offset = 0
   599  		whence = SeekEnd
   600  	default:
   601  		whence = SeekAbsolute
   602  	}
   603  	return
   604  }
   605  
// Whence values accepted by (*Conn).Seek. For historical reasons they do NOT
// match the usual lseek(2)/os.Seek constants.
const (
	SeekStart    = 0 // Seek relative to the first offset available in the partition.
	SeekAbsolute = 1 // Seek to an absolute offset.
	SeekEnd      = 2 // Seek relative to the last offset available in the partition.
	SeekCurrent  = 3 // Seek relative to the current offset.

	// This flag may be combined to any of the SeekAbsolute and SeekCurrent
	// constants to skip the bound check that the connection would do otherwise.
	// Programs can use this flag to avoid making a metadata request to the kafka
	// broker to read the current first and last offsets of the partition.
	SeekDontCheck = 1 << 30
)
   618  
// Seek sets the offset for the next read or write operation according to whence, which
// should be one of SeekStart, SeekAbsolute, SeekEnd, or SeekCurrent.
// When seeking relative to the end, the offset is subtracted from the current offset.
// Note that for historical reasons, these do not align with the usual whence constants
// as in lseek(2) or os.Seek.
// The method returns the new absolute offset of the connection.
func (c *Conn) Seek(offset int64, whence int) (int64, error) {
	// Strip the SeekDontCheck flag before validating the base whence value.
	seekDontCheck := (whence & SeekDontCheck) != 0
	whence &= ^SeekDontCheck

	switch whence {
	case SeekStart, SeekAbsolute, SeekEnd, SeekCurrent:
	default:
		return 0, fmt.Errorf("whence must be one of 0, 1, 2, or 3. (whence = %d)", whence)
	}

	// Fast paths: with SeekDontCheck, absolute and current seeks update the
	// offset without consulting the broker. SeekStart/SeekEnd still fall
	// through because they need the partition bounds to resolve.
	if seekDontCheck {
		if whence == SeekAbsolute {
			c.mutex.Lock()
			c.offset = offset
			c.mutex.Unlock()
			return offset, nil
		}

		if whence == SeekCurrent {
			c.mutex.Lock()
			c.offset += offset
			offset = c.offset
			c.mutex.Unlock()
			return offset, nil
		}
	}

	// An absolute seek to the current offset is a no-op; skip the bounds
	// lookup entirely.
	if whence == SeekAbsolute {
		c.mutex.Lock()
		unchanged := offset == c.offset
		c.mutex.Unlock()
		if unchanged {
			return offset, nil
		}
	}

	// Resolve a relative-to-current seek into an absolute target before the
	// bounds check.
	if whence == SeekCurrent {
		c.mutex.Lock()
		offset = c.offset + offset
		c.mutex.Unlock()
	}

	// Fetch the partition's first and last offsets from the broker to
	// resolve relative seeks and validate the target.
	first, last, err := c.ReadOffsets()
	if err != nil {
		return 0, err
	}

	switch whence {
	case SeekStart:
		offset = first + offset
	case SeekEnd:
		// Relative to the end: the offset is subtracted (see doc comment).
		offset = last - offset
	}

	if offset < first || offset > last {
		return 0, OffsetOutOfRange
	}

	c.mutex.Lock()
	c.offset = offset
	c.mutex.Unlock()
	return offset, nil
}
   688  
   689  // Read reads the message at the current offset from the connection, advancing
   690  // the offset on success so the next call to a read method will produce the next
   691  // message.
   692  // The method returns the number of bytes read, or an error if something went
   693  // wrong.
   694  //
   695  // While it is safe to call Read concurrently from multiple goroutines it may
   696  // be hard for the program to predict the results as the connection offset will
   697  // be read and written by multiple goroutines, they could read duplicates, or
   698  // messages may be seen by only some of the goroutines.
   699  //
   700  // The method fails with io.ErrShortBuffer if the buffer passed as argument is
   701  // too small to hold the message value.
   702  //
   703  // This method is provided to satisfy the net.Conn interface but is much less
   704  // efficient than using the more general purpose ReadBatch method.
   705  func (c *Conn) Read(b []byte) (int, error) {
   706  	batch := c.ReadBatch(1, len(b))
   707  	n, err := batch.Read(b)
   708  	return n, coalesceErrors(silentEOF(err), batch.Close())
   709  }
   710  
   711  // ReadMessage reads the message at the current offset from the connection,
   712  // advancing the offset on success so the next call to a read method will
   713  // produce the next message.
   714  //
   715  // Because this method allocate memory buffers for the message key and value
   716  // it is less memory-efficient than Read, but has the advantage of never
   717  // failing with io.ErrShortBuffer.
   718  //
   719  // While it is safe to call Read concurrently from multiple goroutines it may
   720  // be hard for the program to predict the results as the connection offset will
   721  // be read and written by multiple goroutines, they could read duplicates, or
   722  // messages may be seen by only some of the goroutines.
   723  //
   724  // This method is provided for convenience purposes but is much less efficient
   725  // than using the more general purpose ReadBatch method.
   726  func (c *Conn) ReadMessage(maxBytes int) (Message, error) {
   727  	batch := c.ReadBatch(1, maxBytes)
   728  	msg, err := batch.ReadMessage()
   729  	return msg, coalesceErrors(silentEOF(err), batch.Close())
   730  }
   731  
   732  // ReadBatch reads a batch of messages from the kafka server. The method always
   733  // returns a non-nil Batch value. If an error occurred, either sending the fetch
   734  // request or reading the response, the error will be made available by the
   735  // returned value of  the batch's Close method.
   736  //
   737  // While it is safe to call ReadBatch concurrently from multiple goroutines it
   738  // may be hard for the program to predict the results as the connection offset
   739  // will be read and written by multiple goroutines, they could read duplicates,
   740  // or messages may be seen by only some of the goroutines.
   741  //
   742  // A program doesn't specify the number of messages in wants from a batch, but
   743  // gives the minimum and maximum number of bytes that it wants to receive from
   744  // the kafka server.
   745  func (c *Conn) ReadBatch(minBytes, maxBytes int) *Batch {
   746  	return c.ReadBatchWith(ReadBatchConfig{
   747  		MinBytes: minBytes,
   748  		MaxBytes: maxBytes,
   749  	})
   750  }
   751  
// ReadBatchWith in every way is similar to ReadBatch. ReadBatch is configured
// with the default values in ReadBatchConfig except for minBytes and maxBytes.
func (c *Conn) ReadBatchWith(cfg ReadBatchConfig) *Batch {

	var adjustedDeadline time.Time
	var maxFetch = int(c.fetchMaxBytes)

	// Validate the byte bounds against the largest fetch this connection can
	// issue. NOTE(review): the messages claim a lower bound of 1 but the
	// checks admit 0 — presumably intentional for backward compatibility;
	// confirm before tightening.
	if cfg.MinBytes < 0 || cfg.MinBytes > maxFetch {
		return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: minBytes of %d out of [1,%d] bounds", cfg.MinBytes, maxFetch)}
	}
	if cfg.MaxBytes < 0 || cfg.MaxBytes > maxFetch {
		return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: maxBytes of %d out of [1,%d] bounds", cfg.MaxBytes, maxFetch)}
	}
	if cfg.MinBytes > cfg.MaxBytes {
		return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: minBytes (%d) > maxBytes (%d)", cfg.MinBytes, cfg.MaxBytes)}
	}

	// Resolve the connection's current offset to an absolute value without
	// paying for a broker-side bounds check (SeekDontCheck).
	offset, whence := c.Offset()

	offset, err := c.Seek(offset, whence|SeekDontCheck)
	if err != nil {
		return &Batch{err: dontExpectEOF(err)}
	}

	// Pick the highest fetch version both sides support among v2, v5, v10.
	fetchVersion, err := c.negotiateVersion(fetch, v2, v5, v10)
	if err != nil {
		return &Batch{err: dontExpectEOF(err)}
	}

	id, err := c.doRequest(&c.rdeadline, func(deadline time.Time, id int32) error {
		now := time.Now()
		var timeout time.Duration
		if cfg.MaxWait > 0 {
			// explicitly-configured case: no changes are made to the deadline,
			// and the timeout is sent exactly as specified.
			timeout = cfg.MaxWait
		} else {
			// default case: use the original logic to adjust the conn's
			// deadline.T
			deadline = adjustDeadlineForRTT(deadline, now, defaultRTT)
			timeout = deadlineToTimeout(deadline, now)
		}
		// save this variable outside of the closure for later use in detecting
		// truncated messages.
		adjustedDeadline = deadline
		// The fetch MaxBytes is padded with fetchMinSize so the response
		// control data always fits (see NewConnWith).
		switch fetchVersion {
		case v10:
			return c.wb.writeFetchRequestV10(
				id,
				c.clientID,
				c.topic,
				c.partition,
				offset,
				cfg.MinBytes,
				cfg.MaxBytes+int(c.fetchMinSize),
				timeout,
				int8(cfg.IsolationLevel),
			)
		case v5:
			return c.wb.writeFetchRequestV5(
				id,
				c.clientID,
				c.topic,
				c.partition,
				offset,
				cfg.MinBytes,
				cfg.MaxBytes+int(c.fetchMinSize),
				timeout,
				int8(cfg.IsolationLevel),
			)
		default:
			return c.wb.writeFetchRequestV2(
				id,
				c.clientID,
				c.topic,
				c.partition,
				offset,
				cfg.MinBytes,
				cfg.MaxBytes+int(c.fetchMinSize),
				timeout,
			)
		}
	})
	if err != nil {
		return &Batch{err: dontExpectEOF(err)}
	}

	// Block until the correlated response arrives; lock guards the read
	// buffer for the lifetime of the returned Batch.
	_, size, lock, err := c.waitResponse(&c.rdeadline, id)
	if err != nil {
		return &Batch{err: dontExpectEOF(err)}
	}

	var throttle int32
	var highWaterMark int64
	var remain int

	// Parse the response header with the reader matching the request version.
	switch fetchVersion {
	case v10:
		throttle, highWaterMark, remain, err = readFetchResponseHeaderV10(&c.rbuf, size)
	case v5:
		throttle, highWaterMark, remain, err = readFetchResponseHeaderV5(&c.rbuf, size)
	default:
		throttle, highWaterMark, remain, err = readFetchResponseHeaderV2(&c.rbuf, size)
	}
	// A short read past the adjusted deadline is reported as a timeout.
	if errors.Is(err, errShortRead) {
		err = checkTimeoutErr(adjustedDeadline)
	}

	var msgs *messageSetReader
	if err == nil {
		// Reaching the high watermark means there is nothing to read: return
		// an empty batch rather than parsing an empty message set.
		if highWaterMark == offset {
			msgs = &messageSetReader{empty: true}
		} else {
			msgs, err = newMessageSetReader(&c.rbuf, remain)
		}
	}
	if errors.Is(err, errShortRead) {
		err = checkTimeoutErr(adjustedDeadline)
	}

	return &Batch{
		conn:          c,
		msgs:          msgs,
		deadline:      adjustedDeadline,
		throttle:      makeDuration(throttle),
		lock:          lock,
		topic:         c.topic,          // topic is copied to Batch to prevent race with Batch.close
		partition:     int(c.partition), // partition is copied to Batch to prevent race with Batch.close
		offset:        offset,
		highWaterMark: highWaterMark,
		// there shouldn't be a short read on initially setting up the batch.
		// as such, any io.EOF is re-mapped to an io.ErrUnexpectedEOF so that we
		// don't accidentally signal that we successfully reached the end of the
		// batch.
		err: dontExpectEOF(err),
	}
}
   889  
// ReadOffset returns the offset of the first message with a timestamp equal or
// greater to t.
func (c *Conn) ReadOffset(t time.Time) (int64, error) {
	// timestamp converts t to the millisecond representation used by the
	// ListOffsets API.
	return c.readOffset(timestamp(t))
}
   895  
// ReadFirstOffset returns the first offset available on the connection.
func (c *Conn) ReadFirstOffset() (int64, error) {
	// FirstOffset is the sentinel value understood by the ListOffsets API.
	return c.readOffset(FirstOffset)
}
   900  
// ReadLastOffset returns the last offset available on the connection.
func (c *Conn) ReadLastOffset() (int64, error) {
	// LastOffset is the sentinel value understood by the ListOffsets API.
	return c.readOffset(LastOffset)
}
   905  
   906  // ReadOffsets returns the absolute first and last offsets of the topic used by
   907  // the connection.
   908  func (c *Conn) ReadOffsets() (first, last int64, err error) {
   909  	// We have to submit two different requests to fetch the first and last
   910  	// offsets because kafka refuses requests that ask for multiple offsets
   911  	// on the same topic and partition.
   912  	if first, err = c.ReadFirstOffset(); err != nil {
   913  		return
   914  	}
   915  	if last, err = c.ReadLastOffset(); err != nil {
   916  		first = 0 // don't leak the value on error
   917  		return
   918  	}
   919  	return
   920  }
   921  
// readOffset sends a ListOffsets (v1) request for the connection's topic and
// partition and returns the single offset matching t. t is either a
// millisecond timestamp or one of the FirstOffset/LastOffset sentinel values.
func (c *Conn) readOffset(t int64) (offset int64, err error) {
	err = c.readOperation(
		func(deadline time.Time, id int32) error {
			return c.wb.writeListOffsetRequestV1(id, c.clientID, c.topic, c.partition, t)
		},
		func(deadline time.Time, size int) error {
			// The response must be fully consumed, hence expectZeroSize.
			return expectZeroSize(readArrayWith(&c.rbuf, size, func(r *bufio.Reader, size int) (int, error) {
				// We skip the topic name because we've made a request for
				// a single topic.
				size, err := discardString(r, size)
				if err != nil {
					return size, err
				}

				// Reading the array of partitions, there will be only one
				// partition which gives the offset we're looking for.
				return readArrayWith(r, size, func(r *bufio.Reader, size int) (int, error) {
					var p partitionOffsetV1
					size, err := p.readFrom(r, size)
					if err != nil {
						return size, err
					}
					if p.ErrorCode != 0 {
						// Broker-side error for this partition.
						return size, Error(p.ErrorCode)
					}
					offset = p.Offset
					return size, nil
				})
			}))
		},
	)
	return
}
   955  
   956  // ReadPartitions returns the list of available partitions for the given list of
   957  // topics.
   958  //
   959  // If the method is called with no topic, it uses the topic configured on the
   960  // connection. If there are none, the method fetches all partitions of the kafka
   961  // cluster.
   962  func (c *Conn) ReadPartitions(topics ...string) (partitions []Partition, err error) {
   963  
   964  	if len(topics) == 0 {
   965  		if len(c.topic) != 0 {
   966  			defaultTopics := [...]string{c.topic}
   967  			topics = defaultTopics[:]
   968  		} else {
   969  			// topics needs to be explicitly nil-ed out or the broker will
   970  			// interpret it as a request for 0 partitions instead of all.
   971  			topics = nil
   972  		}
   973  	}
   974  
   975  	err = c.readOperation(
   976  		func(deadline time.Time, id int32) error {
   977  			return c.writeRequest(metadata, v1, id, topicMetadataRequestV1(topics))
   978  		},
   979  		func(deadline time.Time, size int) error {
   980  			var res metadataResponseV1
   981  
   982  			if err := c.readResponse(size, &res); err != nil {
   983  				return err
   984  			}
   985  
   986  			brokers := make(map[int32]Broker, len(res.Brokers))
   987  			for _, b := range res.Brokers {
   988  				brokers[b.NodeID] = Broker{
   989  					Host: b.Host,
   990  					Port: int(b.Port),
   991  					ID:   int(b.NodeID),
   992  					Rack: b.Rack,
   993  				}
   994  			}
   995  
   996  			for _, t := range res.Topics {
   997  				if t.TopicErrorCode != 0 && (c.topic == "" || t.TopicName == c.topic) {
   998  					// We only report errors if they happened for the topic of
   999  					// the connection, otherwise the topic will simply have no
  1000  					// partitions in the result set.
  1001  					return Error(t.TopicErrorCode)
  1002  				}
  1003  				for _, p := range t.Partitions {
  1004  					partitions = append(partitions, Partition{
  1005  						Topic:    t.TopicName,
  1006  						Leader:   brokers[p.Leader],
  1007  						Replicas: makeBrokers(brokers, p.Replicas...),
  1008  						Isr:      makeBrokers(brokers, p.Isr...),
  1009  						ID:       int(p.PartitionID),
  1010  					})
  1011  				}
  1012  			}
  1013  			return nil
  1014  		},
  1015  	)
  1016  	return
  1017  }
  1018  
  1019  func makeBrokers(brokers map[int32]Broker, ids ...int32) []Broker {
  1020  	b := make([]Broker, 0, len(ids))
  1021  	for _, id := range ids {
  1022  		if br, ok := brokers[id]; ok {
  1023  			b = append(b, br)
  1024  		}
  1025  	}
  1026  	return b
  1027  }
  1028  
// Write writes a message to the kafka broker that this connection was
// established to. The method returns the number of bytes written, or an error
// if something went wrong.
//
// The operation either succeeds or fail, it never partially writes the message.
//
// This method is exposed to satisfy the net.Conn interface but is less efficient
// than the more general purpose WriteMessages method.
func (c *Conn) Write(b []byte) (int, error) {
	// Delegate to the batch API with a single uncompressed message.
	return c.WriteCompressedMessages(nil, Message{Value: b})
}
  1040  
// WriteMessages writes a batch of messages to the connection's topic and
// partition, returning the number of bytes written. The write is an atomic
// operation, it either fully succeeds or fails.
func (c *Conn) WriteMessages(msgs ...Message) (int, error) {
	// A nil codec means the messages are written uncompressed.
	return c.WriteCompressedMessages(nil, msgs...)
}
  1047  
// WriteCompressedMessages writes a batch of messages to the connection's topic
// and partition, returning the number of bytes written. The write is an atomic
// operation, it either fully succeeds or fails.
//
// If the compression codec is not nil, the messages will be compressed.
func (c *Conn) WriteCompressedMessages(codec CompressionCodec, msgs ...Message) (nbytes int, err error) {
	// Drop the partition, offset and append-time details reported by the
	// broker; callers that need them use WriteCompressedMessagesAt.
	nbytes, _, _, _, err = c.writeCompressedMessages(codec, msgs...)
	return
}
  1057  
// WriteCompressedMessagesAt writes a batch of messages to the connection's topic
// and partition, returning the number of bytes written, partition and offset numbers
// and timestamp assigned by the kafka broker to the message set. The write is an atomic
// operation, it either fully succeeds or fails.
//
// If the compression codec is not nil, the messages will be compressed.
func (c *Conn) WriteCompressedMessagesAt(codec CompressionCodec, msgs ...Message) (nbytes int, partition int32, offset int64, appendTime time.Time, err error) {
	// Thin exported wrapper over the unexported implementation.
	return c.writeCompressedMessages(codec, msgs...)
}
  1067  
  1068  func (c *Conn) writeCompressedMessages(codec CompressionCodec, msgs ...Message) (nbytes int, partition int32, offset int64, appendTime time.Time, err error) {
  1069  	if len(msgs) == 0 {
  1070  		return
  1071  	}
  1072  
  1073  	writeTime := time.Now()
  1074  	for i, msg := range msgs {
  1075  		// users may believe they can set the Topic and/or Partition
  1076  		// on the kafka message.
  1077  		if msg.Topic != "" && msg.Topic != c.topic {
  1078  			err = errInvalidWriteTopic
  1079  			return
  1080  		}
  1081  		if msg.Partition != 0 {
  1082  			err = errInvalidWritePartition
  1083  			return
  1084  		}
  1085  
  1086  		if msg.Time.IsZero() {
  1087  			msgs[i].Time = writeTime
  1088  		}
  1089  
  1090  		nbytes += len(msg.Key) + len(msg.Value)
  1091  	}
  1092  
  1093  	var produceVersion apiVersion
  1094  	if produceVersion, err = c.negotiateVersion(produce, v2, v3, v7); err != nil {
  1095  		return
  1096  	}
  1097  
  1098  	err = c.writeOperation(
  1099  		func(deadline time.Time, id int32) error {
  1100  			now := time.Now()
  1101  			deadline = adjustDeadlineForRTT(deadline, now, defaultRTT)
  1102  			switch produceVersion {
  1103  			case v7:
  1104  				recordBatch, err :=
  1105  					newRecordBatch(
  1106  						codec,
  1107  						msgs...,
  1108  					)
  1109  				if err != nil {
  1110  					return err
  1111  				}
  1112  				return c.wb.writeProduceRequestV7(
  1113  					id,
  1114  					c.clientID,
  1115  					c.topic,
  1116  					c.partition,
  1117  					deadlineToTimeout(deadline, now),
  1118  					int16(atomic.LoadInt32(&c.requiredAcks)),
  1119  					c.transactionalID,
  1120  					recordBatch,
  1121  				)
  1122  			case v3:
  1123  				recordBatch, err :=
  1124  					newRecordBatch(
  1125  						codec,
  1126  						msgs...,
  1127  					)
  1128  				if err != nil {
  1129  					return err
  1130  				}
  1131  				return c.wb.writeProduceRequestV3(
  1132  					id,
  1133  					c.clientID,
  1134  					c.topic,
  1135  					c.partition,
  1136  					deadlineToTimeout(deadline, now),
  1137  					int16(atomic.LoadInt32(&c.requiredAcks)),
  1138  					c.transactionalID,
  1139  					recordBatch,
  1140  				)
  1141  			default:
  1142  				return c.wb.writeProduceRequestV2(
  1143  					codec,
  1144  					id,
  1145  					c.clientID,
  1146  					c.topic,
  1147  					c.partition,
  1148  					deadlineToTimeout(deadline, now),
  1149  					int16(atomic.LoadInt32(&c.requiredAcks)),
  1150  					msgs...,
  1151  				)
  1152  			}
  1153  		},
  1154  		func(deadline time.Time, size int) error {
  1155  			return expectZeroSize(readArrayWith(&c.rbuf, size, func(r *bufio.Reader, size int) (int, error) {
  1156  				// Skip the topic, we've produced the message to only one topic,
  1157  				// no need to waste resources loading it in memory.
  1158  				size, err := discardString(r, size)
  1159  				if err != nil {
  1160  					return size, err
  1161  				}
  1162  
  1163  				// Read the list of partitions, there should be only one since
  1164  				// we've produced a message to a single partition.
  1165  				size, err = readArrayWith(r, size, func(r *bufio.Reader, size int) (int, error) {
  1166  					switch produceVersion {
  1167  					case v7:
  1168  						var p produceResponsePartitionV7
  1169  						size, err := p.readFrom(r, size)
  1170  						if err == nil && p.ErrorCode != 0 {
  1171  							err = Error(p.ErrorCode)
  1172  						}
  1173  						if err == nil {
  1174  							partition = p.Partition
  1175  							offset = p.Offset
  1176  							appendTime = time.Unix(0, p.Timestamp*int64(time.Millisecond))
  1177  						}
  1178  						return size, err
  1179  					default:
  1180  						var p produceResponsePartitionV2
  1181  						size, err := p.readFrom(r, size)
  1182  						if err == nil && p.ErrorCode != 0 {
  1183  							err = Error(p.ErrorCode)
  1184  						}
  1185  						if err == nil {
  1186  							partition = p.Partition
  1187  							offset = p.Offset
  1188  							appendTime = time.Unix(0, p.Timestamp*int64(time.Millisecond))
  1189  						}
  1190  						return size, err
  1191  					}
  1192  
  1193  				})
  1194  				if err != nil {
  1195  					return size, err
  1196  				}
  1197  
  1198  				// The response is trailed by the throttle time, also skipping
  1199  				// since it's not interesting here.
  1200  				return discardInt32(r, size)
  1201  			}))
  1202  		},
  1203  	)
  1204  
  1205  	if err != nil {
  1206  		nbytes = 0
  1207  	}
  1208  
  1209  	return
  1210  }
  1211  
  1212  // SetRequiredAcks sets the number of acknowledges from replicas that the
  1213  // connection requests when producing messages.
  1214  func (c *Conn) SetRequiredAcks(n int) error {
  1215  	switch n {
  1216  	case -1, 1:
  1217  		atomic.StoreInt32(&c.requiredAcks, int32(n))
  1218  		return nil
  1219  	default:
  1220  		return InvalidRequiredAcks
  1221  	}
  1222  }
  1223  
// writeRequest serializes the request header followed by the request body into
// the connection's write buffer and flushes it to the network. The header's
// Size field is the total serialized size minus the 4 bytes of the size field
// itself. NOTE(review): presumably always invoked while wlock is held (via
// doRequest) — confirm before calling from new code paths.
func (c *Conn) writeRequest(apiKey apiKey, apiVersion apiVersion, correlationID int32, req request) error {
	hdr := c.requestHeader(apiKey, apiVersion, correlationID)
	hdr.Size = (hdr.size() + req.size()) - 4
	hdr.writeTo(&c.wb)
	req.writeTo(&c.wb)
	return c.wbuf.Flush()
}
  1231  
// readResponse decodes size bytes of the current response into res, then
// verifies the payload was fully consumed.
//
// If decoding fails with a kafka protocol Error, the remaining bytes are
// discarded so the read buffer stays aligned on the next response. Note that
// in that case the returned error becomes the result of the discard —
// presumably deliberate so the connection remains usable and callers inspect
// res for the error code; TODO(review): confirm against callers.
func (c *Conn) readResponse(size int, res interface{}) error {
	size, err := read(&c.rbuf, size, res)
	if err != nil {
		var kafkaError Error
		if errors.As(err, &kafkaError) {
			size, err = discardN(&c.rbuf, size, size)
		}
	}
	return expectZeroSize(size, err)
}
  1242  
  1243  func (c *Conn) peekResponseSizeAndID() (int32, int32, error) {
  1244  	b, err := c.rbuf.Peek(8)
  1245  	if err != nil {
  1246  		return 0, 0, err
  1247  	}
  1248  	size, id := makeInt32(b[:4]), makeInt32(b[4:])
  1249  	return size, id, nil
  1250  }
  1251  
// skipResponseSizeAndID consumes the 8 bytes of size and correlation id that
// were previously observed with peekResponseSizeAndID. The Discard error is
// intentionally ignored: the bytes are already buffered by the preceding Peek,
// so the discard cannot fail.
func (c *Conn) skipResponseSizeAndID() {
	c.rbuf.Discard(8)
}
  1255  
// readDeadline returns the read deadline currently configured on the
// connection.
func (c *Conn) readDeadline() time.Time {
	return c.rdeadline.deadline()
}
  1259  
// writeDeadline returns the write deadline currently configured on the
// connection.
func (c *Conn) writeDeadline() time.Time {
	return c.wdeadline.deadline()
}
  1263  
// readOperation runs a request/response cycle governed by the connection's
// read deadline.
func (c *Conn) readOperation(write func(time.Time, int32) error, read func(time.Time, int) error) error {
	return c.do(&c.rdeadline, write, read)
}
  1267  
// writeOperation runs a request/response cycle governed by the connection's
// write deadline.
func (c *Conn) writeOperation(write func(time.Time, int32) error, read func(time.Time, int) error) error {
	return c.do(&c.wdeadline, write, read)
}
  1271  
// enter increments the count of in-flight requests on the connection.
func (c *Conn) enter() {
	atomic.AddInt32(&c.inflight, +1)
}
  1275  
// leave decrements the count of in-flight requests on the connection.
func (c *Conn) leave() {
	atomic.AddInt32(&c.inflight, -1)
}
  1279  
// concurrency returns the number of requests currently in flight on the
// connection; waitResponse uses it to tell a correlation id mismatch caused
// by concurrent readers apart from corrupted wire data.
func (c *Conn) concurrency() int {
	return int(atomic.LoadInt32(&c.inflight))
}
  1283  
// do runs a full request/response cycle: it sends the request built by the
// write callback, waits for the matching response, and invokes the read
// callback to consume its payload. A read failure that is not a kafka
// protocol Error closes the connection, since the response stream may be
// left in an inconsistent state.
func (c *Conn) do(d *connDeadline, write func(time.Time, int32) error, read func(time.Time, int) error) error {
	id, err := c.doRequest(d, write)
	if err != nil {
		return err
	}

	deadline, size, lock, err := c.waitResponse(d, id)
	if err != nil {
		return err
	}

	if err = read(deadline, size); err != nil {
		var kafkaError Error
		if !errors.As(err, &kafkaError) {
			c.conn.Close()
		}
	}

	// waitResponse returned holding the read lock; release it now that the
	// response payload has been consumed.
	d.unsetConnReadDeadline()
	lock.Unlock()
	return err
}
  1306  
// doRequest allocates the next correlation id and invokes the write callback
// under the write lock and write deadline, returning the id the caller must
// wait for. On write failure the connection is closed and the in-flight
// counter is decremented, since no response will be awaited.
func (c *Conn) doRequest(d *connDeadline, write func(time.Time, int32) error) (id int32, err error) {
	c.enter()
	c.wlock.Lock()
	c.correlationID++
	id = c.correlationID
	err = write(d.setConnWriteDeadline(c.conn), id)
	d.unsetConnWriteDeadline()

	if err != nil {
		// When an error occurs there's no way to know if the connection is in a
		// recoverable state so we're better off just giving up at this point to
		// avoid any risk of corrupting the following operations.
		c.conn.Close()
		c.leave()
	}

	c.wlock.Unlock()
	return
}
  1326  
// waitResponse blocks until the response matching correlation id arrives on
// the connection. On success the read mutex is still held and is returned as
// lock, transferring ownership of the read buffer to the caller, who must
// unlock it after consuming the response payload. The in-flight counter is
// decremented before returning in every case.
func (c *Conn) waitResponse(d *connDeadline, id int32) (deadline time.Time, size int, lock *sync.Mutex, err error) {
	for {
		var rsz int32
		var rid int32

		c.rlock.Lock()
		deadline = d.setConnReadDeadline(c.conn)
		rsz, rid, err = c.peekResponseSizeAndID()

		if err != nil {
			// Peek failures (timeouts included) invalidate the connection.
			d.unsetConnReadDeadline()
			c.conn.Close()
			c.rlock.Unlock()
			break
		}

		if id == rid {
			c.skipResponseSizeAndID()
			// size excludes the 4-byte correlation id already consumed.
			size, lock = int(rsz-4), &c.rlock
			// Don't unlock the read mutex to yield ownership to the caller.
			break
		}

		if c.concurrency() == 1 {
			// If the goroutine is the only one waiting on this connection it
			// should be impossible to read a correlation id different from the
			// one it expects. This is a sign that the data we are reading on
			// the wire is corrupted and the connection needs to be closed.
			err = io.ErrNoProgress
			c.rlock.Unlock()
			break
		}

		// Optimistically release the read lock if a response has already
		// been received but the current operation is not the target for it.
		c.rlock.Unlock()
		runtime.Gosched()
	}

	c.leave()
	return
}
  1369  
  1370  func (c *Conn) requestHeader(apiKey apiKey, apiVersion apiVersion, correlationID int32) requestHeader {
  1371  	return requestHeader{
  1372  		ApiKey:        int16(apiKey),
  1373  		ApiVersion:    int16(apiVersion),
  1374  		CorrelationID: correlationID,
  1375  		ClientID:      c.clientID,
  1376  	}
  1377  }
  1378  
// ApiVersions queries the broker for the API keys it supports and the range
// of versions accepted for each of them, using v0 of the ApiVersions API.
// When the broker reports a non-zero error code, the decoded versions are
// returned alongside the error.
func (c *Conn) ApiVersions() ([]ApiVersion, error) {
	deadline := &c.rdeadline

	if deadline.deadline().IsZero() {
		// ApiVersions is called automatically when API version negotiation
		// needs to happen, so we are not guaranteed that a read deadline has
		// been set yet. Fallback to use the write deadline in case it was
		// set, for example when version negotiation is initiated during a
		// produce request.
		deadline = &c.wdeadline
	}

	id, err := c.doRequest(deadline, func(_ time.Time, id int32) error {
		// The ApiVersions v0 request has no body, only the standard header.
		h := requestHeader{
			ApiKey:        int16(apiVersions),
			ApiVersion:    int16(v0),
			CorrelationID: id,
			ClientID:      c.clientID,
		}
		h.Size = (h.size() - 4)
		h.writeTo(&c.wb)
		return c.wbuf.Flush()
	})
	if err != nil {
		return nil, err
	}

	_, size, lock, err := c.waitResponse(deadline, id)
	if err != nil {
		return nil, err
	}
	// waitResponse returned holding the read lock; release it once the
	// response has been fully decoded.
	defer lock.Unlock()

	// Response layout: error code, then an array of (api key, min version,
	// max version) triplets.
	var errorCode int16
	if size, err = readInt16(&c.rbuf, size, &errorCode); err != nil {
		return nil, err
	}
	var arrSize int32
	if size, err = readInt32(&c.rbuf, size, &arrSize); err != nil {
		return nil, err
	}
	r := make([]ApiVersion, arrSize)
	for i := 0; i < int(arrSize); i++ {
		if size, err = readInt16(&c.rbuf, size, &r[i].ApiKey); err != nil {
			return nil, err
		}
		if size, err = readInt16(&c.rbuf, size, &r[i].MinVersion); err != nil {
			return nil, err
		}
		if size, err = readInt16(&c.rbuf, size, &r[i].MaxVersion); err != nil {
			return nil, err
		}
	}

	if errorCode != 0 {
		return r, Error(errorCode)
	}

	return r, nil
}
  1439  
// connDeadline is a helper type to implement read/write deadline management on
// the kafka connection.
type connDeadline struct {
	mutex sync.Mutex // guards all fields below
	value time.Time  // deadline currently in effect
	rconn net.Conn   // connection the read deadline applies to, nil when unset
	wconn net.Conn   // connection the write deadline applies to, nil when unset
}
  1448  
  1449  func (d *connDeadline) deadline() time.Time {
  1450  	d.mutex.Lock()
  1451  	t := d.value
  1452  	d.mutex.Unlock()
  1453  	return t
  1454  }
  1455  
  1456  func (d *connDeadline) setDeadline(t time.Time) {
  1457  	d.mutex.Lock()
  1458  	d.value = t
  1459  
  1460  	if d.rconn != nil {
  1461  		d.rconn.SetReadDeadline(t)
  1462  	}
  1463  
  1464  	if d.wconn != nil {
  1465  		d.wconn.SetWriteDeadline(t)
  1466  	}
  1467  
  1468  	d.mutex.Unlock()
  1469  }
  1470  
  1471  func (d *connDeadline) setConnReadDeadline(conn net.Conn) time.Time {
  1472  	d.mutex.Lock()
  1473  	deadline := d.value
  1474  	d.rconn = conn
  1475  	d.rconn.SetReadDeadline(deadline)
  1476  	d.mutex.Unlock()
  1477  	return deadline
  1478  }
  1479  
  1480  func (d *connDeadline) setConnWriteDeadline(conn net.Conn) time.Time {
  1481  	d.mutex.Lock()
  1482  	deadline := d.value
  1483  	d.wconn = conn
  1484  	d.wconn.SetWriteDeadline(deadline)
  1485  	d.mutex.Unlock()
  1486  	return deadline
  1487  }
  1488  
  1489  func (d *connDeadline) unsetConnReadDeadline() {
  1490  	d.mutex.Lock()
  1491  	d.rconn = nil
  1492  	d.mutex.Unlock()
  1493  }
  1494  
  1495  func (d *connDeadline) unsetConnWriteDeadline() {
  1496  	d.mutex.Lock()
  1497  	d.wconn = nil
  1498  	d.mutex.Unlock()
  1499  }
  1500  
// saslHandshake sends the SASL handshake message.  This will determine whether
// the Mechanism is supported by the cluster.  If it's not, this function will
// error out with UnsupportedSASLMechanism.
//
// If the mechanism is unsupported, the handshake request will reply with the
// list of the cluster's configured mechanisms, which could potentially be used
// to facilitate negotiation.  At the moment, we are not negotiating the
// mechanism as we believe that brokers are usually known to the client, and
// therefore the client should already know which mechanisms are supported.
//
// See http://kafka.apache.org/protocol.html#The_Messages_SaslHandshake
func (c *Conn) saslHandshake(mechanism string) error {
	// The wire format for V0 and V1 is identical, but the version
	// number will affect how the SASL authentication
	// challenge/responses are sent
	var resp saslHandshakeResponseV0

	version, err := c.negotiateVersion(saslHandshake, v0, v1)
	if err != nil {
		return err
	}

	err = c.writeOperation(
		func(deadline time.Time, id int32) error {
			return c.writeRequest(saslHandshake, version, id, &saslHandshakeRequestV0{Mechanism: mechanism})
		},
		func(deadline time.Time, size int) error {
			// The response must be fully consumed, hence expectZeroSize.
			return expectZeroSize(func() (int, error) {
				return (&resp).readFrom(&c.rbuf, size)
			}())
		},
	)
	// A broker-reported error code takes precedence over a clean transport.
	if err == nil && resp.ErrorCode != 0 {
		err = Error(resp.ErrorCode)
	}
	return err
}
  1538  
// saslAuthenticate sends the SASL authenticate message.  This function must
// be immediately preceded by a successful saslHandshake.
//
// See http://kafka.apache.org/protocol.html#The_Messages_SaslAuthenticate
func (c *Conn) saslAuthenticate(data []byte) ([]byte, error) {
	// if we sent a v1 handshake, then we must encapsulate the authentication
	// request in a saslAuthenticateRequest.  otherwise, we read and write raw
	// bytes.
	version, err := c.negotiateVersion(saslHandshake, v0, v1)
	if err != nil {
		return nil, err
	}
	if version == v1 {
		var request = saslAuthenticateRequestV0{Data: data}
		var response saslAuthenticateResponseV0

		err := c.writeOperation(
			func(deadline time.Time, id int32) error {
				return c.writeRequest(saslAuthenticate, v0, id, request)
			},
			func(deadline time.Time, size int) error {
				return expectZeroSize(func() (remain int, err error) {
					return (&response).readFrom(&c.rbuf, size)
				}())
			},
		)
		// A broker-reported error code takes precedence over a clean
		// transport.
		if err == nil && response.ErrorCode != 0 {
			err = Error(response.ErrorCode)
		}
		return response.Data, err
	}

	// fall back to opaque bytes on the wire.  the broker is expecting these if
	// it just processed a v0 sasl handshake.
	c.wb.writeInt32(int32(len(data)))
	if _, err := c.wb.Write(data); err != nil {
		return nil, err
	}
	if err := c.wb.Flush(); err != nil {
		return nil, err
	}

	// The raw response is a 4-byte length prefix followed by that many bytes
	// of opaque SASL payload.
	var respLen int32
	if _, err := readInt32(&c.rbuf, 4, &respLen); err != nil {
		return nil, err
	}

	resp, _, err := readNewBytes(&c.rbuf, int(respLen), int(respLen))
	return resp, err
}