github.com/anacrolix/torrent@v1.61.0/peerconn.go (about)

     1  package torrent
     2  
     3  import (
     4  	"bufio"
     5  	"bytes"
     6  	"errors"
     7  	"fmt"
     8  	"io"
     9  	"log/slog"
    10  	"math/rand"
    11  	"net"
    12  	"net/netip"
    13  	"strconv"
    14  	"strings"
    15  	"sync/atomic"
    16  	"time"
    17  	"weak"
    18  
    19  	"github.com/RoaringBitmap/roaring"
    20  	"github.com/anacrolix/chansync"
    21  	"github.com/anacrolix/generics"
    22  	. "github.com/anacrolix/generics"
    23  	"github.com/anacrolix/log"
    24  	"github.com/anacrolix/missinggo/v2/bitmap"
    25  	"github.com/anacrolix/missinggo/v2/panicif"
    26  	"github.com/anacrolix/multiless"
    27  
    28  	"golang.org/x/time/rate"
    29  
    30  	"github.com/anacrolix/torrent/bencode"
    31  	requestStrategy "github.com/anacrolix/torrent/internal/request-strategy"
    32  
    33  	"github.com/anacrolix/torrent/merkle"
    34  	"github.com/anacrolix/torrent/metainfo"
    35  	"github.com/anacrolix/torrent/mse"
    36  	pp "github.com/anacrolix/torrent/peer_protocol"
    37  	utHolepunch "github.com/anacrolix/torrent/peer_protocol/ut-holepunch"
    38  )
    39  
// PeerStatus is a snapshot of a peer connection's identity and health, suitable for
// serialization in status output.
type PeerStatus struct {
	Id PeerID
	Ok bool
	// Error text rather than an error value; see https://github.com/golang/go/issues/5161
	// (errors don't round-trip through encoding packages).
	Err string
}
    45  
// Maintains the state of a BitTorrent-protocol based connection with a peer.
type PeerConn struct {
	Peer

	// Indexed by metadata piece, set to true if posted and pending a response.
	metadataRequests []bool
	// Pieces we've already announced to the peer via HAVE or Bitfield.
	sentHaves bitmap.Bitmap
	// Chunks that we might reasonably expect to receive from the peer. Due to latency, buffering,
	// and implementation differences, we may receive chunks that are no longer in the set of
	// requests we actually want. This could use a roaring.BSI if the memory use becomes noticeable.
	validReceiveChunks map[RequestIndex]int
	PeerMaxRequests    maxRequests // Maximum pending requests the peer allows.

	// Move to PeerConn?
	protocolLogger log.Logger

	// BEP 52
	v2 bool

	// A string that should identify the PeerConn's net.Conn endpoints. The net.Conn could
	// be wrapping WebRTC, uTP, or TCP etc. Used in writing the conn status for peers.
	connString string

	// See BEP 3 etc.
	PeerID             PeerID
	PeerExtensionBytes pp.PeerExtensionBits
	PeerListenPort     int

	// The local extended protocols to advertise in the extended handshake, and to support receiving
	// from the peer. This will point to the Client default when the PeerConnAdded callback is
	// invoked. Do not modify this, point it to your own instance. Do not modify the destination
	// after returning from the callback.
	LocalLtepProtocolMap *LocalLtepProtocolMap

	// The actual Conn, used for closing, and setting socket options. Do not use methods on this
	// while holding any mutexes.
	conn net.Conn
	// The Reader and Writer for this Conn, with hooks installed for stats,
	// limiting, deadlines etc.
	w io.Writer
	r io.Reader

	messageWriter peerConnMsgWriter

	// The peer's extension map, as sent in their extended handshake.
	PeerExtensionIDs map[pp.ExtensionName]pp.ExtensionNumber
	PeerClientName   atomic.Value
	uploadTimer      *time.Timer
	pex              pexConnState

	// The pieces the peer has claimed to have.
	_peerPieces roaring.Bitmap
	// The peer has everything. This can occur due to a special message, when
	// we may not even know the number of pieces in the torrent yet.
	peerSentHaveAll bool

	requestState requestStrategy.PeerRequestState

	// Holepunch rendezvous we've initiated that haven't resolved yet.
	outstandingHolepunchingRendezvous map[netip.AddrPort]struct{}

	// Hash requests sent to the peer. If there's an issue we probably don't want to reissue these,
	// because I haven't implemented it smart enough yet.
	sentHashRequests map[hashRequest]struct{}
	// Hash pieces received from the peer, mapped from pieces root to piece layer hashes. This way
	// we can verify all the pieces for a file when they're all arrived before submitting them to
	// the torrent.
	receivedHashPieces map[[32]byte][][32]byte

	// Requests from the peer that haven't yet been read from storage for upload.
	unreadPeerRequests map[Request]struct{}
	// Peer request data that's ready to be uploaded.
	readyPeerRequests map[Request][]byte
	// Total peer request data buffered has decreased, so the server can read more.
	peerRequestDataAllocDecreased chansync.BroadcastCond
	// A routine is handling buffering peer request data.
	peerRequestServerRunning bool

	// Set true after we've added our ConnStats generated during handshake to other ConnStat
	// instances as determined when the *Torrent became known.
	reconciledHandshakeStats bool
}
   127  
// Selects the ConnStats bucket that BitTorrent-protocol peer conns contribute to.
func (*PeerConn) allConnStatsImplField(stats *AllConnStats) *ConnStats {
	return &stats.PeerConns
}
   131  
   132  func (cn *PeerConn) lastWriteUploadRate() float64 {
   133  	cn.messageWriter.mu.Lock()
   134  	defer cn.messageWriter.mu.Unlock()
   135  	return cn.messageWriter.dataUploadRate
   136  }
   137  
// Returns a human-readable summary of the PEX (peer exchange) state for status output.
func (cn *PeerConn) pexStatus() string {
	if !cn.bitExtensionEnabled(pp.ExtensionBitLtep) {
		return "extended protocol disabled"
	}
	if cn.PeerExtensionIDs == nil {
		// The map is only populated once the peer's extended handshake arrives.
		return "pending extended handshake"
	}
	if !cn.supportsExtension(pp.ExtensionNamePex) {
		return "unsupported"
	}
	return fmt.Sprintf(
		"%v conns, %v unsent events",
		len(cn.pex.remoteLiveConns),
		cn.pex.numPending(),
	)
}
   154  
// Returns the PeerConn-specific lines included in a peer's status output.
func (cn *PeerConn) peerImplStatusLines() []string {
	return []string{
		cn.connString,
		fmt.Sprintf("peer id: %+q", cn.PeerID),
		fmt.Sprintf("extensions: %v", cn.PeerExtensionBytes),
		fmt.Sprintf("ltep extensions: %v", cn.PeerExtensionIDs),
		fmt.Sprintf("pex: %s", cn.pexStatus()),
		// Outstanding+cancelled local requests over (our nominal/peer-advertised limits), then
		// the peer's queued requests over our local queue limit.
		fmt.Sprintf(
			"reqq: %d+%v/(%d/%d):%d/%d, flags: %s",
			cn.requestState.Requests.GetCardinality(),
			cn.requestState.Cancelled.GetCardinality(),
			cn.nominalMaxRequests(),
			cn.PeerMaxRequests,
			cn.numPeerRequests(),
			localClientReqq,
			cn.statusFlags(),
		),
	}
}
   174  
   175  // Returns true if the connection is over IPv6.
   176  func (cn *PeerConn) ipv6() bool {
   177  	ip := cn.remoteIp()
   178  	if ip.To4() != nil {
   179  		return false
   180  	}
   181  	return len(ip) == net.IPv6len
   182  }
   183  
   184  // Returns true the if the dialer/initiator has the higher client peer ID. See
   185  // https://github.com/arvidn/libtorrent/blame/272828e1cc37b042dfbbafa539222d8533e99755/src/bt_peer_connection.cpp#L3536-L3557.
   186  // As far as I can tell, Transmission just keeps the oldest connection.
   187  func (cn *PeerConn) isPreferredDirection() bool {
   188  	// True if our client peer ID is higher than the remote's peer ID.
   189  	return bytes.Compare(cn.PeerID[:], cn.t.cl.peerID[:]) < 0 == cn.outgoing
   190  }
   191  
// Returns whether the left connection should be preferred over the right one,
// considering only their networking properties. If ok is false, we can't
// decide.
func (l *PeerConn) hasPreferredNetworkOver(r *PeerConn) bool {
	// Criteria are applied in priority order: preferred direction, then uTP, then IPv6.
	var ml multiless.Computation
	ml = ml.Bool(r.isPreferredDirection(), l.isPreferredDirection())
	ml = ml.Bool(l.utp(), r.utp())
	ml = ml.Bool(r.ipv6(), l.ipv6())
	return ml.Less()
}
   202  
   203  func (cn *PeerConn) peerHasAllPieces() (all, known bool) {
   204  	if cn.peerSentHaveAll {
   205  		return true, true
   206  	}
   207  	if !cn.t.haveInfo() {
   208  		return false, false
   209  	}
   210  	return cn._peerPieces.GetCardinality() == uint64(cn.t.numPieces()), true
   211  }
   212  
// Called when the torrent's info becomes available; clamps peer piece state to the real count.
func (cn *PeerConn) onGotInfo(info *metainfo.Info) {
	cn.setNumPieces(info.NumPieces())
}
   216  
// Clamps the peer-pieces bitmap to the torrent's piece count, discarding any bits set beyond it
// (e.g. from a badly sized BITFIELD or out-of-range HAVE received before the info was known), and
// fires the pieces-changed triggers.
func (cn *PeerConn) setNumPieces(num pieceIndex) {
	cn._peerPieces.RemoveRange(bitmap.BitRange(num), bitmap.ToEnd)
	cn.peerPiecesChanged()
}
   223  
// Exposes the peer's claimed pieces bitmap. Callers must not retain it past lock release.
func (cn *PeerConn) peerPieces() *roaring.Bitmap {
	return &cn._peerPieces
}
   227  
   228  func (cn *PeerConn) connectionFlags() string {
   229  	var sb strings.Builder
   230  	add := func(s string) {
   231  		if sb.Len() > 0 {
   232  			sb.WriteByte(',')
   233  		}
   234  		sb.WriteString(s)
   235  	}
   236  	// From first relevant to last.
   237  	add(string(cn.Discovery))
   238  	if cn.utp() {
   239  		add("U")
   240  	}
   241  	if cn.cryptoMethod == mse.CryptoMethodRC4 {
   242  		add("E")
   243  	} else if cn.headerEncrypted {
   244  		add("e")
   245  	}
   246  	if cn.v2 {
   247  		add("v2")
   248  	} else {
   249  		add("v1")
   250  	}
   251  	return sb.String()
   252  }
   253  
// Reports whether the connection's network is UDP-based (uTP runs over UDP).
func (cn *PeerConn) utp() bool {
	return parseNetworkString(cn.Network).Udp
}
   257  
// Tears down PeerConn-specific state when the connection closes: stops PEX, wakes the writer so
// it can observe the closed state, closes the underlying conn, and fires the close callback.
func (cn *PeerConn) onClose() {
	if cn.pex.IsEnabled() {
		cn.pex.Close()
	}
	cn.tickleWriter()
	if cn.conn != nil {
		// Closed in a goroutine: conn methods must not be called while holding mutexes.
		go cn.conn.Close()
	}
	if cb := cn.callbacks.PeerConnClosed; cb != nil {
		cb(cn)
	}
}
   270  
   271  // Writes a message into the write buffer. Returns whether it's okay to keep writing. Writing is
   272  // done asynchronously, so it may be that we're not able to honour backpressure from this method.
   273  func (cn *PeerConn) write(msg pp.Message) bool {
   274  	torrent.Add(fmt.Sprintf("messages written of type %s", msg.Type.String()), 1)
   275  	// We don't need to track bytes here because the connection's Writer has that behaviour injected
   276  	// (although there's some delay between us buffering the message, and the connection writer
   277  	// flushing it out.).
   278  	notFull := cn.messageWriter.write(msg)
   279  	// Last I checked only Piece messages affect stats, and we don't write those.
   280  	cn.wroteMsg(&msg)
   281  	cn.tickleWriter()
   282  	return notFull
   283  }
   284  
// Sends an ut_metadata request for the given metadata piece, unless the peer doesn't support the
// extension or the piece is already requested. Records the request so it isn't reissued.
func (cn *PeerConn) requestMetadataPiece(index int) {
	eID := cn.PeerExtensionIDs[pp.ExtensionNameMetadata]
	if eID == pp.ExtensionDeleteNumber {
		// Zero means the peer hasn't advertised the metadata extension.
		return
	}
	if index < len(cn.metadataRequests) && cn.metadataRequests[index] {
		return
	}
	cn.protocolLogger.WithDefaultLevel(log.Debug).Printf("requesting metadata piece %d", index)
	cn.write(pp.MetadataExtensionRequestMsg(eID, index))
	// Grow the tracking slice to cover this index before marking it requested.
	for index >= len(cn.metadataRequests) {
		cn.metadataRequests = append(cn.metadataRequests, false)
	}
	cn.metadataRequests[index] = true
}
   300  
   301  func (cn *PeerConn) requestedMetadataPiece(index int) bool {
   302  	return index < len(cn.metadataRequests) && cn.metadataRequests[index]
   303  }
   304  
// Handles a CANCEL from the peer. With the fast extension we must acknowledge with a REJECT;
// otherwise we just drop the request silently.
func (cn *PeerConn) onPeerSentCancel(r Request) {
	if !cn.havePeerRequest(r) {
		torrent.Add("unexpected cancels received", 1)
		return
	}
	if cn.fastEnabled() {
		cn.reject(r)
	} else {
		cn.deletePeerRequest(r)
	}
}
   316  
// Forgets a peer request entirely, whether it was still unread or already buffered for upload.
func (me *PeerConn) deletePeerRequest(r Request) {
	delete(me.unreadPeerRequests, r)
	me.deleteReadyPeerRequest(r)
}
   321  
   322  func (me *PeerConn) havePeerRequest(r Request) bool {
   323  	return MapContains(me.unreadPeerRequests, r) || MapContains(me.readyPeerRequests, r)
   324  }
   325  
// Chokes the peer if not already choked, sending a CHOKE via msg. Returns whether it's okay to
// keep writing. Without the fast extension, choking implicitly discards all peer requests.
func (cn *PeerConn) choke(msg messageWriter) (more bool) {
	if cn.choking {
		return true
	}
	cn.choking = true
	more = msg(pp.Message{
		Type: pp.Choke,
	})
	if !cn.fastEnabled() {
		// BEP 6: with fast enabled, requests survive a choke unless explicitly rejected.
		cn.deleteAllPeerRequests()
	}
	return
}
   339  
// Drops every tracked peer request, both unread and buffered. Note this doesn't broadcast
// peerRequestDataAllocDecreased; callers rely on choke semantics instead.
func (cn *PeerConn) deleteAllPeerRequests() {
	clear(cn.unreadPeerRequests)
	clear(cn.readyPeerRequests)
}
   344  
   345  func (cn *PeerConn) unchoke(msg func(pp.Message) bool) bool {
   346  	if !cn.choking {
   347  		return true
   348  	}
   349  	cn.choking = false
   350  	return msg(pp.Message{
   351  		Type: pp.Unchoke,
   352  	})
   353  }
   354  
   355  func (pc *PeerConn) writeInterested(interested bool) bool {
   356  	return pc.write(pp.Message{
   357  		Type: func() pp.MessageType {
   358  			if interested {
   359  				return pp.Interested
   360  			} else {
   361  				return pp.NotInterested
   362  			}
   363  		}(),
   364  	})
   365  }
   366  
   367  // The final piece to actually commit to a request. Typically, this sends or begins handling the
   368  // request.
   369  func (me *PeerConn) _request(r Request) bool {
   370  	return me.write(pp.Message{
   371  		Type:   pp.Request,
   372  		Index:  r.Index,
   373  		Begin:  r.Begin,
   374  		Length: r.Length,
   375  	})
   376  }
   377  
// Sends a CANCEL for the request. If the remote is expected to acknowledge cancels with rejects
// (BEP 6), track the request as cancelled until the reject arrives.
func (me *PeerConn) handleCancel(r RequestIndex) {
	me.write(makeCancelMessage(me.t.requestIndexToRequest(r)))
	if me.remoteRejectsCancels() {
		// Record that we expect to get a cancel ack.
		if !me.requestState.Cancelled.CheckedAdd(r) {
			panic("request already cancelled")
		}
	}
}
   387  
   388  // Whether we should expect a reject message after sending a cancel.
   389  func (me *PeerConn) remoteRejectsCancels() bool {
   390  	if !me.fastEnabled() {
   391  		return false
   392  	}
   393  	if me.remoteIsTransmission() {
   394  		// Transmission did not send rejects for received cancels. See
   395  		// https://github.com/transmission/transmission/pull/2275. Fixed in 4.0.0-beta.1 onward in
   396  		// https://github.com/transmission/transmission/commit/76719bf34c255da4fca991c2ad3fa4b65d2154b1.
   397  		// Peer ID prefix scheme described
   398  		// https://github.com/transmission/transmission/blob/7ec7607bbcf0fa99bd4b157b9b0f0c411d59f45d/CMakeLists.txt#L128-L149.
   399  		return me.PeerID[3] >= '4'
   400  	}
   401  	return true
   402  }
   403  
// Fills the write buffer in priority order: hash requests, then piece requests, then PEX, then
// uploads. Called by the writer when there's room to write.
func (cn *PeerConn) fillWriteBuffer() {
	if cn.messageWriter.writeBuffer.Len() > writeBufferLowWaterLen {
		// Fully committing to our max requests requires sufficient space (see
		// maxLocalToRemoteRequests). Flush what we have instead. We also prefer always to make
		// requests than to do PEX or upload, so we short-circuit before handling those. Any update
		// request reason will not be cleared, so we'll come right back here when there's space. We
		// can't do this in maybeUpdateActualRequestState because it's a method on Peer and has no
		// knowledge of write buffers.
		return
	}
	cn.requestMissingHashes()
	cn.maybeUpdateActualRequestState()
	if cn.pex.IsEnabled() {
		// Share lets us know if it filled the buffer; if so, stop before uploading.
		if flow := cn.pex.Share(cn.write); !flow {
			return
		}
	}
	cn.upload(cn.write)
}
   423  
   424  func (cn *PeerConn) have(piece pieceIndex) {
   425  	if cn.sentHaves.Get(bitmap.BitIndex(piece)) {
   426  		return
   427  	}
   428  	cn.write(pp.Message{
   429  		Type:  pp.Have,
   430  		Index: pp.Integer(piece),
   431  	})
   432  	cn.sentHaves.Add(bitmap.BitIndex(piece))
   433  }
   434  
// Sends our initial BITFIELD. Must be the first have-related message on the connection, and is
// skipped entirely when we have no pieces (HAVE messages will follow as pieces complete).
func (cn *PeerConn) postBitfield() {
	if cn.sentHaves.Len() != 0 {
		panic("bitfield must be first have-related message sent")
	}
	if !cn.t.haveAnyPieces() {
		return
	}
	cn.write(pp.Message{
		Type:     pp.Bitfield,
		Bitfield: cn.t.bitfield(),
	})
	// Record exactly what we advertised so later HAVEs are deduplicated against it.
	cn.sentHaves = bitmap.Bitmap{cn.t._completedPieces.Clone()}
}
   448  
// Reacts to a request-update trigger by waking the writer.
func (cn *PeerConn) handleOnNeedUpdateRequests() {
	// The writer determines the request state as needed when it can write.
	cn.tickleWriter()
}
   453  
   454  func (cn *PeerConn) raisePeerMinPieces(newMin pieceIndex) {
   455  	if newMin > cn.peerMinPieces {
   456  		cn.peerMinPieces = newMin
   457  	}
   458  }
   459  
// Handles a HAVE message from the peer: validates the index, updates piece availability and the
// peer-pieces bitmap, and triggers request updates if we want the piece.
func (cn *PeerConn) peerSentHave(piece pieceIndex) error {
	if cn.t.haveInfo() && piece >= cn.t.numPieces() || piece < 0 {
		return errors.New("invalid piece")
	}
	if cn.peerHasPiece(piece) {
		// Duplicate HAVE; nothing changes.
		return nil
	}
	cn.raisePeerMinPieces(piece + 1)
	// NOTE(review): this re-check mirrors the early return above and appears defensive;
	// availability is only incremented when the peer didn't already count as having the piece.
	if !cn.peerHasPiece(piece) {
		cn.t.incPieceAvailability(piece)
	}
	cn._peerPieces.Add(uint32(piece))
	if cn.t.wantPieceIndex(piece) {
		cn.onNeedUpdateRequests("have")
	}
	cn.peerPiecesChanged()
	return nil
}
   478  
// Handles a BITFIELD message. Reconciles the claimed pieces against our existing view of the
// peer, updating per-piece availability counts for exactly the pieces that changed.
func (cn *PeerConn) peerSentBitfield(bf []bool) error {
	if len(bf)%8 != 0 {
		// The wire decoder always produces whole bytes, so this is a programmer error.
		panic("expected bitfield length divisible by 8")
	}
	// We know that the last byte means that at most the last 7 bits are wasted.
	cn.raisePeerMinPieces(pieceIndex(len(bf) - 7))
	if cn.t.haveInfo() && len(bf) > int(cn.t.numPieces()) {
		// Ignore known excess pieces.
		bf = bf[:cn.t.numPieces()]
	}
	bm := boolSliceToBitmap(bf)
	if cn.t.haveInfo() && pieceIndex(bm.GetCardinality()) == cn.t.numPieces() {
		// A full bitfield is equivalent to HAVE ALL; take that path.
		cn.onPeerHasAllPieces()
		return nil
	}
	if !bm.IsEmpty() {
		cn.raisePeerMinPieces(pieceIndex(bm.Maximum()) + 1)
	}
	shouldUpdateRequests := false
	if cn.peerSentHaveAll {
		// Transitioning away from the "has everything" state.
		if !cn.t.deleteConnWithAllPieces(&cn.Peer) {
			panic(cn)
		}
		cn.peerSentHaveAll = false
		if !cn._peerPieces.IsEmpty() {
			panic("if peer has all, we expect no individual peer pieces to be set")
		}
	} else {
		bm.Xor(&cn._peerPieces)
	}
	cn.peerSentHaveAll = false
	// bm is now 'on' for pieces that are changing
	bm.Iterate(func(x uint32) bool {
		pi := pieceIndex(x)
		if cn._peerPieces.Contains(x) {
			// Then we must be losing this piece
			cn.t.decPieceAvailability(pi)
		} else {
			if !shouldUpdateRequests && cn.t.wantPieceIndex(pieceIndex(x)) {
				shouldUpdateRequests = true
			}
			// We must be gaining this piece
			cn.t.incPieceAvailability(pieceIndex(x))
		}
		return true
	})
	// Apply the changes. If we had everything previously, this should be empty, so xor is the same
	// as or.
	cn._peerPieces.Xor(&bm)
	if shouldUpdateRequests {
		cn.onNeedUpdateRequests("bitfield")
	}
	// We didn't guard this before, I see no reason to do it now.
	cn.peerPiecesChanged()
	return nil
}
   535  
// Switches the peer into the "has everything" representation: individual per-piece availability
// is replaced by torrent-level all-pieces accounting, and the per-piece bitmap is emptied.
// Does not fire request/piece-change triggers; see onPeerHasAllPieces.
func (cn *PeerConn) onPeerHasAllPiecesNoTriggers() {
	t := cn.t
	if t.haveInfo() {
		// Undo per-piece availability contributions before clearing the bitmap.
		cn._peerPieces.Iterate(func(x uint32) bool {
			t.decPieceAvailability(pieceIndex(x))
			return true
		})
	}
	t.addConnWithAllPieces(&cn.Peer)
	cn.peerSentHaveAll = true
	cn._peerPieces.Clear()
}
   548  
// Records that the peer has every piece and fires the associated triggers.
func (cn *PeerConn) onPeerHasAllPieces() {
	cn.onPeerHasAllPiecesNoTriggers()
	cn.peerHasAllPiecesTriggers()
}
   553  
// Fires the side effects of learning the peer has everything: a request update if we still want
// pieces, and the generic pieces-changed hook.
func (cn *PeerConn) peerHasAllPiecesTriggers() {
	if !cn.t._pendingPieces.IsEmpty() {
		cn.onNeedUpdateRequests("Peer.onPeerHasAllPieces")
	}
	cn.peerPiecesChanged()
}
   560  
// Handles a HAVE ALL message (BEP 6).
func (cn *PeerConn) onPeerSentHaveAll() error {
	cn.onPeerHasAllPieces()
	return nil
}
   565  
// Handles a HAVE NONE message (BEP 6): drops all claimed pieces and their availability.
func (cn *PeerConn) peerSentHaveNone() error {
	if !cn.peerSentHaveAll {
		// In the have-all state per-piece availability was already relinquished.
		cn.t.decPeerPieceAvailability(&cn.Peer)
	}
	cn._peerPieces.Clear()
	cn.peerSentHaveAll = false
	cn.peerPiecesChanged()
	return nil
}
   575  
   576  func (c *PeerConn) requestPendingMetadata() {
   577  	if c.t.haveInfo() {
   578  		return
   579  	}
   580  	if c.PeerExtensionIDs[pp.ExtensionNameMetadata] == 0 {
   581  		// Peer doesn't support this.
   582  		return
   583  	}
   584  	// Request metadata pieces that we don't have in a random order.
   585  	var pending []int
   586  	for index := 0; index < c.t.metadataPieceCount(); index++ {
   587  		if !c.t.haveMetadataPiece(index) && !c.requestedMetadataPiece(index) {
   588  			pending = append(pending, index)
   589  		}
   590  	}
   591  	rand.Shuffle(len(pending), func(i, j int) { pending[i], pending[j] = pending[j], pending[i] })
   592  	for _, i := range pending {
   593  		c.requestMetadataPiece(i)
   594  	}
   595  }
   596  
// Records stats for a message we wrote: per-type counters, per-extension counters for extended
// messages, and the relevant ConnStats.
func (cn *PeerConn) wroteMsg(msg *pp.Message) {
	torrent.Add(fmt.Sprintf("messages written of type %s", msg.Type.String()), 1)
	if msg.Type == pp.Extended {
		// Reverse-map the extended ID back to the protocol name for the counter.
		for name, id := range cn.PeerExtensionIDs {
			if id != msg.ExtendedID {
				continue
			}
			torrent.Add(fmt.Sprintf("Extended messages written for protocol %q", name), 1)
		}
	}
	cn.modifyRelevantConnStats(func(cs *ConnStats) { cs.wroteMsg(msg) })
}
   609  
// Adds written byte counts to the relevant ConnStats instances.
func (cn *PeerConn) wroteBytes(n int64) {
	cn.modifyRelevantConnStats(add(n, func(cs *ConnStats) *Count { return &cs.BytesWritten }))
}
   613  
   614  func (c *PeerConn) fastEnabled() bool {
   615  	return c.PeerExtensionBytes.SupportsFast() && c.t.cl.config.Extensions.SupportsFast()
   616  }
   617  
// Sends a REJECT for the request (fast extension only) and stops tracking it.
func (c *PeerConn) reject(r Request) {
	if !c.fastEnabled() {
		panic("fast not enabled")
	}
	c.write(r.ToMsg(pp.Reject))
	// It is possible to reject a request before it is added to peer requests due to being invalid.
	c.deletePeerRequest(r)
}
   626  
   627  func (c *PeerConn) maximumPeerRequestChunkLength() (_ Option[int]) {
   628  	uploadRateLimiter := c.t.cl.config.UploadRateLimiter
   629  	if uploadRateLimiter.Limit() == rate.Inf {
   630  		return
   631  	}
   632  	return Some(uploadRateLimiter.Burst())
   633  }
   634  
// Total peer requests we're tracking, across both the unread and ready stages.
func (me *PeerConn) numPeerRequests() int {
	return len(me.unreadPeerRequests) + len(me.readyPeerRequests)
}
   638  
// Handles a REQUEST from the peer: validates it against duplicates, choke state, queue limits,
// chunk-length limits, and piece availability, then queues it for reading from storage.
// Returning a non-nil error ends the connection.
// startFetch is for testing purposes currently.
func (c *PeerConn) onReadRequest(r Request, startFetch bool) error {
	requestedChunkLengths.Add(strconv.FormatUint(r.Length.Uint64(), 10), 1)
	if c.havePeerRequest(r) {
		torrent.Add("duplicate requests received", 1)
		if c.fastEnabled() {
			// BEP 6 forbids duplicate outstanding requests.
			return errors.New("received duplicate request with fast enabled")
		}
		return nil
	}
	if c.choking {
		torrent.Add("requests received while choking", 1)
		if c.fastEnabled() {
			torrent.Add("requests rejected while choking", 1)
			c.reject(r)
		}
		return nil
	}
	// TODO: What if they've already requested this?
	if c.numPeerRequests() >= localClientReqq {
		torrent.Add("requests received while queue full", 1)
		if c.fastEnabled() {
			c.reject(r)
		}
		// BEP 6 says we may close here if we choose.
		return nil
	}
	if opt := c.maximumPeerRequestChunkLength(); opt.Ok && int(r.Length) > opt.Value {
		err := fmt.Errorf("peer requested chunk too long (%v)", r.Length)
		// Brother ewwww...
		c.protocolLogger.Levelf(log.Warning, "%v", err.Error())
		if c.fastEnabled() {
			c.reject(r)
			return nil
		} else {
			return err
		}
	}
	if !c.t.havePiece(pieceIndex(r.Index)) {
		// TODO: Tell the peer we don't have the piece, and reject this request.
		requestsReceivedForMissingPieces.Add(1)
		return fmt.Errorf("peer requested piece we don't have: %v", r.Index.Int())
	}
	pieceLength := c.t.pieceLength(pieceIndex(r.Index))
	// Check this after we know we have the piece, so that the piece length will be known.
	if chunkOverflowsPiece(r.ChunkSpec, pieceLength) {
		torrent.Add("bad requests received", 1)
		return errors.New("chunk overflows piece")
	}
	MakeMapIfNilWithCap(&c.unreadPeerRequests, localClientReqq)
	c.unreadPeerRequests[r] = struct{}{}
	if startFetch {
		c.startPeerRequestServer()
	}
	return nil
}
   695  
   696  func (c *PeerConn) startPeerRequestServer() {
   697  	if !c.peerRequestServerRunning {
   698  		go c.peerRequestServer()
   699  		c.peerRequestServerRunning = true
   700  	}
   701  }
   702  
// Serves unread peer requests one at a time until the connection closes or the set is drained.
// Runs under the client lock; servePeerRequest releases it around storage reads.
func (c *PeerConn) peerRequestServer() {
	c.locker().Lock()
again:
	if !c.closed.IsSet() {
		// Pick an arbitrary request; re-range after each serve because the map (and closed
		// state) may have changed while the lock was released.
		for r := range c.unreadPeerRequests {
			c.servePeerRequest(r)
			goto again
		}
	}
	// Invariant: the flag is still set by us until we clear it here.
	panicif.False(c.peerRequestServerRunning)
	c.peerRequestServerRunning = false
	c.locker().Unlock()
}
   716  
   717  func (c *PeerConn) peerRequestDataBuffered() (n int) {
   718  	// TODO: Should we include a limit to the number of individual requests to keep N small, or keep
   719  	// a counter elsewhere?
   720  	for r := range c.readyPeerRequests {
   721  		n += r.Length.Int()
   722  	}
   723  	return
   724  }
   725  
// Blocks until buffering size more bytes of peer-request data would fit under the per-conn
// allocation cap, or returns false if it never can (request too big) or the conn closes.
// Holds the client lock on entry and exit, releasing it while waiting.
func (c *PeerConn) waitForDataAlloc(size int) bool {
	maxAlloc := c.t.cl.config.MaxAllocPeerRequestDataPerConn
	locker := c.locker()
	for {
		if size > maxAlloc {
			// A single request exceeding the cap can never be satisfied.
			c.slogger.Warn("peer request length exceeds MaxAllocPeerRequestDataPerConn",
				"requested", size,
				"max", maxAlloc)
			return false
		}
		if c.peerRequestDataBuffered()+size <= maxAlloc {
			return true
		}
		// Subscribe before unlocking so a decrease can't be missed.
		allocDecreased := c.peerRequestDataAllocDecreased.Signaled()
		locker.Unlock()
		select {
		case <-c.closedCtx.Done():
			locker.Lock()
			return false
		case <-allocDecreased:
		}
		c.locker().Lock()
	}
}
   750  
   751  func (me *PeerConn) deleteReadyPeerRequest(r Request) {
   752  	v, ok := me.readyPeerRequests[r]
   753  	if !ok {
   754  		return
   755  	}
   756  	delete(me.readyPeerRequests, r)
   757  	if len(v) > 0 {
   758  		me.peerRequestDataAllocDecreased.Broadcast()
   759  	}
   760  }
   761  
// Handles an outstanding peer request. It's either rejected, or buffered for the writer.
// Holds the client lock on entry and exit, releasing it for the storage read.
func (c *PeerConn) servePeerRequest(r Request) {
	defer func() {
		// Prevent caller from stalling. It's either rejected or buffered.
		panicif.True(MapContains(c.unreadPeerRequests, r))
	}()
	if !c.waitForDataAlloc(r.Length.Int()) {
		// Might have been removed while unlocked.
		if MapContains(c.unreadPeerRequests, r) {
			c.useBestReject(r)
		}
		return
	}
	// Storage reads can block; don't hold the client lock across them.
	c.locker().Unlock()
	b, err := c.readPeerRequestData(r)
	c.locker().Lock()
	if err != nil {
		c.peerRequestDataReadFailed(err, r)
		return
	}
	if !MapContains(c.unreadPeerRequests, r) {
		// Cancelled/choked away while we were reading.
		c.slogger.Debug("read data for peer request but no longer wanted", "request", r)
		return
	}
	MustDelete(c.unreadPeerRequests, r)
	MakeMapIfNil(&c.readyPeerRequests)
	c.readyPeerRequests[r] = b
	c.tickleWriter()
}
   791  
// Handles a storage read failure for a peer request: logs it (quietly when drops are expected),
// refreshes piece completion, and rejects the request as best the protocol allows.
// If this is maintained correctly, we might be able to support optional synchronous reading for
// chunk sending, the way it used to work.
func (c *PeerConn) peerRequestDataReadFailed(err error, r Request) {
	torrent.Add("peer request data read failures", 1)
	logLevel := log.Warning
	if c.t.hasStorageCap() || c.t.closed.IsSet() {
		// It's expected that pieces might drop. See
		// https://github.com/anacrolix/torrent/issues/702#issuecomment-1000953313. Also the torrent
		// may have been Dropped, and the user expects to own the files, see
		// https://github.com/anacrolix/torrent/issues/980.
		logLevel = log.Debug
	}
	c.logger.Levelf(logLevel, "error reading chunk for peer Request %v: %v", r, err)
	if c.t.closed.IsSet() {
		return
	}
	i := pieceIndex(r.Index)
	if c.t.pieceComplete(i) {
		// There used to be more code here that just duplicated the following break. Piece
		// completions are currently cached, so I'm not sure how helpful this update is, except to
		// pull any completion changes pushed to the storage backend in failed reads that got us
		// here.
		c.t.updatePieceCompletion(i)
	}
	// We've probably dropped a piece from storage, but there's no way to communicate this to the
	// peer. If they ask for it again, we kick them allowing us to send them updated piece states if
	// we reconnect. TODO: Instead, we could just try to update them with Bitfield or HaveNone and
	// if they kick us for breaking protocol, on reconnect we will be compliant again (at least
	// initially).
	c.useBestReject(r)
}
   823  
// Reject a peer request using the best protocol support we have available.
func (c *PeerConn) useBestReject(r Request) {
	if c.fastEnabled() {
		// BEP 6 gives us an explicit per-request reject.
		c.reject(r)
	} else {
		if c.choking {
			// If fast isn't enabled, I think we would have wiped all peer requests when we last
			// choked, and requests while we're choking would be ignored. It could be possible that
			// a peer request data read completed concurrently to it being deleted elsewhere.
			c.protocolLogger.WithDefaultLevel(log.Warning).Printf("already choking peer, requests might not be rejected correctly")
		}
		// Choking a non-fast peer should cause them to flush all their requests.
		c.choke(c.write)
	}
}
   839  
   840  func (c *PeerConn) readPeerRequestData(r Request) ([]byte, error) {
   841  	b := make([]byte, r.Length)
   842  	p := c.t.info.Piece(int(r.Index))
   843  	n, err := c.t.readAt(b, p.Offset()+int64(r.Begin))
   844  	if n == len(b) {
   845  		if errors.Is(err, io.EOF) {
   846  			err = nil
   847  		}
   848  	} else {
   849  		if err == nil {
   850  			panic("expected error")
   851  		}
   852  	}
   853  	return b, err
   854  }
   855  
// Logs protocol-behaviour observations with the peer's ID and client name attached for context.
func (c *PeerConn) logProtocolBehaviour(level log.Level, format string, arg ...interface{}) {
	c.protocolLogger.WithContextText(fmt.Sprintf(
		"peer id %q, ext v %q", c.PeerID, c.PeerClientName.Load(),
	)).SkipCallers(1).Levelf(level, format, arg...)
}
   861  
// Processes incoming BitTorrent wire-protocol messages. The client lock is held upon entry and
// exit. Returning will end the connection.
func (c *PeerConn) mainReadLoop() (err error) {
	defer func() {
		// Count how the loop terminated, for expvar stats.
		if err != nil {
			torrent.Add("connection.mainReadLoop returned with error", 1)
		} else {
			torrent.Add("connection.mainReadLoop returned with no error", 1)
		}
	}()
	t := c.t
	cl := t.cl

	// The decoder reads whole wire messages. MaxLength bounds per-message memory relative to the
	// chunk size; Pool recycles chunk buffers for Piece payloads.
	decoder := pp.Decoder{
		R:         bufio.NewReaderSize(c.r, 1<<17),
		MaxLength: 4 * pp.Integer(max(int64(t.chunkSize), defaultChunkSize)),
		Pool:      &t.chunkPool,
	}
	for {
		var msg pp.Message
		func() {
			// Release the client lock while blocked on the network read so other work can
			// proceed; reacquire before processing the message.
			cl.unlock()
			// TODO: Could TryLock and pump for more messages here until we can get the lock and
			// process them in a batch.
			defer cl.lock()
			err = decoder.Decode(&msg)
			if err != nil {
				err = fmt.Errorf("decoding message: %w", err)
			}
		}()
		// Do this before checking closed.
		if cb := c.callbacks.ReadMessage; cb != nil && err == nil {
			cb(c, &msg)
		}
		// A closed torrent or connection ends the loop without reporting a read error.
		if t.closed.IsSet() || c.closed.IsSet() {
			return nil
		}
		if err != nil {
			return err
		}
		c.lastMessageReceived = time.Now()
		if msg.Keepalive {
			receivedKeepalives.Add(1)
			continue
		}
		messageTypesReceived.Add(msg.Type.String(), 1)
		// Fast-extension messages without the extension negotiated are a protocol violation.
		if msg.Type.FastExtension() && !c.fastEnabled() {
			runSafeExtraneous(func() { torrent.Add("fast messages received when extension is disabled", 1) })
			return fmt.Errorf("received fast extension message (type=%v) but extension is disabled", msg.Type)
		}
		switch msg.Type {
		case pp.Choke:
			if c.peerChoking {
				break
			}
			if !c.fastEnabled() {
				// Non-fast peers silently drop our outstanding requests when they choke us.
				c.deleteAllRequests("choked by non-fast PeerConn")
			} else {
				// We don't decrement pending requests here, let's wait for the peer to either
				// reject or satisfy the outstanding requests. Additionally, some peers may unchoke
				// us and resume where they left off, we don't want to have piled on to those chunks
				// in the meanwhile. I think a peer's ability to abuse this should be limited: they
				// could let us request a lot of stuff, then choke us and never reject, but they're
				// only a single peer, our chunk balancing should smooth over this abuse.
			}
			c.peerChoking = true
			c.updateExpectingChunks()
		case pp.Unchoke:
			if !c.peerChoking {
				// Some clients do this for some reason. Transmission doesn't error on this, so we
				// won't for consistency.
				c.logProtocolBehaviour(log.Debug, "received unchoke when already unchoked")
				break
			}
			c.peerChoking = false
			// Count requests that survived the choke (those not covered by allowed-fast).
			preservedCount := 0
			c.requestState.Requests.Iterate(func(x RequestIndex) bool {
				if !c.peerAllowedFast.Contains(c.t.pieceIndexOfRequestIndex(x)) {
					preservedCount++
				}
				return true
			})
			if preservedCount != 0 {
				// TODO: Yes this is a debug log but I'm not happy with the state of the logging lib
				// right now.
				c.protocolLogger.Levelf(log.Debug,
					"%v requests were preserved while being choked (fast=%v)",
					preservedCount,
					c.fastEnabled())

				torrent.Add("requestsPreservedThroughChoking", int64(preservedCount))
			}
			if !c.t._pendingPieces.IsEmpty() {
				c.onNeedUpdateRequests("unchoked")
			}
			c.updateExpectingChunks()
		case pp.Interested:
			c.peerInterested = true
			// Wake the writer so it can consider unchoking/uploading.
			c.tickleWriter()
		case pp.NotInterested:
			c.peerInterested = false
			// We don't clear their requests since it isn't clear in the spec.
			// We'll probably choke them for this, which will clear them if
			// appropriate, and is clearly specified.
		case pp.Have:
			err = c.peerSentHave(pieceIndex(msg.Index))
		case pp.Bitfield:
			err = c.peerSentBitfield(msg.Bitfield)
		case pp.Request:
			r := newRequestFromMessage(&msg)
			err = c.onReadRequest(r, true)
			if err != nil {
				err = fmt.Errorf("on reading request %v: %w", r, err)
			}
		case pp.Piece:
			c.doChunkReadStats(int64(len(msg.Piece)))
			err = c.receiveChunk(&msg)
			// Return the chunk buffer to the pool regardless of receive outcome.
			t.putChunkBuffer(msg.Piece)
			msg.Piece = nil
			if err != nil {
				err = fmt.Errorf("receiving chunk: %w", err)
			}
		case pp.Cancel:
			req := newRequestFromMessage(&msg)
			c.onPeerSentCancel(req)
		case pp.Port:
			// The peer advertised its DHT port; ping it through our DHT servers.
			ipa, ok := tryIpPortFromNetAddr(c.RemoteAddr)
			if !ok {
				break
			}
			pingAddr := net.UDPAddr{
				IP:   ipa.IP,
				Port: ipa.Port,
			}
			if msg.Port != 0 {
				pingAddr.Port = int(msg.Port)
			}
			cl.eachDhtServer(func(s DhtServer) {
				go s.Ping(&pingAddr)
			})
		case pp.Suggest:
			torrent.Add("suggests received", 1)
			log.Fmsg("peer suggested piece %d", msg.Index).AddValues(c, msg.Index).LogLevel(log.Debug, c.t.logger)
			c.onNeedUpdateRequests("suggested")
		case pp.HaveAll:
			err = c.onPeerSentHaveAll()
		case pp.HaveNone:
			err = c.peerSentHaveNone()
		case pp.Reject:
			req := newRequestFromMessage(&msg)
			if !c.remoteRejectedRequest(c.t.requestIndexFromRequest(req)) {
				// A reject for a request we don't have outstanding.
				err = fmt.Errorf("received invalid reject for request %v", req)
				c.protocolLogger.Levelf(log.Debug, "%v", err)
			}
		case pp.AllowedFast:
			torrent.Add("allowed fasts received", 1)
			log.Fmsg("peer allowed fast: %d", msg.Index).AddValues(c).LogLevel(log.Debug, c.t.logger)
			c.onNeedUpdateRequests("PeerConn.mainReadLoop allowed fast")
		case pp.Extended:
			err = c.onReadExtendedMsg(msg.ExtendedID, msg.ExtendedPayload)
		case pp.Hashes:
			err = c.onReadHashes(&msg)
		case pp.HashRequest:
			err = c.onHashRequest(&msg)
		case pp.HashReject:
			c.protocolLogger.Levelf(log.Info, "received unimplemented BitTorrent v2 message: %v", msg.Type)
		default:
			err = fmt.Errorf("received unknown message type: %#v", msg.Type)
		}
		if err != nil {
			return err
		}
	}
}
  1036  
// Handle an incoming LTEP (BEP 10) extended message: the extended handshake, or one of the
// built-in extension protocols (metadata, PEX, ut_holepunch). User-registered protocols are
// expected to be handled via the PeerConnReadExtensionMessage callback.
func (c *PeerConn) onReadExtendedMsg(id pp.ExtensionNumber, payload []byte) (err error) {
	defer func() {
		// TODO: Should we still do this?
		if err != nil {
			// These clients use their own extension IDs for outgoing message
			// types, which is incorrect.
			if bytes.HasPrefix(c.PeerID[:], []byte("-SD0100-")) || strings.HasPrefix(string(c.PeerID[:]), "-XL0012-") {
				err = nil
			}
		}
	}()
	t := c.t
	cl := t.cl
	{
		// Notify observers of every extension message, before any internal handling.
		event := PeerConnReadExtensionMessageEvent{
			PeerConn:        c,
			ExtensionNumber: id,
			Payload:         payload,
		}
		for _, cb := range c.callbacks.PeerConnReadExtensionMessage {
			cb(event)
		}
	}
	if id == pp.HandshakeExtendedID {
		var d pp.ExtendedHandshakeMessage
		if err := bencode.Unmarshal(payload, &d); err != nil {
			c.protocolLogger.Printf("error parsing extended handshake message %q: %s", payload, err)
			return fmt.Errorf("unmarshalling extended handshake payload: %w", err)
		}
		// Trigger this callback after it's been processed. If you want to handle it yourself, you
		// should hook PeerConnReadExtensionMessage.
		if cb := c.callbacks.ReadExtendedHandshake; cb != nil {
			cb(c, &d)
		}
		// Reqq is the peer's advertised maximum outstanding request queue length.
		if d.Reqq != 0 {
			c.PeerMaxRequests = d.Reqq
		}
		c.PeerClientName.Store(d.V)
		if c.PeerExtensionIDs == nil {
			c.PeerExtensionIDs = make(map[pp.ExtensionName]pp.ExtensionNumber, len(d.M))
		}
		c.PeerListenPort = d.Port
		c.PeerPrefersEncryption = d.Encryption
		// Record the peer's extension name -> ID mappings, counting newly seen extensions.
		for name, id := range d.M {
			if _, ok := c.PeerExtensionIDs[name]; !ok {
				peersSupportingExtension.Add(
					// expvar.Var.String must produce valid JSON. "ut_payme\xeet_address" was being
					// entered here which caused problems later when unmarshalling.
					strconv.Quote(string(name)),
					1)
			}
			c.PeerExtensionIDs[name] = id
		}
		if d.MetadataSize != 0 {
			if err = t.setMetadataSize(d.MetadataSize); err != nil {
				return fmt.Errorf("setting metadata size to %d: %w", d.MetadataSize, err)
			}
		}
		c.requestPendingMetadata()
		if !t.cl.config.DisablePEX {
			t.pex.Add(c) // we learnt enough now
			// This checks the extension is supported internally.
			c.pex.Init(c)
		}
		return nil
	}
	// Not the handshake: map the wire ID back to one of our local protocol names.
	extensionName, builtin, err := c.LocalLtepProtocolMap.LookupId(id)
	if err != nil {
		return
	}
	if !builtin {
		// User should have taken care of this in PeerConnReadExtensionMessage callback.
		return nil
	}
	switch extensionName {
	case pp.ExtensionNameMetadata:
		err := cl.gotMetadataExtensionMsg(payload, t, c)
		if err != nil {
			return fmt.Errorf("handling metadata extension message: %w", err)
		}
		return nil
	case pp.ExtensionNamePex:
		if !c.pex.IsEnabled() {
			return nil // or hang-up maybe?
		}
		err = c.pex.Recv(payload)
		if err != nil {
			err = fmt.Errorf("receiving pex message: %w", err)
		}
		return
	case utHolepunch.ExtensionName:
		var msg utHolepunch.Msg
		err = msg.UnmarshalBinary(payload)
		if err != nil {
			err = fmt.Errorf("unmarshalling ut_holepunch message: %w", err)
			return
		}
		err = c.t.handleReceivedUtHolepunchMsg(msg, c)
		return
	default:
		// LookupId said it's builtin, so not handling it here is a programming error.
		panic(fmt.Sprintf("unhandled builtin extension protocol %q", extensionName))
	}
}
  1140  
  1141  // Set both the Reader and Writer for the connection from a single ReadWriter.
  1142  func (cn *PeerConn) setRW(rw io.ReadWriter) {
  1143  	cn.r = rw
  1144  	cn.w = rw
  1145  }
  1146  
  1147  // Returns the Reader and Writer as a combined ReadWriter.
  1148  func (cn *PeerConn) rw() io.ReadWriter {
  1149  	return struct {
  1150  		io.Reader
  1151  		io.Writer
  1152  	}{cn.r, cn.w}
  1153  }
  1154  
  1155  func (c *PeerConn) uploadAllowed() bool {
  1156  	if c.t.cl.config.NoUpload {
  1157  		return false
  1158  	}
  1159  	if c.t.dataUploadDisallowed {
  1160  		return false
  1161  	}
  1162  	if c.t.seeding() {
  1163  		return true
  1164  	}
  1165  	if !c.peerHasWantedPieces() {
  1166  		return false
  1167  	}
  1168  	// Don't upload more than 100 KiB more than we download.
  1169  	if c._stats.BytesWrittenData.Int64() >= c._stats.BytesReadData.Int64()+100<<10 {
  1170  		return false
  1171  	}
  1172  	return true
  1173  }
  1174  
  1175  func (c *PeerConn) setRetryUploadTimer(delay time.Duration) {
  1176  	if c.uploadTimer == nil {
  1177  		c.uploadTimer = time.AfterFunc(delay, c.tickleWriter)
  1178  	} else {
  1179  		c.uploadTimer.Reset(delay)
  1180  	}
  1181  }
  1182  
// Also handles choking and unchoking of the remote peer. Returns false when the connection's
// write path should stop (same convention as the msg callback).
func (c *PeerConn) upload(msg func(pp.Message) bool) bool {
	// Breaking or completing this loop means we don't want to upload to the peer anymore, and we
	// choke them.
another:
	for c.uploadAllowed() {
		// We want to upload to the peer.
		if !c.unchoke(msg) {
			return false
		}
		for r := range c.readyPeerRequests {
			// Reserve upload bandwidth for this chunk from the client-wide rate limiter.
			res := c.t.cl.config.UploadRateLimiter.ReserveN(time.Now(), int(r.Length))
			if !res.OK() {
				panic(fmt.Sprintf("upload rate limiter burst size < %d", r.Length))
			}
			delay := res.Delay()
			if delay > 0 {
				// Not allowed to send yet: return the reservation and retry when the timer fires.
				res.Cancel()
				c.setRetryUploadTimer(delay)
				// Hard to say what to return here.
				return true
			}
			more := c.sendChunk(r, msg)
			if !more {
				return false
			}
			// Sending may have changed state; restart the outer loop to re-check
			// uploadAllowed before sending another chunk.
			goto another
		}
		return true
	}
	return c.choke(msg)
}
  1215  
// Ask the torrent to drop this connection.
func (cn *PeerConn) drop() {
	cn.t.dropConnection(cn)
}
  1219  
// Called when the peer supplied data we consider bad; bans the peer's IP via the client.
func (cn *PeerConn) providedBadData() {
	cn.t.cl.banPeerIP(cn.remoteIp())
}
  1223  
// This is called when something has changed that should wake the writer, such as putting stuff into
// the writeBuffer, or changing some state that the writer can act on. Broadcast is used since
// there may be multiple waiters on the write condition.
func (c *PeerConn) tickleWriter() {
	c.messageWriter.writeCond.Broadcast()
}
  1229  
  1230  func (c *PeerConn) sendChunk(r Request, msg func(pp.Message) bool) (more bool) {
  1231  	b := MapMustGet(c.readyPeerRequests, r)
  1232  	panicif.NotEq(len(b), r.Length.Int())
  1233  	c.deleteReadyPeerRequest(r)
  1234  	c.lastChunkSent = time.Now()
  1235  	return msg(pp.Message{
  1236  		Type:  pp.Piece,
  1237  		Index: r.Index,
  1238  		Begin: r.Begin,
  1239  		Piece: b,
  1240  	})
  1241  }
  1242  
// Attach the connection to its Torrent. Must only happen once per connection.
func (c *PeerConn) setTorrent(t *Torrent) {
	// A connection belongs to at most one Torrent, ever.
	panicif.NotNil(c.t)
	c.t = t
	c.initClosedCtx()
	// Switch from the Client-level loggers to the Torrent's loggers.
	c.setPeerLoggers(t.logger, t.slogger())
	c.reconcileHandshakeStats()
}
  1250  
  1251  func (c *PeerConn) pexPeerFlags() pp.PexPeerFlags {
  1252  	f := pp.PexPeerFlags(0)
  1253  	if c.PeerPrefersEncryption {
  1254  		f |= pp.PexPrefersEncryption
  1255  	}
  1256  	if c.outgoing {
  1257  		f |= pp.PexOutgoingConn
  1258  	}
  1259  	if c.utp() {
  1260  		f |= pp.PexSupportsUtp
  1261  	}
  1262  	return f
  1263  }
  1264  
  1265  // This returns the address to use if we want to dial the peer again. It incorporates the peer's
  1266  // advertised listen port.
  1267  func (c *PeerConn) dialAddr() PeerRemoteAddr {
  1268  	if c.outgoing || c.PeerListenPort == 0 {
  1269  		return c.RemoteAddr
  1270  	}
  1271  	addrPort, err := addrPortFromPeerRemoteAddr(c.RemoteAddr)
  1272  	if err != nil {
  1273  		c.logger.Levelf(
  1274  			log.Warning,
  1275  			"error parsing %q for alternate dial port: %v",
  1276  			c.RemoteAddr,
  1277  			err,
  1278  		)
  1279  		return c.RemoteAddr
  1280  	}
  1281  	return netip.AddrPortFrom(addrPort.Addr(), uint16(c.PeerListenPort))
  1282  }
  1283  
  1284  func (c *PeerConn) pexEvent(t pexEventType) (_ pexEvent, err error) {
  1285  	f := c.pexPeerFlags()
  1286  	dialAddr := c.dialAddr()
  1287  	addr, err := addrPortFromPeerRemoteAddr(dialAddr)
  1288  	if err != nil || !addr.IsValid() {
  1289  		err = fmt.Errorf("parsing dial addr %q: %w", dialAddr, err)
  1290  		return
  1291  	}
  1292  	return pexEvent{t, addr, f, nil}, nil
  1293  }
  1294  
  1295  func (pc *PeerConn) String() string {
  1296  	return fmt.Sprintf(
  1297  		"%T %p [flags=%v id=%+q, exts=%v, v=%q]",
  1298  		pc,
  1299  		pc,
  1300  		pc.connectionFlags(),
  1301  		pc.PeerID,
  1302  		pc.PeerExtensionBytes,
  1303  		pc.PeerClientName.Load(),
  1304  	)
  1305  }
  1306  
// Returns the pieces the peer could have based on their claims. If we don't know how many pieces
// are in the torrent, it could be a very large range if the peer has sent HaveAll.
// Takes the client read lock; safe for external callers.
func (pc *PeerConn) PeerPieces() *roaring.Bitmap {
	pc.locker().RLock()
	defer pc.locker().RUnlock()
	return pc.newPeerPieces()
}
  1314  
  1315  func (pc *PeerConn) remoteIsTransmission() bool {
  1316  	return bytes.HasPrefix(pc.PeerID[:], []byte("-TR")) && pc.PeerID[7] == '-'
  1317  }
  1318  
  1319  func (pc *PeerConn) remoteDialAddrPort() (netip.AddrPort, error) {
  1320  	dialAddr := pc.dialAddr()
  1321  	return addrPortFromPeerRemoteAddr(dialAddr)
  1322  }
  1323  
// Whether a handshake extension bit is enabled both in our client config and by the peer.
func (pc *PeerConn) bitExtensionEnabled(bit pp.ExtensionBit) bool {
	return pc.t.cl.config.Extensions.GetBit(bit) && pc.PeerExtensionBytes.GetBit(bit)
}
  1327  
// Called when the set of pieces the peer claims to have has changed; the peer may now be
// mutually complete with us and eligible for dropping.
func (cn *PeerConn) peerPiecesChanged() {
	cn.t.maybeDropMutuallyCompletePeer(cn)
}
  1331  
  1332  // Returns whether the connection could be useful to us. We're seeding and
  1333  // they want data, we don't have metainfo and they can provide it, etc.
  1334  func (c *PeerConn) useful() bool {
  1335  	t := c.t
  1336  	if c.closed.IsSet() {
  1337  		return false
  1338  	}
  1339  	if !t.haveInfo() {
  1340  		return c.supportsExtension("ut_metadata")
  1341  	}
  1342  	if t.seeding() && c.peerInterested {
  1343  		return true
  1344  	}
  1345  	if c.peerHasWantedPieces() {
  1346  		return true
  1347  	}
  1348  	return false
  1349  }
  1350  
  1351  func makeBuiltinLtepProtocols(pex bool) LocalLtepProtocolMap {
  1352  	ps := []pp.ExtensionName{pp.ExtensionNameMetadata, utHolepunch.ExtensionName}
  1353  	if pex {
  1354  		ps = append(ps, pp.ExtensionNamePex)
  1355  	}
  1356  	return LocalLtepProtocolMap{
  1357  		Index:      ps,
  1358  		NumBuiltin: len(ps),
  1359  	}
  1360  }
  1361  
// Adopt the client's shared default LTEP protocol map for this connection.
// NOTE(review): the pex parameter is unused here; presumably the client's default map already
// reflects the PEX setting — confirm this is intended.
func (c *PeerConn) addBuiltinLtepProtocols(pex bool) {
	c.LocalLtepProtocolMap = &c.t.cl.defaultLocalLtepProtocolMap
}
  1365  
  1366  func (pc *PeerConn) WriteExtendedMessage(extName pp.ExtensionName, payload []byte) error {
  1367  	pc.locker().Lock()
  1368  	defer pc.locker().Unlock()
  1369  	id := pc.PeerExtensionIDs[extName]
  1370  	if id == 0 {
  1371  		return fmt.Errorf("peer does not support or has disabled extension %q", extName)
  1372  	}
  1373  	pc.write(pp.Message{
  1374  		Type:            pp.Extended,
  1375  		ExtendedID:      id,
  1376  		ExtendedPayload: payload,
  1377  	})
  1378  	return nil
  1379  }
  1380  
// Whether it makes sense to request v2 piece hashes from this peer: we need the torrent info, a
// v2-capable connection, and v2 metadata present.
func (pc *PeerConn) shouldRequestHashes() bool {
	return pc.t.haveInfo() && pc.v2 && pc.t.info.HasV2()
}
  1384  
// Request any BitTorrent v2 piece hashes we're missing from this peer, skipping files whose
// hashes we already have or that the peer can't serve. Requests are deduplicated against
// sentHashRequests.
func (pc *PeerConn) requestMissingHashes() {
	if !pc.shouldRequestHashes() {
		return
	}
	info := pc.t.info
	// The base layer index of the merkle tree where piece hashes live: log2 of the number of
	// blocks per piece, rounded up to a power of two.
	baseLayer := pp.Integer(merkle.Log2RoundingUp(merkle.RoundUpToPowerOfTwo(
		uint((pc.t.usualPieceSize() + merkle.BlockSize - 1) / merkle.BlockSize)),
	))
	nextFileBeginPiece := 0
file:
	for _, file := range info.UpvertedFiles() {
		fileNumPieces := int((file.Length + info.PieceLength - 1) / info.PieceLength)
		// We would be requesting the leaves, the file must be short enough that we can just do with
		// the pieces root as the piece hash.
		if fileNumPieces <= 1 {
			continue
		}
		curFileBeginPiece := nextFileBeginPiece
		nextFileBeginPiece += fileNumPieces
		haveAllHashes := true
		for i := range fileNumPieces {
			torrentPieceIndex := curFileBeginPiece + i
			// Only request hashes for files the peer fully has.
			if !pc.peerHasPiece(torrentPieceIndex) {
				continue file
			}
			if !pc.t.piece(torrentPieceIndex).hashV2.Ok {
				haveAllHashes = false
			}
		}
		if haveAllHashes {
			continue
		}
		piecesRoot := file.PiecesRoot.Unwrap()
		proofLayers := pp.Integer(0)
		// Request in batches of up to 512 hashes.
		for index := 0; index < fileNumPieces; index += 512 {
			// Minimizing to the number of pieces in a file conflicts with the BEP.
			length := merkle.RoundUpToPowerOfTwo(uint(min(512, fileNumPieces-index)))
			if length < 2 {
				// This should have been filtered out by baseLayer and pieces root as piece hash
				// checks.
				panic(length)
			}
			if length%2 != 0 {
				pc.protocolLogger.Levelf(log.Warning, "requesting odd hashes length %d", length)
			}
			msg := pp.Message{
				Type:        pp.HashRequest,
				PiecesRoot:  piecesRoot,
				BaseLayer:   baseLayer,
				Index:       pp.Integer(index),
				Length:      pp.Integer(length),
				ProofLayers: proofLayers,
			}
			// Skip requests we've already sent.
			hr := hashRequestFromMessage(msg)
			if generics.MapContains(pc.sentHashRequests, hr) {
				continue
			}
			pc.write(msg)
			generics.MakeMapIfNil(&pc.sentHashRequests)
			pc.sentHashRequests[hr] = struct{}{}
		}
	}
}
  1448  
  1449  func (pc *PeerConn) onReadHashes(msg *pp.Message) (err error) {
  1450  	file := pc.t.getFileByPiecesRoot(msg.PiecesRoot)
  1451  	filePieceHashes := pc.receivedHashPieces[msg.PiecesRoot]
  1452  	if filePieceHashes == nil {
  1453  		filePieceHashes = make([][32]byte, file.numPieces())
  1454  		generics.MakeMapIfNil(&pc.receivedHashPieces)
  1455  		pc.receivedHashPieces[msg.PiecesRoot] = filePieceHashes
  1456  	}
  1457  	if msg.ProofLayers != 0 {
  1458  		// This isn't handled yet.
  1459  		panic(msg.ProofLayers)
  1460  	}
  1461  	copy(filePieceHashes[msg.Index:], msg.Hashes)
  1462  	root := merkle.RootWithPadHash(
  1463  		filePieceHashes,
  1464  		metainfo.HashForPiecePad(int64(pc.t.usualPieceSize())))
  1465  	expectedPiecesRoot := file.piecesRoot.Unwrap()
  1466  	if root == expectedPiecesRoot {
  1467  		pc.protocolLogger.WithNames(v2HashesLogName).Levelf(
  1468  			log.Info,
  1469  			"got piece hashes for file %v (num pieces %v)",
  1470  			file, file.numPieces())
  1471  		for filePieceIndex, peerHash := range filePieceHashes {
  1472  			torrentPieceIndex := file.BeginPieceIndex() + filePieceIndex
  1473  			pc.t.piece(torrentPieceIndex).setV2Hash(peerHash)
  1474  		}
  1475  	} else {
  1476  		pc.protocolLogger.WithNames(v2HashesLogName).Levelf(
  1477  			log.Debug,
  1478  			"peer file piece hashes root mismatch: %x != %x",
  1479  			root, expectedPiecesRoot)
  1480  	}
  1481  	return nil
  1482  }
  1483  
  1484  func (pc *PeerConn) getHashes(msg *pp.Message) ([][32]byte, error) {
  1485  	if msg.ProofLayers != 0 {
  1486  		return nil, errors.New("proof layers not supported")
  1487  	}
  1488  	if msg.Length > 8192 {
  1489  		return nil, fmt.Errorf("requested too many hashes: %d", msg.Length)
  1490  	}
  1491  	file := pc.t.getFileByPiecesRoot(msg.PiecesRoot)
  1492  	if file == nil {
  1493  		return nil, fmt.Errorf("no file for pieces root %x", msg.PiecesRoot)
  1494  	}
  1495  	beginPieceIndex := file.BeginPieceIndex()
  1496  	endPieceIndex := file.EndPieceIndex()
  1497  	length := merkle.RoundUpToPowerOfTwo(uint(endPieceIndex - beginPieceIndex))
  1498  	if uint(msg.Index+msg.Length) > length {
  1499  		return nil, errors.New("invalid hash range")
  1500  	}
  1501  
  1502  	hashes := make([][32]byte, msg.Length)
  1503  	padHash := metainfo.HashForPiecePad(int64(pc.t.usualPieceSize()))
  1504  	for i := range hashes {
  1505  		torrentPieceIndex := beginPieceIndex + int(msg.Index) + i
  1506  		if torrentPieceIndex >= endPieceIndex {
  1507  			hashes[i] = padHash
  1508  			continue
  1509  		}
  1510  		piece := pc.t.piece(torrentPieceIndex)
  1511  		hash, err := piece.obtainHashV2()
  1512  		if err != nil {
  1513  			return nil, fmt.Errorf("can't get hash for piece %d: %w", torrentPieceIndex, err)
  1514  		}
  1515  		hashes[i] = hash
  1516  	}
  1517  	return hashes, nil
  1518  }
  1519  
  1520  func (pc *PeerConn) onHashRequest(msg *pp.Message) error {
  1521  	if !pc.t.info.HasV2() {
  1522  		return errors.New("torrent has no v2 metadata")
  1523  	}
  1524  
  1525  	resp := pp.Message{
  1526  		PiecesRoot:  msg.PiecesRoot,
  1527  		BaseLayer:   msg.BaseLayer,
  1528  		Index:       msg.Index,
  1529  		Length:      msg.Length,
  1530  		ProofLayers: msg.ProofLayers,
  1531  	}
  1532  
  1533  	hashes, err := pc.getHashes(msg)
  1534  	if err != nil {
  1535  		pc.protocolLogger.WithNames(v2HashesLogName).Levelf(log.Debug, "error getting hashes: %v", err)
  1536  		resp.Type = pp.HashReject
  1537  		pc.write(resp)
  1538  		return nil
  1539  	}
  1540  
  1541  	resp.Type = pp.Hashes
  1542  	resp.Hashes = hashes
  1543  	pc.write(resp)
  1544  	return nil
  1545  }
  1546  
// Identifies a BitTorrent v2 hash request. Comparable, so it can serve as a map key to
// deduplicate outgoing requests; fields mirror the corresponding pp.Message fields.
type hashRequest struct {
	piecesRoot                            [32]byte
	baseLayer, index, length, proofLayers pp.Integer
}
  1551  
  1552  func (hr hashRequest) toMessage() pp.Message {
  1553  	return pp.Message{
  1554  		Type:        pp.HashRequest,
  1555  		PiecesRoot:  hr.piecesRoot,
  1556  		BaseLayer:   hr.baseLayer,
  1557  		Index:       hr.index,
  1558  		Length:      hr.length,
  1559  		ProofLayers: hr.proofLayers,
  1560  	}
  1561  }
  1562  
  1563  func hashRequestFromMessage(m pp.Message) hashRequest {
  1564  	return hashRequest{
  1565  		piecesRoot:  m.PiecesRoot,
  1566  		baseLayer:   m.BaseLayer,
  1567  		index:       m.Index,
  1568  		length:      m.Length,
  1569  		proofLayers: m.ProofLayers,
  1570  	}
  1571  }
  1572  
// The embedded Peer as a pointer, for passing to code that operates on any peer implementation.
func (me *PeerConn) peerPtr() *Peer {
	return &me.Peer
}
  1576  
// The actual value to use as the maximum outbound requests. Clamped to at least 1, and at most
// the smallest of the peer's advertised limit, twice the recent peak, and our hard cap.
func (cn *PeerConn) nominalMaxRequests() maxRequests {
	return max(1, min(cn.PeerMaxRequests, cn.peakRequests*2, maxLocalToRemoteRequests))
}
  1581  
  1582  // Set the Peer loggers. This is given Client loggers, and later Torrent loggers when the Torrent is
  1583  // set.
  1584  func (me *PeerConn) setPeerLoggers(a log.Logger, s *slog.Logger) {
  1585  	me.Peer.logger = a.WithDefaultLevel(log.Warning).WithContextText(fmt.Sprintf("%T %p", me, me))
  1586  	me.Peer.slogger = s.With(fmt.Sprintf("%T", me), fmt.Sprintf("%p", me))
  1587  	me.protocolLogger = me.logger.WithNames(protocolLoggingName)
  1588  }
  1589  
  1590  // Methods moved from peer.go (in their original order):
  1591  
// Initialize the container tracking our outstanding outbound requests to this peer.
func (p *PeerConn) initRequestState() {
	p.requestState.Requests = &peerRequests{}
}
  1595  
// Whether we should currently expect the peer to send us chunks: we must have outstanding
// requests and be interested, and either be unchoked or have allowed-fast coverage.
func (cn *PeerConn) expectingChunks() bool {
	if cn.requestState.Requests.IsEmpty() {
		return false
	}
	if !cn.requestState.Interested {
		return false
	}
	if !cn.peerChoking {
		return true
	}
	haveAllowedFastRequests := false
	cn.peerAllowedFast.Iterate(func(i pieceIndex) bool {
		// NOTE(review): this sets the flag when an allowed-fast piece has NO outstanding
		// requests (cardinality == 0). Intuitively we'd expect chunks while choked only when
		// some allowed-fast piece HAS outstanding requests (!= 0) — confirm the comparison is
		// intended.
		haveAllowedFastRequests = roaringBitmapRangeCardinality[RequestIndex](
			cn.requestState.Requests,
			cn.t.pieceRequestIndexBegin(i),
			cn.t.pieceRequestIndexBegin(i+1),
		) == 0
		// Stop iterating as soon as the flag is set.
		return !haveAllowedFastRequests
	})
	return haveAllowedFastRequests
}
  1617  
  1618  func (cn *PeerConn) cumInterest() time.Duration {
  1619  	ret := cn.priorInterest
  1620  	if cn.requestState.Interested {
  1621  		ret += time.Since(cn.lastBecameInterested)
  1622  	}
  1623  	return ret
  1624  }
  1625  
// Whether the peer advertised support for the named LTEP extension in its extended handshake.
func (cn *PeerConn) supportsExtension(ext pp.ExtensionName) bool {
	_, ok := cn.PeerExtensionIDs[ext]
	return ok
}
  1630  
  1631  // Inspired by https://github.com/transmission/transmission/wiki/Peer-Status-Text.
  1632  func (cn *PeerConn) statusFlags() (ret string) {
  1633  	c := func(b byte) {
  1634  		ret += string([]byte{b})
  1635  	}
  1636  	if cn.requestState.Interested {
  1637  		c('i')
  1638  	}
  1639  	if cn.choking {
  1640  		c('c')
  1641  	}
  1642  	c(':')
  1643  	ret += cn.connectionFlags()
  1644  	c(':')
  1645  	if cn.peerInterested {
  1646  		c('i')
  1647  	}
  1648  	if cn.peerChoking {
  1649  		c('c')
  1650  	}
  1651  	return
  1652  }
  1653  
  1654  func (cn *PeerConn) iterContiguousPieceRequests(f func(piece pieceIndex, count int)) {
  1655  	var last Option[pieceIndex]
  1656  	var count int
  1657  	next := func(item Option[pieceIndex]) {
  1658  		if item == last {
  1659  			count++
  1660  		} else {
  1661  			if count != 0 {
  1662  				f(last.Value, count)
  1663  			}
  1664  			last = item
  1665  			count = 1
  1666  		}
  1667  	}
  1668  	cn.requestState.Requests.Iterate(func(requestIndex requestStrategy.RequestIndex) bool {
  1669  		next(Some(cn.t.pieceIndexOfRequestIndex(requestIndex)))
  1670  		return true
  1671  	})
  1672  	next(None[pieceIndex]())
  1673  }
  1674  
// Write a human-readable status summary for this connection to w, used in client status dumps.
func (cn *PeerConn) peerImplWriteStatus(w io.Writer) {
	// BEP 40 canonical peer priority; include the error inline if it couldn't be computed.
	prio, err := cn.peerPriority()
	prioStr := fmt.Sprintf("%08x", prio)
	if err != nil {
		prioStr += ": " + err.Error()
	}
	fmt.Fprintf(w, "bep40-prio: %v\n", prioStr)
	fmt.Fprintf(w, "last msg: %s, connected: %s, last helpful: %s, itime: %s, etime: %s\n",
		eventAgeString(cn.lastMessageReceived),
		eventAgeString(cn.completedHandshake),
		eventAgeString(cn.lastHelpful()),
		cn.cumInterest(),
		cn.totalExpectingTime(),
	)
	fmt.Fprintf(w,
		"%s completed, chunks uploaded: %v\n",
		cn.completedString(),
		&cn._stats.ChunksWritten,
	)
	fmt.Fprintf(w, "requested pieces:")
	// One "piece(count)" entry per contiguous run of requested pieces.
	cn.iterContiguousPieceRequests(func(piece pieceIndex, count int) {
		fmt.Fprintf(w, " %v(%v)", piece, count)
	})
}
  1699  
  1700  func (cn *PeerConn) setInterested(interested bool) bool {
  1701  	if cn.requestState.Interested == interested {
  1702  		return true
  1703  	}
  1704  	cn.requestState.Interested = interested
  1705  	if interested {
  1706  		cn.lastBecameInterested = time.Now()
  1707  	} else if !cn.lastBecameInterested.IsZero() {
  1708  		cn.priorInterest += time.Since(cn.lastBecameInterested)
  1709  	}
  1710  	cn.updateExpectingChunks()
  1711  	return cn.writeInterested(interested)
  1712  }
  1713  
// This function seems to only used by Peer.request. It's all logic checks, so maybe we can no-op it
// when we want to go fast.
// Returns an error for recoverable reasons not to request; panics on broken invariants that
// indicate programmer error.
func (cn *PeerConn) shouldRequest(r RequestIndex) error {
	err := cn.t.checkValidReceiveChunk(cn.t.requestIndexToRequest(r))
	if err != nil {
		return err
	}
	pi := cn.t.pieceIndexOfRequestIndex(r)
	if cn.requestState.Cancelled.Contains(r) {
		// Re-requesting before the peer acknowledged the cancel would confuse accounting.
		return errors.New("request is cancelled and waiting acknowledgement")
	}
	if !cn.peerHasPiece(pi) {
		return errors.New("requesting piece peer doesn't have")
	}
	// Everything below is an invariant violation, not a recoverable condition.
	if !cn.t.peerIsActive(cn.peerPtr()) {
		panic("requesting but not in active conns")
	}
	if cn.closed.IsSet() {
		panic("requesting when connection is closed")
	}
	if cn.t.hashingPiece(pi) {
		panic("piece is being hashed")
	}
	p := cn.t.piece(pi)
	if p.marking {
		panic("piece is being marked")
	}
	if cn.t.pieceQueuedForHash(pi) {
		panic("piece is queued for hash")
	}
	if cn.peerChoking && !cn.peerAllowedFast.Contains(pi) {
		// This could occur if we made a request with the fast extension, and then got choked and
		// haven't had the request rejected yet.
		if !cn.requestState.Requests.Contains(r) {
			panic("peer choking and piece not allowed fast")
		}
	}
	return nil
}
  1753  
  1754  func (cn *PeerConn) mustRequest(r RequestIndex) bool {
  1755  	more, err := cn.request(r)
  1756  	if err != nil {
  1757  		panic(err)
  1758  	}
  1759  	return more
  1760  }
  1761  
// Issues a request for chunk r to the peer, recording it in both per-connection and per-torrent
// request state. Returns whether more messages may be written to the peer, and an error if the
// outstanding-request limit is reached. Panics if shouldRequest says the request is invalid.
func (cn *PeerConn) request(r RequestIndex) (more bool, err error) {
	if err := cn.shouldRequest(r); err != nil {
		panic(err)
	}
	// Already requested: no duplicate send, still writable.
	if cn.requestState.Requests.Contains(r) {
		return true, nil
	}
	if maxRequests(cn.requestState.Requests.GetCardinality()) >= cn.nominalMaxRequests() {
		return true, errors.New("too many outstanding requests")
	}
	cn.requestState.Requests.Add(r)
	// Lazily allocated map counting how many copies of each chunk we may still validly receive.
	if cn.validReceiveChunks == nil {
		cn.validReceiveChunks = make(map[RequestIndex]int)
	}
	cn.validReceiveChunks[r]++
	// Record torrent-wide which peer currently holds this request and when it was made. The conn
	// is held weakly (weak.Make) so this entry doesn't strongly retain it.
	cn.t.requestState[r] = requestState{
		peer: weak.Make(cn),
		when: time.Now(),
	}
	cn.updateExpectingChunks()
	ppReq := cn.t.requestIndexToRequest(r)
	for _, f := range cn.callbacks.SentRequest {
		f(PeerRequestEvent{cn.peerPtr(), ppReq})
	}
	// Actually queue the wire message; result says whether the write buffer has room for more.
	return cn._request(ppReq), nil
}
  1788  
  1789  func (me *PeerConn) cancel(r RequestIndex) {
  1790  	if !me.deleteRequest(r) {
  1791  		panic("request not existing should have been guarded")
  1792  	}
  1793  	me.handleCancel(r)
  1794  	me.decPeakRequests()
  1795  	if me.isLowOnRequests() {
  1796  		me.onNeedUpdateRequests(peerUpdateRequestsPeerCancelReason)
  1797  	}
  1798  }
  1799  
  1800  // Sets a reason to update requests, and if there wasn't already one, handle it.
  1801  func (cn *PeerConn) onNeedUpdateRequests(reason updateRequestReason) {
  1802  	if cn.needRequestUpdate != "" {
  1803  		return
  1804  	}
  1805  	cn.needRequestUpdate = reason
  1806  	// Run this before the Client lock is released.
  1807  	cn.locker().DeferUniqueUnaryFunc(cn, cn.handleOnNeedUpdateRequests)
  1808  }
  1809  
  1810  // Returns true if it was valid to reject the request.
  1811  func (c *PeerConn) remoteRejectedRequest(r RequestIndex) bool {
  1812  	if c.deleteRequest(r) {
  1813  		c.decPeakRequests()
  1814  	} else if !c.requestState.Cancelled.CheckedRemove(r) {
  1815  		// The request was already cancelled.
  1816  		return false
  1817  	}
  1818  	if c.isLowOnRequests() {
  1819  		c.onNeedUpdateRequests(peerUpdateRequestsRemoteRejectReason)
  1820  	}
  1821  	c.decExpectedChunkReceive(r)
  1822  	return true
  1823  }
  1824  
  1825  func (c *PeerConn) decExpectedChunkReceive(r RequestIndex) {
  1826  	count := c.validReceiveChunks[r]
  1827  	if count == 1 {
  1828  		delete(c.validReceiveChunks, r)
  1829  	} else if count > 1 {
  1830  		c.validReceiveChunks[r] = count - 1
  1831  	} else {
  1832  		panic(r)
  1833  	}
  1834  }
  1835  
// Returns true if an outstanding request is removed. Cancelled requests should be handled
// separately.
func (c *PeerConn) deleteRequest(r RequestIndex) bool {
	// Test-and-remove from this connection's outstanding set in one step.
	if !c.requestState.Requests.CheckedRemove(r) {
		return false
	}
	for _, f := range c.callbacks.DeletedRequest {
		f(PeerRequestEvent{c.peerPtr(), c.t.requestIndexToRequest(r)})
	}
	c.updateExpectingChunks()
	// Invariant: the torrent-level table must attribute this request to us before we clear it.
	// TODO: Can't this happen if a request is stolen?
	if c.t.requestingPeer(r) != c {
		panic("only one peer should have a given request at a time")
	}
	delete(c.t.requestState, r)
	// c.t.iterPeers(func(p *Peer) {
	// 	if p.isLowOnRequests() {
	// 		p.onNeedUpdateRequests("Peer.deleteRequest")
	// 	}
	// })
	return true
}
  1858  
  1859  func (c *PeerConn) deleteAllRequests(reason updateRequestReason) {
  1860  	if c.requestState.Requests.IsEmpty() {
  1861  		return
  1862  	}
  1863  	c.requestState.Requests.IterateSnapshot(func(x RequestIndex) bool {
  1864  		if !c.deleteRequest(x) {
  1865  			panic("request should exist")
  1866  		}
  1867  		return true
  1868  	})
  1869  	c.assertNoRequests()
  1870  	c.t.iterPeers(func(p *Peer) {
  1871  		if p.isLowOnRequests() {
  1872  			p.onNeedUpdateRequests(reason)
  1873  		}
  1874  	})
  1875  }
  1876  
  1877  func (c *PeerConn) assertNoRequests() {
  1878  	if !c.requestState.Requests.IsEmpty() {
  1879  		panic(c.requestState.Requests.GetCardinality())
  1880  	}
  1881  }
  1882  
  1883  func (c *PeerConn) cancelAllRequests() {
  1884  	c.requestState.Requests.IterateSnapshot(func(x RequestIndex) bool {
  1885  		c.cancel(x)
  1886  		return true
  1887  	})
  1888  	c.assertNoRequests()
  1889  }
  1890  
  1891  func (p *PeerConn) uncancelledRequests() uint64 {
  1892  	return p.requestState.Requests.GetCardinality()
  1893  }
  1894  
  1895  func (p *PeerConn) isLowOnRequests() bool {
  1896  	return p.requestState.Requests.IsEmpty() && p.requestState.Cancelled.IsEmpty()
  1897  }
  1898  
// Validates an incoming chunk against our request bookkeeping. intended reports whether we had an
// outstanding (or cancelled-but-unacknowledged) request for it; err is non-nil when the chunk
// wasn't expected at all.
func (c *PeerConn) checkReceivedChunk(req RequestIndex, msg *pp.Message, ppReq Request) (intended bool, err error) {
	// No remaining expected receives for this chunk: reject outright.
	if c.validReceiveChunks[req] <= 0 {
		ChunksReceived.Add("unexpected", 1)
		err = errors.New("received unexpected chunk")
		return
	}
	c.decExpectedChunkReceive(req)

	// Chunk arrived while choked but its piece is in the peer's allowed-fast set; count it.
	if c.peerChoking && c.peerAllowedFast.Contains(pieceIndex(ppReq.Index)) {
		ChunksReceived.Add("due to allowed fast", 1)
	}
	// The request needs to be deleted immediately to prevent cancels occurring asynchronously when
	// have actually already received the piece, while we have the Client unlocked to write the data
	// out.
	{
		// Fire ReceivedRequested callbacks only for requests still marked outstanding.
		if c.requestState.Requests.Contains(req) {
			for _, f := range c.callbacks.ReceivedRequested {
				f(PeerMessageEvent{c.peerPtr(), msg})
			}
		}
		// Request has been satisfied.
		if c.deleteRequest(req) || c.requestState.Cancelled.CheckedRemove(req) {
			intended = true
			if c.isLowOnRequests() {
				c.onNeedUpdateRequests("Peer.receiveChunk deleted request")
			}
		} else {
			ChunksReceived.Add("unintended", 1)
		}
	}

	return
}
  1932  
// Reconcile bytes transferred before connection was associated with a torrent.
func (c *PeerConn) reconcileHandshakeStats() {
	// Must only run once per connection.
	panicif.True(c.reconciledHandshakeStats)
	// Before association, only the raw byte counters should have moved: compare against a copy
	// that carries just those fields, so any other non-zero stat trips the panic.
	if c._stats != (ConnStats{
		// Handshakes should only increment these fields:
		BytesWritten: c._stats.BytesWritten,
		BytesRead:    c._stats.BytesRead,
	}) {
		panic("bad stats")
	}
	// Add the stat data so far to relevant Torrent stats that were skipped before the handshake
	// completed.
	c.relevantConnStats(&c.t.connStats)(func(cs *ConnStats) bool {
		cs.BytesRead.Add(c._stats.BytesRead.Int64())
		cs.BytesWritten.Add(c._stats.BytesWritten.Int64())
		return true
	})
	c.reconciledHandshakeStats = true
}