github.com/anacrolix/torrent@v1.61.0/peer.go (about)

     1  package torrent
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"io"
     7  	"iter"
     8  	"log/slog"
     9  	"net"
    10  	"strings"
    11  	"sync"
    12  	"time"
    13  
    14  	"github.com/RoaringBitmap/roaring"
    15  	"github.com/anacrolix/chansync"
    16  	. "github.com/anacrolix/generics"
    17  	"github.com/anacrolix/log"
    18  	"github.com/anacrolix/missinggo/v2/bitmap"
    19  	"github.com/anacrolix/missinggo/v2/panicif"
    20  	"github.com/anacrolix/multiless"
    21  
    22  	"github.com/anacrolix/torrent/mse"
    23  	pp "github.com/anacrolix/torrent/peer_protocol"
    24  	typedRoaring "github.com/anacrolix/torrent/typed-roaring"
    25  )
    26  
type (
	// Generic Peer-like fields. Could be WebSeed, BitTorrent over TCP, uTP or WebRTC.
	Peer struct {
		// First to ensure 64-bit alignment for atomics. See #262.
		_stats ConnStats

		// Back-references. t may be nil until a handshake attaches the peer to
		// a Torrent (see Torrent()).
		cl *Client
		t  *Torrent

		// Protocol-specific implementations; legacyPeerImpl may be a *PeerConn
		// (see TryAsPeerConn).
		legacyPeerImpl
		peerImpl  newHotPeerImpl
		callbacks *Callbacks

		RemoteAddr              PeerRemoteAddr
		Discovery               PeerSource
		trusted                 bool
		closed                  chansync.SetOnce
		closedCtx               context.Context
		closedCtxCancel         context.CancelFunc
		lastUsefulChunkReceived time.Time

		// Time accounting for periods while we expected chunks from this peer;
		// maintained by updateExpectingChunks, read by totalExpectingTime.
		lastStartedExpectingToReceiveChunks time.Time
		cumulativeExpectedToReceiveChunks   time.Duration
		// Pieces we've accepted chunks for from the peer.
		peerTouchedPieces map[pieceIndex]struct{}

		logger  log.Logger
		slogger *slog.Logger

		// Belongs in PeerConn:

		outgoing bool
		Network  string
		// The local address as observed by the remote peer. WebRTC seems to get this right without needing hints from the
		// config.
		localPublicAddr peerLocalPublicAddr
		bannableAddr    Option[bannableAddr]
		// True if the connection is operating over MSE obfuscation.
		headerEncrypted bool
		cryptoMethod    mse.CryptoMethod

		lastMessageReceived time.Time
		completedHandshake  time.Time
		lastChunkSent       time.Time

		// Stuff controlled by the local peer.
		needRequestUpdate    updateRequestReason
		updateRequestsTimer  *time.Timer
		lastRequestUpdate    time.Time
		peakRequests         maxRequests
		lastBecameInterested time.Time
		priorInterest        time.Duration

		choking bool

		// Stuff controlled by the remote peer.
		peerInterested        bool
		peerChoking           bool
		PeerPrefersEncryption bool // as indicated by 'e' field in extension handshake
		// The highest possible number of pieces the torrent could have based on
		// communication with the peer. Generally only useful until we have the
		// torrent info.
		peerMinPieces pieceIndex

		// The peer's Allowed Fast piece set.
		peerAllowedFast typedRoaring.Bitmap[pieceIndex]
	}

	// A short code recording how we discovered a peer; see the PeerSource*
	// constants.
	PeerSource string

	// Minimal interface required of a peer's remote address.
	PeerRemoteAddr interface {
		String() string
	}

	// Ordered set of outstanding request indices.
	peerRequests = orderedBitmap[RequestIndex]

	// Why a request update was triggered; see the peerUpdateRequests* reasons.
	updateRequestReason string
)
   104  
// Short codes identifying how we discovered a peer.
const (
	PeerSourceUtHolepunch     = "C"
	PeerSourceTracker         = "Tr"
	PeerSourceIncoming        = "I"
	PeerSourceDhtGetPeers     = "Hg" // Peers we found by searching a DHT.
	PeerSourceDhtAnnouncePeer = "Ha" // Peers that were announced to us by a DHT.
	PeerSourcePex             = "X"
	// The peer was given directly, such as through a magnet link.
	PeerSourceDirect = "M"
)
   115  
// These are grouped because we might vary update request behaviour depending on the reason. I'm not
// sure about the fact that multiple reasons can be triggered before an update runs, and only the
// first will count. Possibly we should be signalling what behaviours are appropriate in the next
// update instead. The string values appear in logs and status output.
const (
	peerUpdateRequestsPeerCancelReason   updateRequestReason = "Peer.cancel"
	peerUpdateRequestsRemoteRejectReason updateRequestReason = "Peer.remoteRejectedRequest"
)
   124  
// Returns the Torrent a Peer belongs to. Shouldn't change for the lifetime of the Peer. May be nil
// if we are the receiving end of a connection and the handshake hasn't been received or accepted
// yet, so callers should nil-check in that case.
func (p *Peer) Torrent() *Torrent {
	return p.t
}
   131  
   132  func (p *Peer) Stats() (ret PeerStats) {
   133  	p.locker().RLock()
   134  	defer p.locker().RUnlock()
   135  	ret.ConnStats = p._stats.Copy()
   136  	ret.DownloadRate = p.downloadRate()
   137  	ret.LastWriteUploadRate = p.peerImpl.lastWriteUploadRate()
   138  	ret.RemotePieceCount = p.remotePieceCount()
   139  	return
   140  }
   141  
   142  func (cn *Peer) updateExpectingChunks() {
   143  	if cn.peerImpl.expectingChunks() {
   144  		if cn.lastStartedExpectingToReceiveChunks.IsZero() {
   145  			cn.lastStartedExpectingToReceiveChunks = time.Now()
   146  		}
   147  	} else {
   148  		if !cn.lastStartedExpectingToReceiveChunks.IsZero() {
   149  			cn.cumulativeExpectedToReceiveChunks += time.Since(cn.lastStartedExpectingToReceiveChunks)
   150  			cn.lastStartedExpectingToReceiveChunks = time.Time{}
   151  		}
   152  	}
   153  }
   154  
// Shortcut to the client-level lock that guards peer and torrent state.
func (cn *Peer) locker() *lockWithDeferreds {
	return cn.t.cl.locker()
}
   158  
// The best guess at number of pieces in the torrent for this peer.
func (cn *Peer) bestPeerNumPieces() pieceIndex {
	// With the info we know the true piece count.
	if cn.t.haveInfo() {
		return cn.t.numPieces()
	}
	// Otherwise fall back to the minimum implied by the peer's messages.
	return cn.peerMinPieces
}
   166  
   167  // How many pieces we think the peer has.
   168  func (cn *Peer) remotePieceCount() pieceIndex {
   169  	have := pieceIndex(cn.peerPieces().GetCardinality())
   170  	if all, _ := cn.peerHasAllPieces(); all {
   171  		have = cn.bestPeerNumPieces()
   172  	}
   173  	return have
   174  }
   175  
// Human-readable "have/total" piece counts for status output.
func (cn *Peer) completedString() string {
	return fmt.Sprintf("%d/%d", cn.remotePieceCount(), cn.bestPeerNumPieces())
}
   179  
   180  func eventAgeString(t time.Time) string {
   181  	if t.IsZero() {
   182  		return "never"
   183  	}
   184  	return fmt.Sprintf("%.2fs ago", time.Since(t).Seconds())
   185  }
   186  
   187  func (cn *Peer) downloadRate() float64 {
   188  	num := cn._stats.BytesReadUsefulData.Int64()
   189  	if num == 0 {
   190  		return 0
   191  	}
   192  	return float64(num) / cn.totalExpectingTime().Seconds()
   193  }
   194  
// DownloadRate returns the peer's estimated download rate in bytes per second.
//
// Deprecated: Use Peer.Stats.
func (p *Peer) DownloadRate() float64 {
	return p.Stats().DownloadRate
}
   199  
// Writes a multi-line human-readable status summary for this peer to w.
func (cn *Peer) writeStatus(w io.Writer) {
	// \t isn't preserved in <pre> blocks?
	if cn.closed.IsSet() {
		fmt.Fprint(w, "CLOSED: ")
	}
	// Impl-specific status first, then generic chunk/rate accounting.
	fmt.Fprintln(w, strings.Join(cn.peerImplStatusLines(), "\n"))
	cn.peerImplWriteStatus(w)
	fmt.Fprintf(w,
		"%d pieces touched, good chunks: %v/%v, dr: %.1f KiB/s\n",
		len(cn.peerTouchedPieces),
		&cn._stats.ChunksReadUseful,
		&cn._stats.ChunksRead,
		cn.downloadRate()/(1<<10),
	)
	fmt.Fprintf(w, "\n")
}
   216  
// Idempotent teardown: cancels the closed context, stops the update-requests
// timer, runs impl-specific close, releases piece availability, and fires
// PeerClosed callbacks. Caller must hold the client lock (see Close).
func (p *Peer) close() {
	// Only the first caller to set the flag performs teardown.
	if !p.closed.Set() {
		return
	}
	// Not set until Torrent is known.
	if p.closedCtx != nil {
		p.closedCtxCancel()
	}
	if p.updateRequestsTimer != nil {
		p.updateRequestsTimer.Stop()
	}
	p.legacyPeerImpl.onClose()
	// t is nil if the handshake never attached us to a Torrent.
	if p.t != nil {
		p.t.decPeerPieceAvailability(p)
	}
	for _, f := range p.callbacks.PeerClosed {
		f(p)
	}
}
   236  
// Close closes the peer under the client lock. Always returns nil.
func (p *Peer) Close() error {
	p.locker().Lock()
	defer p.locker().Unlock()
	p.close()
	return nil
}
   243  
   244  // Peer definitely has a piece, for purposes of requesting. So it's not sufficient that we think
   245  // they do (known=true).
   246  func (cn *Peer) peerHasPiece(piece pieceIndex) bool {
   247  	if all, known := cn.peerHasAllPieces(); all && known {
   248  		return true
   249  	}
   250  	return cn.peerPieces().ContainsInt(piece)
   251  }
   252  
// Water marks for the peer write buffer. 64KiB, but temporarily less to work
// around an issue with WebRTC. TODO: Update when
// https://github.com/pion/datachannel/issues/59 is fixed.
const (
	writeBufferHighWaterLen = 1 << 15
	writeBufferLowWaterLen  = writeBufferHighWaterLen / 2
)
   259  
var (
	// Wire sizes of the fixed-format messages, used to budget the write buffer.
	interestedMsgLen = len(pp.Message{Type: pp.Interested}.MustMarshalBinary())
	requestMsgLen    = len(pp.Message{Type: pp.Request}.MustMarshalBinary())
	// This is the maximum request count that could fit in the write buffer if it's at or below the
	// low water mark when we run maybeUpdateActualRequestState.
	maxLocalToRemoteRequests = (writeBufferHighWaterLen - writeBufferLowWaterLen - interestedMsgLen) / requestMsgLen
)
   267  
   268  func (cn *Peer) totalExpectingTime() (ret time.Duration) {
   269  	ret = cn.cumulativeExpectedToReceiveChunks
   270  	if !cn.lastStartedExpectingToReceiveChunks.IsZero() {
   271  		ret += time.Since(cn.lastStartedExpectingToReceiveChunks)
   272  	}
   273  	return
   274  }
   275  
// A messageWriter takes a message to be sent, and returns true if more
// messages are okay to write.
type messageWriter func(pp.Message) bool
   279  
   280  // All ConnStats that include this connection. Some objects are not known until the handshake is
   281  // complete, after which it's expected to reconcile the differences.
   282  func (cn *Peer) modifyRelevantConnStats(f func(*ConnStats)) {
   283  	// Every peer has basic ConnStats for now.
   284  	f(&cn._stats)
   285  	incAll := func(stats *ConnStats) bool {
   286  		f(stats)
   287  		return true
   288  	}
   289  	cn.upstreamConnStats()(incAll)
   290  }
   291  
// Yields relevant upstream ConnStats. Skips Torrent if it isn't set.
func (cn *Peer) upstreamConnStats() iter.Seq[*ConnStats] {
	return func(yield func(*ConnStats) bool) {
		// t is nil until the handshake attaches the peer to a Torrent, so
		// Torrent-level stats are skipped until then.
		if cn.t != nil {
			cn.relevantConnStats(&cn.t.connStats)(yield)
		}
		// Client-level stats always apply.
		cn.relevantConnStats(&cn.cl.connStats)(yield)
		// NOTE(review): yield's result is not propagated between the two inner
		// sequences, so an early-terminating consumer may still receive yields
		// from the second — confirm callers never break early.
	}
}
   302  
// Adds n to the BytesRead counter in every ConnStats relevant to this peer.
func (cn *Peer) readBytes(n int64) {
	cn.modifyRelevantConnStats(add(n, func(cs *ConnStats) *Count { return &cs.BytesRead }))
}
   306  
   307  func (c *Peer) lastHelpful() (ret time.Time) {
   308  	ret = c.lastUsefulChunkReceived
   309  	if c.t.seeding() && c.lastChunkSent.After(ret) {
   310  		ret = c.lastChunkSent
   311  	}
   312  	return
   313  }
   314  
   315  // Returns whether any part of the chunk would lie outside a piece of the given length.
   316  func chunkOverflowsPiece(cs ChunkSpec, pieceLength pp.Integer) bool {
   317  	switch {
   318  	default:
   319  		return false
   320  	case cs.Begin+cs.Length > pieceLength:
   321  	// Check for integer overflow
   322  	case cs.Begin > pp.IntegerMax-cs.Length:
   323  	}
   324  	return true
   325  }
   326  
   327  func runSafeExtraneous(f func()) {
   328  	if true {
   329  		go f()
   330  	} else {
   331  		f()
   332  	}
   333  }
   334  
// Records a received chunk of the given size in all relevant ConnStats.
func (c *Peer) doChunkReadStats(size int64) {
	c.modifyRelevantConnStats(func(cs *ConnStats) { cs.receivedChunk(size) })
}
   338  
// Handle a received chunk from a peer. TODO: Break this out into non-wire protocol specific
// handling. Avoid shoehorning into a pp.Message.
func (c *Peer) receiveChunk(msg *pp.Message) error {
	ChunksReceived.Add("total", 1)

	ppReq := newRequestFromMessage(msg)
	t := c.t
	err := t.checkValidReceiveChunk(ppReq)
	if err != nil {
		err = log.WithLevel(log.Warning, err)
		return err
	}
	req := c.t.requestIndexFromRequest(ppReq)

	// OnceFunc so the deferred call below is a no-op if we already ran it
	// while the client was unlocked during the storage write.
	recordBlockForSmartBan := sync.OnceFunc(func() {
		c.recordBlockForSmartBan(req, msg.Piece)
	})
	// This needs to occur before we return, but we try to do it when the client is unlocked. It
	// can't be done before checking if chunks are valid because they won't be deallocated from the
	// smart ban cache by piece hashing if they're out of bounds.
	defer recordBlockForSmartBan()

	if c.peerChoking {
		ChunksReceived.Add("while choked", 1)
	}

	// intended reports whether this chunk answers a request we actually made.
	intended, err := c.peerImpl.checkReceivedChunk(req, msg, ppReq)
	if err != nil {
		return err
	}

	cl := t.cl

	// Do we actually want this chunk?
	if t.haveChunk(ppReq) {
		// panic(fmt.Sprintf("%+v", ppReq))
		ChunksReceived.Add("redundant", 1)
		c.modifyRelevantConnStats(add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadWasted }))
		return nil
	}

	piece := t.piece(ppReq.Index.Int())

	c.modifyRelevantConnStats(add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadUseful }))
	c.modifyRelevantConnStats(add(int64(len(msg.Piece)), func(cs *ConnStats) *Count { return &cs.BytesReadUsefulData }))
	if intended {
		c.modifyRelevantConnStats(add(int64(len(msg.Piece)), func(cs *ConnStats) *Count { return &cs.BytesReadUsefulIntendedData }))
	}
	for _, f := range c.t.cl.config.Callbacks.ReceivedUsefulData {
		f(ReceivedUsefulDataEvent{c, msg})
	}
	c.lastUsefulChunkReceived = time.Now()

	// Need to record that it hasn't been written yet, before we attempt to do
	// anything with it.
	piece.incrementPendingWrites()
	// Record that we have the chunk, so we aren't trying to download it while
	// waiting for it to be written to storage.
	piece.unpendChunkIndex(chunkIndexFromChunkSpec(ppReq.ChunkSpec, t.chunkSize))

	// Cancel pending requests for this chunk from *other* peers.
	if p := t.requestingPeer(req); p != nil {
		if p.peerPtr() == c {
			p.logger.Slogger().Error("received chunk but still pending request", "peer", p, "req", req)
			panic("should not be pending request from conn that just received it")
		}
		p.cancel(req)
	}

	// Write the chunk to storage with the client lock released, relocking
	// before returning.
	err = func() error {
		cl.unlock()
		defer cl.lock()
		// Opportunistically do this here while we aren't holding the client lock.
		recordBlockForSmartBan()
		concurrentChunkWrites.Add(1)
		defer concurrentChunkWrites.Add(-1)
		// Write the chunk out. Note that the upper bound on chunk writing concurrency will be the
		// number of connections. We write inline with receiving the chunk (with this lock dance),
		// because we want to handle errors synchronously and I haven't thought of a nice way to
		// defer any concurrency to the storage and have that notify the client of errors. TODO: Do
		// that instead.
		return t.writeChunk(int(msg.Index), int64(msg.Begin), msg.Piece)
	}()

	piece.decrementPendingWrites()

	if err != nil {
		c.logger.WithDefaultLevel(log.Error).Printf("writing received chunk %v: %v", req, err)
		// Make the chunk requestable again since the write failed.
		t.pendRequest(req)
		// Necessary to pass TestReceiveChunkStorageFailureSeederFastExtensionDisabled. I think a
		// request update runs while we're writing the chunk that just failed. Then we never do a
		// fresh update after pending the failed request.
		c.onNeedUpdateRequests("Peer.receiveChunk error writing chunk")
		t.onWriteChunkErr(err)
		return nil
	}

	c.onDirtiedPiece(pieceIndex(ppReq.Index))

	// We need to ensure the piece is only queued once, so only the last chunk writer gets this job.
	if t.pieceAllDirty(pieceIndex(ppReq.Index)) && piece.pendingWrites == 0 {
		t.queuePieceCheck(pieceIndex(ppReq.Index))
		// We don't pend all chunks here anymore because we don't want code dependent on the dirty
		// chunk status (such as the haveChunk call above) to have to check all the various other
		// piece states like queued for hash, hashing etc. This does mean that we need to be sure
		// that chunk pieces are pended at an appropriate time later however.
	}

	cl.event.Broadcast()
	// We do this because we've written a chunk, and may change PieceState.Partial.
	t.deferPublishPieceStateChange(pieceIndex(ppReq.Index))

	return nil
}
   453  
   454  func (c *Peer) onDirtiedPiece(piece pieceIndex) {
   455  	if c.peerTouchedPieces == nil {
   456  		c.peerTouchedPieces = make(map[pieceIndex]struct{})
   457  	}
   458  	c.peerTouchedPieces[piece] = struct{}{}
   459  	ds := &c.t.pieces[piece].dirtiers
   460  	if *ds == nil {
   461  		*ds = make(map[*Peer]struct{})
   462  	}
   463  	(*ds)[c] = struct{}{}
   464  }
   465  
// Pieces this peer contributed to that hashed good, net of those that hashed
// bad. Can be negative.
func (cn *Peer) netGoodPiecesDirtied() int64 {
	return cn._stats.PiecesDirtiedGood.Int64() - cn._stats.PiecesDirtiedBad.Int64()
}
   469  
   470  func (c *Peer) peerHasWantedPieces() bool {
   471  	if all, _ := c.peerHasAllPieces(); all {
   472  		return !c.t.haveAllPieces() && !c.t._pendingPieces.IsEmpty()
   473  	}
   474  	if !c.t.haveInfo() {
   475  		return !c.peerPieces().IsEmpty()
   476  	}
   477  	return c.peerPieces().Intersects(&c.t._pendingPieces)
   478  }
   479  
// BEP 40 canonical priority for this peer relative to our local public address.
func (c *Peer) peerPriority() (peerPriority, error) {
	return bep40Priority(c.remoteIpPort(), c.localPublicAddr)
}
   483  
   484  func (c *Peer) remoteIp() net.IP {
   485  	host, _, _ := net.SplitHostPort(c.RemoteAddr.String())
   486  	return net.ParseIP(host)
   487  }
   488  
// Best-effort remote IP and port. The conversion error is ignored; on failure
// the zero IpPort is presumably returned — TODO confirm tryIpPortFromNetAddr's
// failure value.
func (c *Peer) remoteIpPort() IpPort {
	ipa, _ := tryIpPortFromNetAddr(c.RemoteAddr)
	return IpPort{ipa.IP, uint16(ipa.Port)}
}
   493  
// Summarizes how much we trust this peer, for comparisons via connectionTrust.Cmp.
func (c *Peer) trust() connectionTrust {
	return connectionTrust{c.trusted, c.netGoodPiecesDirtied()}
}
   497  
// connectionTrust orders peers by explicit trust, then by net good pieces
// dirtied. NOTE(review): "Dirted" looks like a typo of "Dirtied"; renaming the
// field would touch all users, so it's only flagged here.
type connectionTrust struct {
	Implicit            bool
	NetGoodPiecesDirted int64
}
   502  
   503  func (l connectionTrust) Cmp(r connectionTrust) int {
   504  	return multiless.New().Bool(l.Implicit, r.Implicit).Int64(l.NetGoodPiecesDirted, r.NetGoodPiecesDirted).OrderingInt()
   505  }
   506  
   507  // Returns a new Bitmap that includes bits for all pieces the peer could have based on their claims.
   508  func (cn *Peer) newPeerPieces() *roaring.Bitmap {
   509  	// TODO: Can we use copy on write?
   510  	ret := cn.peerPieces().Clone()
   511  	if all, _ := cn.peerHasAllPieces(); all {
   512  		if cn.t.haveInfo() {
   513  			ret.AddRange(0, bitmap.BitRange(cn.t.numPieces()))
   514  		} else {
   515  			ret.AddRange(0, bitmap.ToEnd)
   516  		}
   517  	}
   518  	return ret
   519  }
   520  
// TryAsPeerConn returns the underlying *PeerConn and whether the downcast
// succeeded (false for other peer kinds, e.g. webseeds).
func (p *Peer) TryAsPeerConn() (*PeerConn, bool) {
	pc, ok := p.legacyPeerImpl.(*PeerConn)
	return pc, ok
}
   525  
// Our local address as observed by the remote peer. Alias of IpPort.
type peerLocalPublicAddr = IpPort
   527  
// Decrements the peak request counter. Deliberately allowed to go negative;
// see the retained commented-out check below for the rationale.
func (p *Peer) decPeakRequests() {
	// // This can occur when peak requests are altered by the update request timer to be lower than
	// // the actual number of outstanding requests. Let's let it go negative and see what happens. I
	// // wonder what happens if maxRequests is not signed.
	// if p.peakRequests < 1 {
	// 	panic(p.peakRequests)
	// }
	p.peakRequests--
}
   537  
   538  func (p *Peer) recordBlockForSmartBan(req RequestIndex, blockData []byte) {
   539  	if p.bannableAddr.Ok {
   540  		p.t.smartBanCache.RecordBlock(p.bannableAddr.Value, req, blockData)
   541  	}
   542  }
   543  
// Derives the peer's closed context from the Torrent's closed context. Must be
// called exactly once, after the Torrent is known.
func (p *Peer) initClosedCtx() {
	// A second call would leak the earlier CancelFunc.
	panicif.NotNil(p.closedCtx)
	p.closedCtx, p.closedCtxCancel = context.WithCancel(p.t.closedCtx)
}
   548  
   549  // Iterates base and peer-impl specific ConnStats from all.
   550  func (p *Peer) relevantConnStats(all *AllConnStats) iter.Seq[*ConnStats] {
   551  	return func(yield func(*ConnStats) bool) {
   552  		yield(&all.ConnStats)
   553  		yield(p.peerImpl.allConnStatsImplField(all))
   554  	}
   555  }