github.com/anacrolix/torrent@v1.61.0/requesting.go (about)

     1  package torrent
     2  
     3  import (
     4  	"cmp"
     5  	"context"
     6  	"encoding/gob"
     7  	"fmt"
     8  	"reflect"
     9  	"runtime/pprof"
    10  	"time"
    11  	"unsafe"
    12  
    13  	"github.com/RoaringBitmap/roaring"
    14  	g "github.com/anacrolix/generics"
    15  	"github.com/anacrolix/generics/heap"
    16  	"github.com/anacrolix/log"
    17  	"github.com/anacrolix/multiless"
    18  
    19  	requestStrategy "github.com/anacrolix/torrent/internal/request-strategy"
    20  	"github.com/anacrolix/torrent/metainfo"
    21  	typedRoaring "github.com/anacrolix/torrent/typed-roaring"
    22  )
    23  
type (
	// Since we have to store all the requests in memory, we can't reasonably exceed what could be
	// indexed with the memory space available.
	// Alias of the platform int; used as a named conversion for request counts.
	maxRequests = int
)
    29  
// requestStrategyPieceOrderState snapshots the request-strategy-relevant state of piece i: its
// pure priority, whether it is partially downloaded, and its availability.
func (t *Torrent) requestStrategyPieceOrderState(i int) requestStrategy.PieceRequestOrderState {
	t.slogger().Debug("requestStrategyPieceOrderState", "pieceIndex", i)
	return requestStrategy.PieceRequestOrderState{
		Priority:     t.piece(i).purePriority(),
		Partial:      t.piecePartiallyDownloaded(i),
		Availability: t.piece(i).availability(),
	}
}
    38  
func init() {
	// Register peerId so values of it can be gob-encoded/decoded (it implements GobEncode and
	// GobDecode below).
	gob.Register(peerId{})
}
    42  
// peerId identifies a Peer by its pointer value. ptr carries the same address as a uintptr so it
// can be serialized raw by GobEncode/GobDecode; the identity is only meaningful in-process.
type peerId struct {
	*Peer
	// Raw address of the Peer, kept in sync with the embedded pointer by GobDecode.
	ptr uintptr
}
    47  
// Uintptr returns the raw pointer value that identifies this peer.
func (p peerId) Uintptr() uintptr {
	return p.ptr
}
    51  
    52  func (p peerId) GobEncode() (b []byte, _ error) {
    53  	*(*reflect.SliceHeader)(unsafe.Pointer(&b)) = reflect.SliceHeader{
    54  		Data: uintptr(unsafe.Pointer(&p.ptr)),
    55  		Len:  int(unsafe.Sizeof(p.ptr)),
    56  		Cap:  int(unsafe.Sizeof(p.ptr)),
    57  	}
    58  	return
    59  }
    60  
// GobDecode reconstructs a peerId from the raw pointer bytes produced by GobEncode, overwriting
// both the ptr field and the embedded *Peer with the decoded address. Only valid within the
// process that produced the encoding.
func (p *peerId) GobDecode(b []byte) error {
	if uintptr(len(b)) != unsafe.Sizeof(p.ptr) {
		// A size mismatch means the payload wasn't produced by GobEncode; treated as a
		// programmer error rather than a recoverable decode error.
		panic(len(b))
	}
	ptr := unsafe.Pointer(&b[0])
	p.ptr = *(*uintptr)(ptr)
	// NOTE(review): looks like leftover debug output — consider removing (the log import would
	// then need checking for other uses in this file).
	log.Printf("%p", ptr)
	// View the embedded *Peer field's storage as bytes and copy the decoded address into it.
	// reflect.SliceHeader is deprecated; unsafe.Slice would be the modern equivalent.
	dst := reflect.SliceHeader{
		Data: uintptr(unsafe.Pointer(&p.Peer)),
		Len:  int(unsafe.Sizeof(p.Peer)),
		Cap:  int(unsafe.Sizeof(p.Peer)),
	}
	copy(*(*[]byte)(unsafe.Pointer(&dst)), b)
	return nil
}
    76  
type (
	// A request index is a chunk indexed across the entire torrent. It's a single integer and can
	// be converted to a protocol request. TODO: This should be private.
	RequestIndex = requestStrategy.RequestIndex
	// This is request index but per-piece.
	chunkIndexType = requestStrategy.ChunkIndex
	// Distinct type for indexing webseed request slices, to avoid mixing with plain RequestIndex.
	webseedSliceIndex RequestIndex
)
    85  
// desiredPeerRequests holds the candidate request indexes for a peer, heap-ordered by
// lessByValue. pieceStates caches the per-piece request-order state used by the comparisons.
type desiredPeerRequests struct {
	// Candidate requests, later arranged into a heap by applyRequestState.
	requestIndexes []RequestIndex
	// The peer these requests would be sent to.
	peer *PeerConn
	// Indexed by piece index; set only for pieces that were offered as requestable.
	pieceStates []g.Option[requestStrategy.PieceRequestOrderState]
}
    91  
// lessByValue reports whether leftRequest should be issued before rightRequest. It chains
// comparisons via multiless: serveability while choked, piece priority, who (if anyone) already
// requested the chunk, the existing requester's load and request recency, then webseed-aware or
// availability/piece-order tie-breaks, ending in raw request index order.
func (p *desiredPeerRequests) lessByValue(leftRequest, rightRequest RequestIndex) bool {
	t := p.peer.t
	leftPieceIndex := t.pieceIndexOfRequestIndex(leftRequest)
	rightPieceIndex := t.pieceIndexOfRequestIndex(rightRequest)
	ml := multiless.New()
	// Push requests that can't be served right now to the end. But we don't throw them away unless
	// there's a better alternative. This is for when we're using the fast extension and get choked
	// but our requests could still be good when we get unchoked.
	if p.peer.peerChoking {
		ml = ml.Bool(
			!p.peer.peerAllowedFast.Contains(leftPieceIndex),
			!p.peer.peerAllowedFast.Contains(rightPieceIndex),
		)
	}
	// Both pieces were set in getDesiredRequestState before their requests were added, so the
	// options must be populated here.
	leftPiece := p.pieceStates[leftPieceIndex].UnwrapPtr()
	rightPiece := p.pieceStates[rightPieceIndex].UnwrapPtr()
	// Putting this first means we can steal requests from lesser-performing peers for our first few
	// new requests.
	priority := func() PiecePriority {
		// Technically we would be happy with the cached priority here, except we don't actually
		// cache it anymore, and Torrent.PiecePriority just does another lookup of *Piece to resolve
		// the priority through Piece.purePriority, which is probably slower.
		leftPriority := leftPiece.Priority
		rightPriority := rightPiece.Priority
		ml = ml.Int(
			-int(leftPriority),
			-int(rightPriority),
		)
		if !ml.Ok() {
			// Sanity check: if the comparison didn't resolve, the priorities must be equal.
			if leftPriority != rightPriority {
				panic("expected equal")
			}
		}
		return leftPriority
	}()
	if ml.Ok() {
		return ml.MustLess()
	}
	leftRequestState := t.requestState[leftRequest]
	rightRequestState := t.requestState[rightRequest]
	leftPeer := leftRequestState.peer.Value()
	rightPeer := rightRequestState.peer.Value()
	// Prefer chunks already requested from this peer.
	ml = ml.Bool(rightPeer == p.peer, leftPeer == p.peer)
	// Prefer unrequested chunks.
	ml = ml.Bool(rightPeer == nil, leftPeer == nil)
	if ml.Ok() {
		return ml.MustLess()
	}
	if leftPeer != nil {
		// The right peer should also be set, or we'd have resolved the computation by now.
		// Prefer stealing from the peer with more outstanding requests.
		ml = ml.Uint64(
			rightPeer.requestState.Requests.GetCardinality(),
			leftPeer.requestState.Requests.GetCardinality(),
		)
		// Could either of the lastRequested be Zero? That's what checking an existing peer is for.
		leftLast := leftRequestState.when
		rightLast := rightRequestState.when
		if leftLast.IsZero() || rightLast.IsZero() {
			panic("expected non-zero last requested times")
		}
		// We want the most-recently requested on the left. Clients like Transmission serve requests
		// in received order, so the most recently-requested is the one that has the longest until
		// it will be served and therefore is the best candidate to cancel.
		ml = ml.CmpInt64(rightLast.Sub(leftLast).Nanoseconds())
	}
	// Just trigger on any webseed requests present on the Torrent. That suggests that the Torrent
	// or files are prioritized enough to compete with PeerConn requests. Later we could filter on
	// webseeds actually requesting or supporting requests for the pieces we're comparing.
	if t.hasActiveWebseedRequests() {
		// Prefer the highest possible request index, since webseeds prefer the lowest. Additionally,
		// this should mean remote clients serve in reverse order so we meet webseeds responses in
		// the middle.
		ml = ml.Cmp(-cmp.Compare(leftRequest, rightRequest))
	} else {
		// Prefer rarer pieces, then piece order per the request strategy.
		ml = ml.Int(
			leftPiece.Availability,
			rightPiece.Availability)
		if priority == PiecePriorityReadahead {
			// TODO: For readahead in particular, it would be even better to consider distance from the
			// reader position so that reads earlier in a torrent don't starve reads later in the
			// torrent. This would probably require reconsideration of how readahead priority works.
			ml = ml.Int(leftPieceIndex, rightPieceIndex)
		} else {
			ml = ml.Int(t.pieceRequestOrder[leftPieceIndex], t.pieceRequestOrder[rightPieceIndex])
		}
		ml = multiless.EagerOrdered(ml, leftRequest, rightRequest)
	}
	// Prefer request indexes in order for storage write performance. Since the heap request heap
	// does not contain duplicates, if we order at the request index level we should never have any
	// ambiguity.
	return ml.MustLess()
}
   185  
// desiredRequestState is the best-case request set computed for a peer, plus whether we should
// express interest to that peer.
type desiredRequestState struct {
	Requests   desiredPeerRequests
	Interested bool
}
   190  
   191  func (cl *Client) getRequestablePieces(key clientPieceRequestOrderKeySumType, f requestStrategy.RequestPieceFunc) {
   192  	input := key.getRequestStrategyInput(cl)
   193  	order := cl.pieceRequestOrder[key].pieces
   194  	requestStrategy.GetRequestablePieces(input, order, f)
   195  }
   196  
// getRequestablePieces visits this torrent's requestable pieces via the client-level request
// order keyed by this torrent.
func (t *Torrent) getRequestablePieces(f requestStrategy.RequestPieceFunc) {
	t.cl.getRequestablePieces(t.clientPieceRequestOrderKey(), f)
}
   200  
// This gets the best-case request state. That means handling pieces limited by capacity, preferring
// earlier pieces, low availability etc. It pays no attention to existing requests on the peer or
// other peers. Those are handled later.
func (p *PeerConn) getDesiredRequestState() (desired desiredRequestState) {
	t := p.t
	// No requests can be made before the info is known, after close, or while downloading is
	// disallowed; return the zero state.
	if !t.haveInfo() {
		return
	}
	if t.closed.IsSet() {
		return
	}
	if t.dataDownloadDisallowed.Bool() {
		return
	}
	// Reuse the Torrent-level scratch slices to avoid per-call allocation.
	requestHeap := desiredPeerRequests{
		peer:           p,
		pieceStates:    t.requestPieceStates,
		requestIndexes: t.requestIndexes,
	}
	clear(requestHeap.pieceStates)
	t.logPieceRequestOrder()
	// Caller-provided allocation for roaring bitmap iteration.
	var it typedRoaring.Iterator[RequestIndex]
	t.getRequestablePieces(
		func(ih metainfo.Hash, pieceIndex int, pieceExtra requestStrategy.PieceRequestOrderState) bool {
			// The request order may contain pieces from other torrents; skip those.
			if ih != *t.canonicalShortInfohash() {
				return true
			}
			if !p.peerHasPiece(pieceIndex) {
				return true
			}
			// Cache the piece state for lessByValue comparisons later.
			requestHeap.pieceStates[pieceIndex].Set(pieceExtra)
			allowedFast := p.peerAllowedFast.Contains(pieceIndex)
			t.iterUndirtiedRequestIndexesInPiece(&it, pieceIndex, func(r requestStrategy.RequestIndex) {
				if !allowedFast {
					// We must signal interest to request this. TODO: We could set interested if the
					// peers pieces (minus the allowed fast set) overlap with our missing pieces if
					// there are any readers, or any pending pieces.
					desired.Interested = true
					// We can make or will allow sustaining a request here if we're not choked, or
					// have made the request previously (presumably while unchoked), and haven't had
					// the peer respond yet (and the request was retained because we are using the
					// fast extension).
					if p.peerChoking && !p.requestState.Requests.Contains(r) {
						// We can't request this right now.
						return
					}
				}
				cancelled := &p.requestState.Cancelled
				if !cancelled.IsEmpty() && cancelled.Contains(r) {
					// Can't re-request while awaiting acknowledgement.
					return
				}
				requestHeap.requestIndexes = append(requestHeap.requestIndexes, r)
			})
			// Keep iterating over further pieces.
			return true
		},
	)
	t.assertPendingRequests()
	desired.Requests = requestHeap
	return
}
   263  
   264  // Update requests if there's a reason assigned.
   265  func (p *PeerConn) maybeUpdateActualRequestState() {
   266  	if p.needRequestUpdate == "" {
   267  		return
   268  	}
   269  	p.updateRequestsWithReason(p.needRequestUpdate)
   270  }
   271  
   272  // Updates requests right now with the given reason. Clobbers any deferred reason if there was one.
   273  // Does all the necessary checks and includes profiler tags to assign the overhead.
   274  func (p *PeerConn) updateRequestsWithReason(reason updateRequestReason) {
   275  	if p.closed.IsSet() {
   276  		return
   277  	}
   278  	if reason == peerUpdateRequestsTimerReason {
   279  		since := time.Since(p.lastRequestUpdate)
   280  		if since < updateRequestsTimerDuration {
   281  			panic(since)
   282  		}
   283  	}
   284  	if p.t.cl.config.Debug {
   285  		p.logger.Slogger().Debug("updating requests", "reason", p.needRequestUpdate)
   286  	}
   287  	pprof.Do(
   288  		context.Background(),
   289  		pprof.Labels("update request", string(reason)),
   290  		func(_ context.Context) {
   291  			p.updateRequests()
   292  		},
   293  	)
   294  	// Is there any chance we should leave this to run again, and have the caller clear it if they
   295  	// called with this reason?
   296  	p.needRequestUpdate = ""
   297  	p.lastRequestUpdate = time.Now()
   298  	if enableUpdateRequestsTimer {
   299  		p.updateRequestsTimer.Reset(updateRequestsTimerDuration)
   300  	}
   301  }
   302  
   303  func (p *PeerConn) updateRequests() {
   304  	next := p.getDesiredRequestState()
   305  	p.applyRequestState(next)
   306  	p.t.cacheNextRequestIndexesForReuse(next.Requests.requestIndexes)
   307  }
   308  
   309  func (t *Torrent) cacheNextRequestIndexesForReuse(slice []RequestIndex) {
   310  	// The incoming slice can be smaller when getDesiredRequestState short circuits on some
   311  	// conditions.
   312  	if cap(slice) > cap(t.requestIndexes) {
   313  		t.requestIndexes = slice[:0]
   314  	}
   315  }
   316  
   317  // Whether we should allow sending not interested ("losing interest") to the peer. I noticed
   318  // qBitTorrent seems to punish us for sending not interested when we're streaming and don't
   319  // currently need anything.
   320  func (p *Peer) allowSendNotInterested() bool {
   321  	// Except for caching, we're not likely to lose pieces very soon.
   322  	if p.t.haveAllPieces() {
   323  		return true
   324  	}
   325  	all, known := p.peerHasAllPieces()
   326  	if all || !known {
   327  		return false
   328  	}
   329  	// Allow losing interest if we have all the pieces the peer has.
   330  	return roaring.AndNot(p.peerPieces(), &p.t._completedPieces).IsEmpty()
   331  }
   332  
// Transmit/action the request state to the peer. This includes work-stealing from other peers and
// some piece order randomization within the preferred state calculated earlier in next. Cancels are
// not done here, those are handled synchronously. We only track pending cancel acknowledgements.
func (p *PeerConn) applyRequestState(next desiredRequestState) {
	current := &p.requestState
	// Make interest sticky
	if !next.Interested && p.requestState.Interested {
		if !p.allowSendNotInterested() {
			next.Interested = true
		}
	}
	if !p.setInterested(next.Interested) {
		// Couldn't write the interest state; nothing further can be sent.
		return
	}
	more := true
	orig := next.Requests.requestIndexes
	requestHeap := heap.InterfaceForSlice(
		&next.Requests.requestIndexes,
		next.Requests.lessByValue,
	)
	heap.Init(requestHeap)

	t := p.t
	originalRequestCount := current.Requests.GetCardinality()
	for {
		if requestHeap.Len() == 0 {
			break
		}
		// Outstanding slots include requests awaiting cancel acknowledgement.
		numPending := maxRequests(current.Requests.GetCardinality() + current.Cancelled.GetCardinality())
		if numPending >= p.nominalMaxRequests() {
			break
		}
		req := heap.Pop(requestHeap)
		// The heap must pop in place; a capacity change would mean the backing slice was
		// reallocated unexpectedly.
		if cap(next.Requests.requestIndexes) != cap(orig) {
			panic("changed")
		}

		// don't add requests on receipt of a reject - because this causes request back
		// to potentially permanently unresponsive peers - which just adds network noise.  If
		// the peer can handle more requests it will send an "unchoked" message - which
		// will cause it to get added back to the request queue
		if p.needRequestUpdate == peerUpdateRequestsRemoteRejectReason {
			continue
		}

		existing := t.requestingPeer(req)
		if existing != nil && existing != p {
			// don't steal on cancel - because this is triggered by t.cancelRequest below
			// which means that the cancelled can immediately try to steal back a request
			// it has lost which can lead to circular cancel/add processing
			if p.needRequestUpdate == peerUpdateRequestsPeerCancelReason {
				continue
			}

			// Don't steal from the poor.
			diff := int64(current.Requests.GetCardinality()) + 1 - (int64(existing.uncancelledRequests()) - 1)
			// Steal a request that leaves us with one more request than the existing peer
			// connection if the stealer more recently received a chunk.
			if diff > 1 || (diff == 1 && !p.lastUsefulChunkReceived.After(existing.lastUsefulChunkReceived)) {
				continue
			}
			t.cancelRequest(req)
		}
		more = p.mustRequest(req)
		if !more {
			break
		}
	}
	if !more {
		// This might fail if we incorrectly determine that we can fit up to the maximum allowed
		// requests into the available write buffer space. We don't want that to happen because it
		// makes our peak requests dependent on how much was already in the buffer.
		panic(fmt.Sprintf(
			"couldn't fill apply entire request state [newRequests=%v]",
			current.Requests.GetCardinality()-originalRequestCount))
	}
	newPeakRequests := maxRequests(current.Requests.GetCardinality() - originalRequestCount)
	// log.Printf(
	// 	"requests %v->%v (peak %v->%v) reason %q (peer %v)",
	// 	originalRequestCount, current.Requests.GetCardinality(), p.peakRequests, newPeakRequests, p.needRequestUpdate, p)
	p.peakRequests = newPeakRequests
}
   415  
// This could be set to 10s to match the unchoke/request update interval recommended by some
// specifications. I've set it shorter to trigger it more often for testing for now.
const (
	// Minimum interval between timer-driven request updates (enforced by a panic in
	// updateRequestsWithReason).
	updateRequestsTimerDuration = 3 * time.Second
	// Gates the timer reset in updateRequestsWithReason; currently disabled.
	enableUpdateRequestsTimer = false
)