package torrent

import (
	"bytes"
	"cmp"
	"container/heap"
	"context"
	"crypto/sha1"
	"errors"
	"fmt"
	"hash"
	"io"
	"iter"
	"log/slog"
	"maps"
	"math"
	"math/rand"
	"net/netip"
	"net/url"
	"slices"
	"strings"
	"text/tabwriter"
	"time"
	"unique"
	"unsafe"
	"weak"

	"github.com/RoaringBitmap/roaring"
	"github.com/anacrolix/chansync"
	"github.com/anacrolix/chansync/events"
	"github.com/anacrolix/dht/v2"
	. "github.com/anacrolix/generics"
	g "github.com/anacrolix/generics"
	"github.com/anacrolix/log"
	"github.com/anacrolix/missinggo/v2"
	"github.com/anacrolix/missinggo/v2/bitmap"
	"github.com/anacrolix/missinggo/v2/panicif"
	"github.com/anacrolix/missinggo/v2/pubsub"
	"github.com/anacrolix/multiless"
	"github.com/anacrolix/sync"
	"github.com/pion/webrtc/v4"
	"golang.org/x/sync/errgroup"
	"golang.org/x/time/rate"

	"github.com/anacrolix/torrent/bencode"
	"github.com/anacrolix/torrent/internal/check"
	"github.com/anacrolix/torrent/internal/nestedmaps"
	request_strategy "github.com/anacrolix/torrent/internal/request-strategy"
	"github.com/anacrolix/torrent/merkle"
	"github.com/anacrolix/torrent/metainfo"
	pp "github.com/anacrolix/torrent/peer_protocol"
	utHolepunch "github.com/anacrolix/torrent/peer_protocol/ut-holepunch"
	"github.com/anacrolix/torrent/segments"
	"github.com/anacrolix/torrent/storage"
	"github.com/anacrolix/torrent/tracker"
	typedRoaring "github.com/anacrolix/torrent/typed-roaring"
	"github.com/anacrolix/torrent/types/infohash"
	infohash_v2 "github.com/anacrolix/torrent/types/infohash-v2"
	"github.com/anacrolix/torrent/webseed"
	"github.com/anacrolix/torrent/webtorrent"
)

// Returned (possibly wrapped, or as a context cancellation cause) when an
// operation cannot proceed because the Torrent has been closed.
var errTorrentClosed = errors.New("torrent closed")

// The inputs from which the Torrent's slog group attribute is derived. Cached
// so the group is only regenerated when one of these changes.
type torrentSlogGroupInput struct {
	name        any
	canonicalIh shortInfohash
}

// Maintains state of torrent within a Client.
// Many methods should not be called before the info is
// available, see .Info and .GotInfo.
type Torrent struct {
	// Torrent-level aggregate statistics. First in struct to ensure 64-bit
	// alignment. See #262.
	connStats AllConnStats
	counters  TorrentStatCounters

	cl     *Client
	logger log.Logger

	baseSlogger        *slog.Logger          // Without dynamic group attrs
	_slogger           *slog.Logger          // With the latest group attrs
	_slogGroup         slog.Attr             // The slog group for merging into other loggers.
	lastSlogGroupInput torrentSlogGroupInput // To guard against generating slog.Group

	networkingEnabled      chansync.Flag
	dataDownloadDisallowed chansync.Flag
	dataUploadDisallowed   bool
	userOnWriteChunkErr    func(error)

	closed chansync.SetOnce
	// A background Context cancelled when the Torrent is closed. Added to minimize extra goroutines
	// in tracker handlers.
	closedCtx       context.Context
	closedCtxCancel context.CancelCauseFunc
	onClose         []func()

	// v1 and v2 infohashes. Either may be unset (e.g. a v2-only torrent, or a
	// magnet link where only one hash is known yet).
	infoHash   g.Option[metainfo.Hash]
	infoHashV2 g.Option[infohash_v2.T]

	pieces []Piece

	// The order pieces are requested if there's no stronger reason like availability or priority.
	pieceRequestOrder []int
	// Values are the piece indices that changed.
	pieceStateChanges pubsub.PubSub[PieceStateChange]
	// The size of chunks to request from peers over the wire. This is
	// normally 16KiB by convention these days.
	chunkSize pp.Integer
	chunkPool sync.Pool
	// Total length of the torrent in bytes. Stored because it's not O(1) to
	// get this from the info dict.
	_length Option[int64]

	// The storage to open when the info dict becomes available.
	storageOpener *storage.Client
	// Storage for torrent data.
	storage *storage.Torrent
	// Read-locked for using storage, and write-locked for Closing.
	storageLock sync.RWMutex

	announceList metainfo.AnnounceList

	// The info dict. nil if we don't have it (yet).
	info *metainfo.Info
	// For scoping routines that depend on needing the info. Saves spinning up lots of helper
	// routines. Cancelled when the Torrent is Closed too.
	getInfoCtx context.Context
	// Put a nice reason in :)
	getInfoCtxCancel  context.CancelCauseFunc
	files             *[]*File
	fileSegmentsIndex g.Option[segments.Index]

	_chunksPerRegularPiece chunkIndexType

	webSeeds map[webseedUrlKey]*webseedPeer
	// Active peer connections, running message stream loops. TODO: Make this open (not-closed)
	// connections only.
	conns               map[*PeerConn]struct{}
	maxEstablishedConns int
	// Set of addrs to which we're attempting to connect. Connections are
	// half-open until all handshakes are completed.
	halfOpen map[string]map[outgoingConnAttemptKey]*PeerInfo

	// Reserve of peers to connect to. A peer can be both here and in the
	// active connections if were told about the peer after connecting with
	// them. That encourages us to reconnect to peers that are well known in
	// the swarm.
	peers prioritizedPeers
	// An announcer for each tracker URL. Note this includes non-regular trackers too.
	trackerAnnouncers           map[torrentTrackerAnnouncerKey]torrentTrackerAnnouncer
	regularTrackerAnnounceState map[torrentTrackerAnnouncerKey]*announceState
	// How many times we've initiated a DHT announce. TODO: Move into stats.
	numDHTAnnounces int

	// Name used if the info name isn't available. Should be cleared when the
	// Info does become available.
	nameMu      sync.RWMutex
	displayName string

	// The bencoded bytes of the info dict. This is actively manipulated if
	// the info bytes aren't initially available, and we try to fetch them
	// from peers.
	metadataBytes []byte
	// Each element corresponds to the 16KiB metadata pieces. If true, we have
	// received that piece.
	metadataCompletedChunks []bool
	metadataChanged         sync.Cond

	// Closed when .Info is obtained. This could be chansync.SetOnce but we already have sync around
	// IsSet from nameMu. Switching will probably only increase memory use.
	gotMetainfoC chan struct{}

	readers                map[*reader]struct{}
	_readerNowPieces       bitmap.Bitmap
	_readerReadaheadPieces bitmap.Bitmap

	// A cache of pieces we need to get. Calculated from various piece and file priorities and
	// completion states elsewhere. Includes piece data and piece v2 hashes. Used for efficient set
	// logic with peer pieces.
	_pendingPieces roaring.Bitmap
	// A cache of completed piece indices.
	_completedPieces roaring.Bitmap
	// Pieces that need to be hashed.
	piecesQueuedForHash typedRoaring.Bitmap[pieceIndex]
	activePieceHashes   int

	// Peers known to have every piece; excluded from per-piece availability counting.
	connsWithAllPieces map[*Peer]struct{}

	// Last active PeerConn request for each chunk.
	requestState map[RequestIndex]requestState
	// Chunks we've written to since the corresponding piece was last checked.
	dirtyChunks typedRoaring.Bitmap[RequestIndex]

	pex pexState

	// Is On when all pieces are complete, no hashing is pending or occurring.
	complete chansync.Flag

	// Torrent sources in use keyed by the source string. string -> error. If the slot is occupied
	// there's a worker for it.
	activeSources sync.Map
	// One source fetch at a time. We use mutex in the original definition.
	sourceMutex sync.Mutex

	smartBanCache smartBanCache

	// Large allocations reused between request state updates.
	requestPieceStates []g.Option[request_strategy.PieceRequestOrderState]
	requestIndexes     []RequestIndex

	// Disable actions after updating piece priorities, for benchmarking.
	disableTriggers bool
	// See AddTorrentOpts.DisableInitialPieceCheck
	initialPieceCheckDisabled bool
	// See AddTorrentOpts.IgnoreUnverifiedPieceCompletion
	ignoreUnverifiedPieceCompletion bool

	// Relating to tracker Completed transition event
	sawInitiallyIncompleteData bool
}

// Identifies a tracker announcer by (short) infohash and announce URL key.
type torrentTrackerAnnouncerKey struct {
	ShortInfohash shortInfohash
	url           trackerAnnouncerKey
}

// Compare orders keys by infohash first, then URL key.
func (me torrentTrackerAnnouncerKey) Compare(other torrentTrackerAnnouncerKey) int {
	return cmp.Or(
		me.ShortInfohash.Compare(other.ShortInfohash),
		cmp.Compare(me.url, other.url))
}

// Has the modified scheme for announcer-per-IP protocol and such-forth.
type trackerAnnouncerKey string

type outgoingConnAttemptKey = *PeerInfo

// Total torrent length in bytes. Only meaningful once the info is available.
func (t *Torrent) length() int64 {
	return t._length.Value
}

// Counts peers that have piece i, excluding peers known to have all pieces
// (those are tracked in connsWithAllPieces instead).
func (t *Torrent) selectivePieceAvailabilityFromPeers(i pieceIndex) (count int) {
	// This could be done with roaring.BitSliceIndexing.
	t.iterPeers(func(peer *Peer) {
		if _, ok := t.connsWithAllPieces[peer]; ok {
			return
		}
		if peer.peerHasPiece(i) {
			count++
		}
	})
	return
}

// Decrements the relative availability of piece i, and updates the request
// order. Panics if the count would go negative (unbalanced inc/dec).
func (t *Torrent) decPieceAvailability(i pieceIndex) {
	if !t.haveInfo() {
		return
	}
	p := t.piece(i)
	if p.relativeAvailability <= 0 {
		panic(p.relativeAvailability)
	}
	p.relativeAvailability--
	t.updatePieceRequestOrderPiece(i)
}

// Increments the relative availability of piece i, and updates the request order.
func (t *Torrent) incPieceAvailability(i pieceIndex) {
	// If we don't have the info, this should be reconciled when we do.
	if t.haveInfo() {
		p := t.piece(i)
		p.relativeAvailability++
		t.updatePieceRequestOrderPiece(i)
	}
}

// Cache of pieces currently wanted "now" by attached readers.
func (t *Torrent) readerNowPieces() bitmap.Bitmap {
	return t._readerNowPieces
}

// Cache of pieces within attached readers' readahead windows.
func (t *Torrent) readerReadaheadPieces() bitmap.Bitmap {
	return t._readerReadaheadPieces
}

// Whether piece i should be skipped when making requests.
func (t *Torrent) ignorePieceForRequests(i pieceIndex) bool {
	return t.piece(i).ignoreForRequests()
}

// Returns a channel that is closed when the Torrent is closed.
func (t *Torrent) Closed() events.Done {
	return t.closed.Done()
}

// KnownSwarm returns the known subset of the peers in the Torrent's swarm, including active,
// pending, and half-open peers.
func (t *Torrent) KnownSwarm() (ks []PeerInfo) {
	// Add pending peers to the list
	t.peers.Each(func(peer PeerInfo) {
		ks = append(ks, peer)
	})

	// Add half-open peers to the list
	for _, attempts := range t.halfOpen {
		for _, peer := range attempts {
			ks = append(ks, *peer)
		}
	}

	// Add active peers to the list
	t.cl.rLock()
	defer t.cl.rUnlock()
	for conn := range t.conns {
		ks = append(ks, PeerInfo{
			Id:     conn.PeerID,
			Addr:   conn.RemoteAddr,
			Source: conn.Discovery,
			// > If the connection is encrypted, that's certainly enough to set SupportsEncryption.
			// > But if we're not connected to them with an encrypted connection, I couldn't say
			// > what's appropriate. We can carry forward the SupportsEncryption value as we
			// > received it from trackers/DHT/PEX, or just use the encryption state for the
			// > connection. It's probably easiest to do the latter for now.
			// https://github.com/anacrolix/torrent/pull/188
			SupportsEncryption: conn.headerEncrypted,
		})
	}

	return
}

// Sets the wire chunk size and rebuilds the chunk buffer pool to match.
func (t *Torrent) setChunkSize(size pp.Integer) {
	t.chunkSize = size
	t.chunkPool = sync.Pool{
		New: func() interface{} {
			b := make([]byte, size)
			return &b
		},
	}
}

// Whether the piece is complete according to the cached completion bitmap.
func (t *Torrent) pieceComplete(piece pieceIndex) bool {
	return t._completedPieces.Contains(bitmap.BitIndex(piece))
}

// Queries storage directly for the piece's completion, bypassing the cache.
func (t *Torrent) pieceCompleteUncached(piece pieceIndex) (ret storage.Completion) {
	p := t.piece(piece)
	if t.ignoreUnverifiedPieceCompletion && p.numVerifies == 0 {
		// Completion isn't trusted until the piece has been verified at least once.
		return
	}
	if t.storage == nil {
		return storage.Completion{Complete: false, Ok: false}
	}
	return p.Storage().Completion()
}

// Appends conns that haven't been closed yet.
func (t *Torrent) appendUnclosedConns(ret []*PeerConn) []*PeerConn {
	return t.appendConns(ret, func(conn *PeerConn) bool {
		return !conn.closed.IsSet()
	})
}

// Appends conns matching the filter f.
func (t *Torrent) appendConns(ret []*PeerConn, f func(*PeerConn) bool) []*PeerConn {
	for c := range t.conns {
		if f(c) {
			ret = append(ret, c)
		}
	}
	return ret
}

// Don't call this directly, call Torrent.addPeers* to handle events.
371 func (t *Torrent) addPeer(p PeerInfo) (added bool) { 372 cl := t.cl 373 torrent.Add(fmt.Sprintf("peers added by source %q", p.Source), 1) 374 if t.closed.IsSet() { 375 return false 376 } 377 if ipAddr, ok := tryIpPortFromNetAddr(p.Addr); ok { 378 if cl.badPeerIPPort(ipAddr.IP, ipAddr.Port) { 379 torrent.Add("peers not added because of bad addr", 1) 380 // cl.logger.Printf("peers not added because of bad addr: %v", p) 381 return false 382 } 383 } 384 if replaced, ok := t.peers.AddReturningReplacedPeer(p); ok { 385 torrent.Add("peers replaced", 1) 386 if !replaced.equal(p) { 387 t.logger.WithDefaultLevel(log.Debug).Printf("added %v replacing %v", p, replaced) 388 added = true 389 } 390 } else { 391 added = true 392 } 393 t.openNewConns() 394 for t.peers.Len() > cl.config.TorrentPeersHighWater { 395 _, ok := t.peers.DeleteMin() 396 if ok { 397 torrent.Add("excess reserve peers discarded", 1) 398 } 399 } 400 return 401 } 402 403 func (t *Torrent) invalidateMetadata() { 404 for i := 0; i < len(t.metadataCompletedChunks); i++ { 405 t.metadataCompletedChunks[i] = false 406 } 407 t.nameMu.Lock() 408 // Why the fuck would info be set? 
409 panicif.NotNil(t.info) 410 t.nameMu.Unlock() 411 } 412 413 func (t *Torrent) saveMetadataPiece(index int, data []byte) { 414 if t.haveInfo() { 415 return 416 } 417 if index >= len(t.metadataCompletedChunks) { 418 t.logger.Printf("%s: ignoring metadata piece %d", t, index) 419 return 420 } 421 copy(t.metadataBytes[(1<<14)*index:], data) 422 t.metadataCompletedChunks[index] = true 423 } 424 425 func (t *Torrent) metadataPieceCount() int { 426 return (len(t.metadataBytes) + (1 << 14) - 1) / (1 << 14) 427 } 428 429 func (t *Torrent) haveMetadataPiece(piece int) bool { 430 if t.haveInfo() { 431 return (1<<14)*piece < len(t.metadataBytes) 432 } else { 433 return piece < len(t.metadataCompletedChunks) && t.metadataCompletedChunks[piece] 434 } 435 } 436 437 func (t *Torrent) metadataSize() int { 438 return len(t.metadataBytes) 439 } 440 441 func (t *Torrent) makePieces() { 442 t.pieces = make([]Piece, t.info.NumPieces()) 443 for i := range t.pieces { 444 piece := &t.pieces[i] 445 piece.t = t 446 piece.index = i 447 piece.noPendingWrites.L = &piece.pendingWritesMutex 448 if t.info.HasV1() { 449 piece.hash = (*metainfo.Hash)(unsafe.Pointer( 450 unsafe.SliceData(t.info.Pieces[i*sha1.Size : (i+1)*sha1.Size]))) 451 } 452 files := *t.files 453 // TODO: This can be done more efficiently by retaining a file iterator between loops. 
		beginFile := pieceFirstFileIndex(piece.torrentBeginOffset(), files)
		endFile := pieceEndFileIndex(piece.torrentEndOffset(), files)
		piece.beginFile = beginFile
		piece.endFile = endFile
	}
}

// Applies BEP 52 piece layers to the torrent's files, verifying each layer
// against the file's pieces root. Returns one error per file that couldn't be
// matched to a valid layer. Caller must hold the client lock.
func (t *Torrent) addPieceLayersLocked(layers map[string]string) (errs []error) {
	if layers == nil {
		return
	}
files:
	for _, f := range *t.files {
		if f.numPieces() <= 1 {
			// Single-piece files have no piece layer: the pieces root is the piece hash.
			continue
		}
		if !f.piecesRoot.Ok {
			err := fmt.Errorf("no piece root set for file %v", f)
			errs = append(errs, err)
			continue files
		}
		compactLayer, ok := layers[string(f.piecesRoot.Value[:])]
		var hashes [][32]byte
		if ok {
			var err error
			hashes, err = merkle.CompactLayerToSliceHashes(compactLayer)
			if err != nil {
				err = fmt.Errorf("bad piece layers for file %q: %w", f, err)
				errs = append(errs, err)
				continue files
			}
		} else {
			if f.length > t.info.PieceLength {
				// BEP 52 is pretty strongly worded about this, even though we should be able to
				// recover: If a v2 torrent is added by magnet link or infohash, we need to fetch
				// piece layers ourselves anyway, and that's how we can recover from this.
				t.logger.Levelf(log.Warning, "no piece layers for file %q", f)
			}
			continue files
		}
		if len(hashes) != f.numPieces() {
			errs = append(
				errs,
				fmt.Errorf("file %q: got %v hashes expected %v", f, len(hashes), f.numPieces()),
			)
			continue files
		}
		// Verify the layer by recomputing the merkle root before accepting any hashes.
		root := merkle.RootWithPadHash(hashes, metainfo.HashForPiecePad(t.info.PieceLength))
		if root != f.piecesRoot.Value {
			errs = append(errs, fmt.Errorf("%v: expected hash %x got %x", f, f.piecesRoot.Value, root))
			continue files
		}
		for i := range f.numPieces() {
			pi := f.BeginPieceIndex() + i
			p := t.piece(pi)
			p.setV2Hash(hashes[i])
		}
	}
	return
}

// AddPieceLayers applies BEP 52 piece layers under the client lock.
func (t *Torrent) AddPieceLayers(layers map[string]string) (errs []error) {
	t.cl.lock()
	defer t.cl.unlock()
	return t.addPieceLayersLocked(layers)
}

// Returns the index of the first file containing the piece. files must be
// ordered by offset.
func pieceFirstFileIndex(pieceOffset int64, files []*File) int {
	for i, f := range files {
		if f.offset+f.length > pieceOffset {
			return i
		}
	}
	return 0
}

// Returns the index after the last file containing the piece. files must be
// ordered by offset.
func pieceEndFileIndex(pieceEndOffset int64, files []*File) int {
	for i, f := range files {
		if f.offset >= pieceEndOffset {
			return i
		}
	}
	return len(files)
}

// Caches the torrent's total length, summed from the info's upverted files.
func (t *Torrent) cacheLength() {
	var l int64
	for _, f := range t.info.UpvertedFiles() {
		l += f.Length
	}
	t._length = Some(l)
}

// TODO: This shouldn't fail for storage reasons. Instead we should handle storage failure
// separately.
// Sets the info dict: validates it, opens storage, and derives cached values
// (file segments index, chunk geometry, length, pieces).
func (t *Torrent) setInfo(info *metainfo.Info) error {
	if err := validateInfo(info); err != nil {
		return fmt.Errorf("bad info: %w", err)
	}
	if t.storageOpener != nil {
		var err error
		ctx := log.ContextWithLogger(context.Background(), t.logger)
		t.storage, err = t.storageOpener.OpenTorrent(ctx, info, *t.canonicalShortInfohash())
		if err != nil {
			return fmt.Errorf("error opening torrent storage: %w", err)
		}
	}
	t.nameMu.Lock()
	t.info = info
	panicif.True(t.fileSegmentsIndex.Set(info.FileSegmentsIndex()).Ok)
	t.getInfoCtxCancel(errors.New("got info"))
	t.nameMu.Unlock()
	t._chunksPerRegularPiece = chunkIndexType(intCeilDiv(pp.Integer(t.usualPieceSize()), t.chunkSize))
	t.deferUpdateComplete()
	t.displayName = "" // Save a few bytes lol.
	t.initFiles()
	t.cacheLength()
	t.makePieces()
	return nil
}

// The request-order key for piece i, scoped by this torrent's canonical
// infohash so torrents can share a piece request order.
func (t *Torrent) pieceRequestOrderKey(i int) request_strategy.PieceRequestOrderKey {
	return request_strategy.PieceRequestOrderKey{
		InfoHash: unique.Make(*t.canonicalShortInfohash()),
		Index:    i,
	}
}

// This seems to be all the follow-up tasks after info is set, that can't fail.
func (t *Torrent) onSetInfo() {
	t.pieceRequestOrder = rand.Perm(t.numPieces())
	t.initPieceRequestOrder()
	MakeSliceWithLength(&t.requestPieceStates, t.numPieces())
	for i := range t.pieces {
		p := &t.pieces[i]
		// Need to add relativeAvailability before updating piece completion, as that may result in
		// conns being dropped.
		if p.relativeAvailability != 0 {
			panic(p.relativeAvailability)
		}
		p.relativeAvailability = t.selectivePieceAvailabilityFromPeers(i)
		t.addRequestOrderPiece(i)
		t.setInitialPieceCompletionFromStorage(i)
		t.queueInitialPieceCheck(i)
	}
	t.cl.event.Broadcast()
	// Unblocks anyone waiting on GotInfo.
	close(t.gotMetainfoC)
	t.updateWantPeersEvent()
	t.requestState = make(map[RequestIndex]requestState)
	panicif.Err(t.startPieceHashers())
	t.iterPeers(func(p *Peer) {
		p.onGotInfo(t.info)
		p.onNeedUpdateRequests("onSetInfo")
	})
}

// Checks the info bytes hash to expected values. Fills in any missing infohashes.
func (t *Torrent) hashInfoBytes(b []byte, info *metainfo.Info) error {
	v1Hash := infohash.HashBytes(b)
	v2Hash := infohash_v2.HashBytes(b)
	cl := t.cl
	if t.infoHash.Ok && !t.infoHashV2.Ok {
		if v1Hash == t.infoHash.Value {
			if info.HasV2() {
				t.infoHashV2.Set(v2Hash)
				cl.torrentsByShortHash[*v2Hash.ToShort()] = t
			}
		} else if *v2Hash.ToShort() == t.infoHash.Value {
			// What we thought was a v1 infohash was actually a truncated v2 one.
			if !info.HasV2() {
				return errors.New("invalid v2 info")
			}
			t.infoHashV2.Set(v2Hash)
			t.infoHash.SetNone()
			if info.HasV1() {
				cl.torrentsByShortHash[v1Hash] = t
				t.infoHash.Set(v1Hash)
			}
		}
	} else if t.infoHash.Ok && t.infoHashV2.Ok {
		if v1Hash != t.infoHash.Value {
			return errors.New("incorrect v1 infohash")
		}
		if v2Hash != t.infoHashV2.Value {
			return errors.New("incorrect v2 infohash")
		}
	} else if !t.infoHash.Ok && t.infoHashV2.Ok {
		if v2Hash != t.infoHashV2.Value {
			return errors.New("incorrect v2 infohash")
		}
		if info.HasV1() {
			t.infoHash.Set(v1Hash)
			cl.torrentsByShortHash[v1Hash] = t
		}
	} else {
		panic("no expected infohashes")
	}
	return nil
}

// Called when metadata for a torrent becomes available.
658 func (t *Torrent) setInfoBytesLocked(b []byte) (err error) { 659 var info metainfo.Info 660 err = bencode.Unmarshal(b, &info) 661 if err != nil { 662 err = fmt.Errorf("unmarshalling info bytes: %w", err) 663 return 664 } 665 err = t.hashInfoBytes(b, &info) 666 if err != nil { 667 return 668 } 669 t.metadataBytes = b 670 t.metadataCompletedChunks = nil 671 if t.info != nil { 672 return nil 673 } 674 err = t.setInfo(&info) 675 if err != nil { 676 return 677 } 678 t.onSetInfo() 679 return nil 680 } 681 682 // Used in tests. 683 func (t *Torrent) setInfoUnlocked(info *metainfo.Info) (err error) { 684 t.cl.lock() 685 defer t.cl.unlock() 686 err = t.setInfo(info) 687 if err != nil { 688 return 689 } 690 t.onSetInfo() 691 return 692 } 693 694 func (t *Torrent) haveAllMetadataPieces() bool { 695 if t.haveInfo() { 696 return true 697 } 698 if t.metadataCompletedChunks == nil { 699 return false 700 } 701 for _, have := range t.metadataCompletedChunks { 702 if !have { 703 return false 704 } 705 } 706 return true 707 } 708 709 // TODO: Propagate errors to disconnect peer. 710 func (t *Torrent) setMetadataSize(size int) (err error) { 711 if t.haveInfo() { 712 // We already know the correct metadata size. 713 return 714 } 715 if uint32(size) > maxMetadataSize { 716 return log.WithLevel(log.Warning, errors.New("bad size")) 717 } 718 if len(t.metadataBytes) == size { 719 return 720 } 721 t.metadataBytes = make([]byte, size) 722 t.metadataCompletedChunks = make([]bool, (size+(1<<14)-1)/(1<<14)) 723 t.metadataChanged.Broadcast() 724 for c := range t.conns { 725 c.requestPendingMetadata() 726 } 727 return 728 } 729 730 // Returns the best name for the torrent. This is either the name in the info dict, or a display 731 // name if the info isn't known yet. 
732 func (t *Torrent) bestName() (_ g.Option[string]) { 733 t.nameMu.RLock() 734 defer t.nameMu.RUnlock() 735 if t.haveInfo() { 736 return g.Some(t.info.BestName()) 737 } 738 if t.displayName != "" { 739 return g.Some(t.displayName) 740 } 741 return 742 } 743 744 // The current working name for the torrent. Either the name in the info dict, or a display name 745 // given such as by the dn value in a magnet link, or "". 746 func (t *Torrent) name() string { 747 return t.bestName().UnwrapOr("infohash:" + t.canonicalShortInfohash().HexString()) 748 } 749 750 func (t *Torrent) pieceState(index pieceIndex) (ret PieceState) { 751 p := &t.pieces[index] 752 ret.Priority = p.effectivePriority() 753 ret.Completion = p.completion() 754 ret.QueuedForHash = p.queuedForHash() 755 ret.Hashing = p.hashing 756 ret.Checking = ret.QueuedForHash || ret.Hashing 757 ret.Marking = p.marking 758 if ret.Ok && !ret.Complete && t.piecePartiallyDownloaded(index) { 759 ret.Partial = true 760 } 761 if t.info.HasV2() && !p.hashV2.Ok && p.hasPieceLayer() { 762 ret.MissingPieceLayerHash = true 763 } 764 return 765 } 766 767 func (t *Torrent) metadataPieceSize(piece int) int { 768 return metadataPieceSize(len(t.metadataBytes), piece) 769 } 770 771 func (t *Torrent) newMetadataExtensionMessage(c *PeerConn, msgType pp.ExtendedMetadataRequestMsgType, piece int, data []byte) pp.Message { 772 return pp.Message{ 773 Type: pp.Extended, 774 ExtendedID: c.PeerExtensionIDs[pp.ExtensionNameMetadata], 775 ExtendedPayload: append(bencode.MustMarshal(pp.ExtendedMetadataRequestMsg{ 776 Piece: piece, 777 TotalSize: len(t.metadataBytes), 778 Type: msgType, 779 }), data...), 780 } 781 } 782 783 type pieceAvailabilityRun struct { 784 Count pieceIndex 785 Availability int 786 } 787 788 func (me pieceAvailabilityRun) String() string { 789 return fmt.Sprintf("%v(%v)", me.Count, me.Availability) 790 } 791 792 func (t *Torrent) pieceAvailabilityRuns() (ret []pieceAvailabilityRun) { 793 rle := 
		missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
			ret = append(ret, pieceAvailabilityRun{Availability: el.(int), Count: int(count)})
		})
	for i := range t.pieces {
		rle.Append(t.pieces[i].availability(), 1)
	}
	rle.Flush()
	return
}

// Histogram of piece availability: freqs[a] is the number of pieces with
// availability a.
func (t *Torrent) pieceAvailabilityFrequencies() (freqs []int) {
	freqs = make([]int, t.numActivePeers()+1)
	for i := range t.pieces {
		freqs[t.piece(i).availability()]++
	}
	return
}

// Run-length encodes the state of all pieces.
func (t *Torrent) pieceStateRuns() (ret PieceStateRuns) {
	rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
		ret = append(ret, PieceStateRun{
			PieceState: el.(PieceState),
			Length:     int(count),
		})
	})
	for index := range t.pieces {
		rle.Append(t.pieceState(index), 1)
	}
	rle.Flush()
	return
}

// Produces a small string representing a PieceStateRun.
func (psr PieceStateRun) String() (ret string) {
	ret = fmt.Sprintf("%d", psr.Length)
	// One character for the priority, then flag characters.
	ret += func() string {
		switch psr.Priority {
		case PiecePriorityNext:
			return "N"
		case PiecePriorityNormal:
			return "."
		case PiecePriorityReadahead:
			return "R"
		case PiecePriorityNow:
			return "!"
		case PiecePriorityHigh:
			return "H"
		default:
			return ""
		}
	}()
	if psr.Hashing {
		ret += "H"
	}
	if psr.QueuedForHash {
		ret += "Q"
	}
	if psr.Marking {
		ret += "M"
	}
	if psr.Partial {
		ret += "P"
	}
	if psr.Ok && psr.Complete {
		ret += "C"
	}
	if !psr.Ok {
		ret += "?"
	}
	if psr.MissingPieceLayerHash {
		ret += "h"
	}
	return
}

// Writes a human-readable status dump for the torrent to w.
func (t *Torrent) writeStatus(w io.Writer) {
	if t.infoHash.Ok {
		fmt.Fprintf(w, "Infohash: %s\n", t.infoHash.Value.HexString())
	}
	if t.infoHashV2.Ok {
		fmt.Fprintf(w, "Infohash v2: %s\n", t.infoHashV2.Value.HexString())
	}
	fmt.Fprintf(w, "Metadata length: %d\n", t.metadataSize())
	if !t.haveInfo() {
		fmt.Fprintf(w, "Metadata have: ")
		for _, h := range t.metadataCompletedChunks {
			fmt.Fprintf(w, "%c", func() rune {
				if h {
					return 'H'
				} else {
					return '.'
				}
			}())
		}
		fmt.Fprintln(w)
	}
	// Note this might be shared with other torrents.
	fmt.Fprintf(w, "Piece request order length: %v\n", func() any {
		pro := t.getPieceRequestOrder()
		if pro == nil {
			return nil
		}
		return pro.Len()
	}())
	fmt.Fprintf(w, "Piece length: %s\n",
		func() string {
			if t.haveInfo() {
				return fmt.Sprintf("%v (%v chunks)",
					t.usualPieceSize(),
					float64(t.usualPieceSize())/float64(t.chunkSize))
			} else {
				return "no info"
			}
		}(),
	)
	if t.info != nil {
		fmt.Fprintf(w, "Num Pieces: %d (%d completed)\n", t.numPieces(), t.numPiecesCompleted())
		fmt.Fprintf(w, "Piece States: %s\n", t.pieceStateRuns())
		// Generates a huge, unhelpful listing when piece availability is very scattered. Prefer
		// availability frequencies instead.
		if false {
			fmt.Fprintf(w, "Piece availability: %v\n", strings.Join(func() (ret []string) {
				for _, run := range t.pieceAvailabilityRuns() {
					ret = append(ret, run.String())
				}
				return
			}(), " "))
		}
		fmt.Fprintf(w, "Piece availability frequency: %v\n", strings.Join(
			func() (ret []string) {
				for avail, freq := range t.pieceAvailabilityFrequencies() {
					if freq == 0 {
						continue
					}
					ret = append(ret, fmt.Sprintf("%v: %v", avail, freq))
				}
				return
			}(),
			", "))
	}
	fmt.Fprintf(w, "Reader Pieces:")
	t.forReaderOffsetPieces(func(begin, end pieceIndex) (again bool) {
		fmt.Fprintf(w, " %d:%d", begin, end)
		return true
	})
	fmt.Fprintln(w)

	fmt.Fprintf(w, "Enabled trackers:\n")
	{
		tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
		fmt.Fprintf(tw, " URL\tExtra\n")
		// Sort by URL ignoring scheme first, so http/https variants of the
		// same tracker sort together; full URL breaks ties.
		sortedTrackerAnnouncers := slices.SortedFunc(
			maps.Values(t.trackerAnnouncers),
			func(l, r torrentTrackerAnnouncer) int {
				lu := l.URL()
				ru := r.URL()
				var luns, runs url.URL = *lu, *ru
				luns.Scheme = ""
				runs.Scheme = ""
				var ml multiless.Computation
				ml = multiless.EagerOrdered(ml, luns.String(), runs.String())
				ml = multiless.EagerOrdered(ml, lu.String(), ru.String())
				return ml.OrderingInt()
			},
		)
		for _, ta := range sortedTrackerAnnouncers {
			fmt.Fprintf(tw, " %q\t%v\n", ta.URL(), ta.statusLine())
		}
		tw.Flush()
	}

	fmt.Fprintf(w, "DHT Announces: %d\n", t.numDHTAnnounces)

	dumpStats(w, t.statsLocked())

	fmt.Fprintf(w, "webseeds:\n")

	t.writePeerStatuses(w, func(yield func(*Peer) bool) {
		for _, ws := range t.webSeeds {
			if !yield(&ws.peer) {
				return
			}
		}
	})

	// Peers without priorities first, then those with. I'm undecided about how to order peers
	// without priorities.
	peerConns := slices.SortedFunc(maps.Keys(t.conns), func(l, r *PeerConn) int {
		ml := multiless.New()
		lpp := g.ResultFromTuple(l.peerPriority()).ToOption()
		rpp := g.ResultFromTuple(r.peerPriority()).ToOption()
		ml = ml.Bool(lpp.Ok, rpp.Ok)
		ml = ml.Uint32(rpp.Value, lpp.Value)
		return ml.OrderingInt()
	})

	fmt.Fprintf(w, "%v peer conns:\n", len(peerConns))
	var peerIter iter.Seq[*Peer] = func(yield func(*Peer) bool) {
		for _, pc := range peerConns {
			if !yield(&pc.Peer) {
				return
			}
		}
	}
	t.writePeerStatuses(w, peerIter)
}

// Writes each peer's status to w, indenting the peer's continuation lines.
func (t *Torrent) writePeerStatuses(w io.Writer, peers iter.Seq[*Peer]) {
	var buf bytes.Buffer
	for c := range peers {
		fmt.Fprintf(w, "- ")
		buf.Reset()
		c.writeStatus(&buf)
		w.Write(bytes.TrimRight(
			bytes.ReplaceAll(buf.Bytes(), []byte("\n"), []byte("\n ")),
			" "))
	}
}

// Whether the info dict is available.
func (t *Torrent) haveInfo() bool {
	return t.info != nil
}

// Returns a run-time generated MetaInfo that includes the info bytes and
// announce-list as currently known to the client.
func (t *Torrent) newMetaInfo() metainfo.MetaInfo {
	return metainfo.MetaInfo{
		CreationDate: time.Now().Unix(),
		Comment:      "dynamic metainfo from client",
		CreatedBy:    "https://github.com/anacrolix/torrent",
		AnnounceList: t.announceList.Clone(),
		InfoBytes: func() []byte {
			if t.haveInfo() {
				return t.metadataBytes
			} else {
				return nil
			}
		}(),
		UrlList: func() []string {
			ret := make([]string, 0, len(t.webSeeds))
			for url := range t.webSeeds {
				ret = append(ret, url.Value())
			}
			return ret
		}(),
		PieceLayers: t.pieceLayers(),
	}
}

// Returns a count of bytes that are not complete in storage, and not pending being written to
// storage. This value is from the perspective of the download manager, and may not agree with the
// actual state in storage. If you want read data synchronously you should use a Reader. See
// https://github.com/anacrolix/torrent/issues/828.
func (t *Torrent) BytesMissing() (n int64) {
	t.cl.rLock()
	n = t.bytesMissingLocked()
	t.cl.rUnlock()
	return
}

func (t *Torrent) bytesMissingLocked() int64 {
	return t.bytesLeft()
}

// Calls cb for each bit NOT set in b over [0, end).
func iterFlipped(b *roaring.Bitmap, end uint64, cb func(uint32) bool) {
	roaring.Flip(b, 0, end).Iterate(cb)
}

// Bytes still wanted: for each incomplete piece, its length minus the bytes
// already written (dirty).
func (t *Torrent) bytesLeft() (left int64) {
	iterFlipped(&t._completedPieces, uint64(t.numPieces()), func(x uint32) bool {
		p := t.piece(pieceIndex(x))
		left += int64(p.length() - p.numDirtyBytes())
		return true
	})
	return
}

// Bytes left to give in tracker announces. -1 when the info isn't known yet.
func (t *Torrent) bytesLeftAnnounce() int64 {
	if t.haveInfo() {
		return t.bytesLeft()
	} else {
		return -1
	}
}

// Whether the piece is incomplete but has at least one dirty chunk.
func (t *Torrent) piecePartiallyDownloaded(piece pieceIndex) bool {
	if t.pieceComplete(piece) {
		return false
	}
	if t.pieceAllDirty(piece) {
		return false
	}
	return t.pieces[piece].hasDirtyChunks()
}

// The piece length for every piece except possibly the final one.
func (t *Torrent) usualPieceSize() int {
	return int(t.info.PieceLength)
}

func (t *Torrent) numPieces() pieceIndex {
	return t.info.NumPieces()
}

func (t *Torrent) numPiecesCompleted() (num pieceIndex) {
	return pieceIndex(t._completedPieces.GetCardinality())
}

// Tears the torrent down: cancels contexts, closes peers and storage (storage
// asynchronously via wg), and removes the torrent from the Client.
func (t *Torrent) close(wg *sync.WaitGroup) {
	// Should only be called from the Client.
	panicif.False(t.closed.Set())
	t.eachShortInfohash(func(short [20]byte) {
		delete(t.cl.torrentsByShortHash, short)
	})
	t.deferUpdateRegularTrackerAnnouncing()
	t.closedCtxCancel(errTorrentClosed)
	t.getInfoCtxCancel(errTorrentClosed)
	for _, f := range t.onClose {
		f()
	}
	if t.storage != nil {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Write lock excludes all storage users while closing.
			t.storageLock.Lock()
			defer t.storageLock.Unlock()
			if f := t.storage.Close; f != nil {
				err1 := f()
				if err1 != nil {
					t.logger.WithDefaultLevel(log.Warning).Printf("error closing storage: %v", err1)
				}
			}
		}()
	}
	t.iterPeers(func(p *Peer) {
		p.close()
	})
	if t.storage != nil {
		t.deletePieceRequestOrder()
	}
	t.assertAllPiecesRelativeAvailabilityZero()
	t.pex.Reset()
	t.cl.event.Broadcast()
	t.pieceStateChanges.Close()
	t.updateWantPeersEvent()
	g.MustDelete(t.cl.torrents, t)
	// This doesn't work yet because requests remove themselves after they close, and we don't
	// remove them synchronously.
	if false {
		if len(t.cl.torrents) == 0 {
			panicif.NotZero(len(t.cl.activeWebseedRequests))
		}
	}
	return
}

// Sanity check: after closing all peers, every piece's relative availability
// should have been decremented back to zero.
func (t *Torrent) assertAllPiecesRelativeAvailabilityZero() {
	for i := range t.pieces {
		p := t.piece(i)
		if p.relativeAvailability != 0 {
			panic(fmt.Sprintf("piece %v has relative availability %v", i, p.relativeAvailability))
		}
	}
}

// The whole-torrent first byte position.
func (t *Torrent) requestIndexBegin(r RequestIndex) int64 {
	return t.requestOffset(t.requestIndexToRequest(r))
}

// The whole-torrent byte position one past the end of the request's chunk.
func (t *Torrent) requestIndexEnd(r RequestIndex) int64 {
	req := t.requestIndexToRequest(r)
	return t.requestOffset(req) + int64(req.Length)
}

// The whole-torrent offset of the request's first byte.
func (t *Torrent) requestOffset(r Request) int64 {
	return torrentRequestOffset(t.length(), int64(t.usualPieceSize()), r)
}

// Return the request that would include the given offset into the torrent data. Returns !ok if
// there is no such request.
func (t *Torrent) offsetRequest(off int64) (req Request, ok bool) {
	return torrentOffsetRequest(t.length(), t.info.PieceLength, int64(t.chunkSize), off)
}

// Like offsetRequest but panics if the offset isn't covered by any request.
func (t *Torrent) getRequestIndexContainingOffset(off int64) RequestIndex {
	req, ok := t.offsetRequest(off)
	panicif.False(ok)
	return t.requestIndexFromRequest(req)
}

// Writes chunk data to piece storage at the given in-piece offset. Short writes are promoted to
// io.ErrShortWrite.
func (t *Torrent) writeChunk(piece int, begin int64, data []byte) (err error) {
	n, err := t.piece(piece).Storage().WriteAt(data, begin)
	if err == nil && n != len(data) {
		err = io.ErrShortWrite
	}
	return err
}

// A per-piece bool slice, true where the piece is complete.
func (t *Torrent) bitfield() (bf []bool) {
	bf = make([]bool, t.numPieces())
	t._completedPieces.Iterate(func(piece uint32) (again bool) {
		bf[piece] = true
		return true
	})
	return
}

// Number of chunks in the given piece, rounding up for a short final chunk.
func (t *Torrent) pieceNumChunks(piece pieceIndex) chunkIndexType {
	return chunkIndexType(intCeilDiv(t.pieceLength(piece), t.chunkSize))
}

func (t *Torrent) chunksPerRegularPiece() chunkIndexType {
	return t._chunksPerRegularPiece
}

// Marks every chunk in the piece as not-received (pending) again.
func (t *Torrent) pendAllChunkSpecs(pieceIndex pieceIndex) {
	t.dirtyChunks.RemoveRange(
		uint64(t.pieceRequestIndexBegin(pieceIndex)),
		uint64(t.pieceRequestIndexBegin(pieceIndex+1)))
}

// The byte length of the given piece, accounting for piece-aligned (v2) files and a short final
// piece.
func (t *Torrent) pieceLength(piece pieceIndex) pp.Integer {
	if t.info.PieceLength == 0 {
		// There will be no variance amongst pieces. Only pain.
		return 0
	}
	if t.info.FilesArePieceAligned() {
		// Piece-aligned files: the last piece of a file may be shorter than the nominal length.
		p := t.piece(piece)
		file := p.mustGetOnlyFile()
		if piece == file.EndPieceIndex()-1 {
			return pp.Integer(file.length - (p.torrentBeginOffset() - file.offset))
		}
		return pp.Integer(t.usualPieceSize())
	}
	if piece == t.numPieces()-1 {
		// Final piece of a v1 torrent: the remainder, unless the length divides evenly.
		ret := pp.Integer(t.length() % t.info.PieceLength)
		if ret != 0 {
			return ret
		}
	}
	return pp.Integer(t.info.PieceLength)
}

// A writer that checks received blocks against the smart-ban cache as they're hashed.
func (t *Torrent) getBlockCheckingWriterForPiece(piece pieceIndex) blockCheckingWriter {
	return blockCheckingWriter{
		cache:        &t.smartBanCache,
		requestIndex: t.pieceRequestIndexBegin(piece),
		chunkBuffer:  t.getChunkBuffer(),
	}
}

// Whether the smart-ban cache has recorded any peers for this piece's blocks.
func (t *Torrent) hasSmartbanDataForPiece(piece pieceIndex) bool {
	return t.smartBanCache.HasPeerForBlocks(iterRange(t.pieceRequestIndexBegin(piece), t.pieceRequestIndexBegin(piece+1)))
}

// Adds n to both the Torrent- and Client-level hashed-byte counters.
func (t *Torrent) countBytesHashed(n int64) {
	t.counters.BytesHashed.Add(n)
	t.cl.counters.BytesHashed.Add(n)
}

// Hashes the piece and compares against its expected v1 (SHA-1) or v2 (merkle) hash, choosing by
// which hash the piece carries.
func (t *Torrent) hashPiece(piece pieceIndex) (
	correct bool,
	// These are peers that sent us blocks that differ from what we hash here. TODO: Track Peer not
	// bannable addr for peer types that are rebuked differently.
	differingPeers map[bannableAddr]struct{},
	err error,
) {
	p := t.piece(piece)
	p.waitNoPendingWrites()
	storagePiece := p.Storage()

	if p.hash != nil {
		// Does the backend want to do its own hashing?
		if i, ok := storagePiece.PieceImpl.(storage.SelfHashing); ok {
			var sum metainfo.Hash
			// log.Printf("A piece decided to self-hash: %d", piece)
			sum, err = i.SelfHash()
			if err == nil {
				t.countBytesHashed(int64(p.length()))
			}
			correct = sum == *p.hash
			// Can't do smart banning without reading the piece. The smartBanCache is still cleared
			// in finishHash regardless.
			return
		}
		h := pieceHash.New()
		differingPeers, err = t.hashPieceWithSpecificHash(piece, h)
		// For a hybrid torrent, we work with the v2 files, but if we use a v1 hash, we can assume
		// that the pieces are padded with zeroes.
		if t.info.FilesArePieceAligned() {
			paddingLen := p.Info().V1Length() - p.Info().Length()
			written, err := io.CopyN(h, zeroReader, paddingLen)
			if written != paddingLen {
				panic(fmt.Sprintf(
					"piece %v: wrote %v bytes of padding, expected %v, error: %v",
					piece,
					written,
					paddingLen,
					err,
				))
			}
			t.countBytesHashed(written)
		}
		var sum [20]byte
		sumExactly(sum[:], h.Sum)
		correct = sum == *p.hash
	} else if p.hashV2.Ok {
		h := merkle.NewHash()
		differingPeers, err = t.hashPieceWithSpecificHash(piece, h)
		var sum [32]byte
		// What about the final piece in a torrent? From BEP 52: "The layer is chosen so that one
		// hash covers piece length bytes". Note that if a piece doesn't have a hash in piece layers
		// it's because it's not larger than the piece length.
		sumExactly(sum[:], func(b []byte) []byte {
			return h.SumMinLength(b, int(t.info.PieceLength))
		})
		correct = sum == p.hashV2.Value
	} else {
		// No per-piece hash: compare against the containing file's pieces root.
		expected := p.mustGetOnlyFile().piecesRoot.Unwrap()
		h := merkle.NewHash()
		differingPeers, err = t.hashPieceWithSpecificHash(piece, h)
		var sum [32]byte
		// This is *not* padded to piece length.
		sumExactly(sum[:], h.Sum)
		correct = sum == expected
	}
	return
}

// Panics unless sum produces exactly len(dst) bytes when appending to dst[:0].
func sumExactly(dst []byte, sum func(b []byte) []byte) {
	n := len(sum(dst[:0]))
	if n != len(dst) {
		panic(n)
	}
}

// Streams the piece from storage into h, optionally teeing through a smart-ban checking writer so
// bad blocks can be attributed to peers.
func (t *Torrent) hashPieceWithSpecificHash(piece pieceIndex, h hash.Hash) (
	// These are peers that sent us blocks that differ from what we hash here.
	differingPeers map[bannableAddr]struct{},
	err error,
) {
	var w io.Writer = h
	if t.hasSmartbanDataForPiece(piece) {
		smartBanWriter := t.getBlockCheckingWriterForPiece(piece)
		// Deferred in LIFO order: the badPeers collection below runs before the buffer is
		// returned to the pool here.
		defer func() {
			t.putChunkBuffer(smartBanWriter.chunkBuffer)
			smartBanWriter.chunkBuffer = nil
		}()
		defer func() {
			if err != nil {
				// Skip smart banning since we can't blame them for storage issues. A short write would
				// ban peers for all recorded blocks that weren't just written.
				return
			}
			// Flush now, even though we may not have finished writing to the piece hash, since
			// further data is padding only and should not have come from peers.
			smartBanWriter.Flush()
			differingPeers = smartBanWriter.badPeers
		}()
		w = io.MultiWriter(h, &smartBanWriter)
	}
	p := t.piece(piece)
	storagePiece := p.Storage()
	var written int64
	written, err = storagePiece.WriteTo(w)
	t.countBytesHashed(written)
	return
}

func (t *Torrent) haveAnyPieces() bool {
	return !t._completedPieces.IsEmpty()
}

func (t *Torrent) haveAllPieces() bool {
	if !t.haveInfo() {
		// Can't know the piece count yet.
		return false
	}
	return t._completedPieces.GetCardinality() == bitmap.BitRange(t.numPieces())
}

func (t *Torrent) havePiece(index pieceIndex) bool {
	return t.haveInfo() && t.pieceComplete(index)
}

// Drops the conn when both sides are complete and the conn is otherwise useless, gated on the
// DropMutuallyCompletePeers config option.
func (t *Torrent) maybeDropMutuallyCompletePeer(
	// I'm not sure about taking peer here, not all peer implementations actually drop. Maybe that's
	// okay?
	p *PeerConn,
) {
	if !t.cl.config.DropMutuallyCompletePeers {
		return
	}
	if !t.haveAllPieces() {
		return
	}
	if all, known := p.peerHasAllPieces(); !(known && all) {
		return
	}
	if p.useful() {
		return
	}
	p.logger.Levelf(log.Debug, "is mutually complete; dropping")
	p.drop()
}

func (t *Torrent) haveRequestIndexChunk(reqIndex RequestIndex) bool {
	return t.haveChunk(t.requestIndexToRequest(reqIndex))
}

// Whether we already hold the data for the given chunk request.
func (t *Torrent) haveChunk(r Request) bool {
	if !t.haveInfo() {
		return false
	}
	if t.pieceComplete(pieceIndex(r.Index)) {
		return true
	}
	p := t.piece(int(r.Index))
	return !p.pendingChunk(r.ChunkSpec, t.chunkSize)
}

func chunkIndexFromChunkSpec(cs ChunkSpec, chunkSize pp.Integer) chunkIndexType {
	return chunkIndexType(cs.Begin / chunkSize)
}

// Whether the piece is wanted. The IsEmpty check short-circuits the common fully-seeded case.
func (t *Torrent) wantPieceIndex(index pieceIndex) bool {
	return !t._pendingPieces.IsEmpty() && t._pendingPieces.Contains(uint32(index))
}

// A pool of []*PeerConn, to reduce allocations in functions that need to index or sort Torrent
// conns (which is a map).
var peerConnSlices sync.Pool

// Takes a slice from the pool, or allocates one with the given capacity. The result always has
// zero length.
func getPeerConnSlice(cap int) []*PeerConn {
	getInterface := peerConnSlices.Get()
	if getInterface == nil {
		return make([]*PeerConn, 0, cap)
	} else {
		return getInterface.([]*PeerConn)[:0]
	}
}

// Calls the given function with a slice of unclosed conns. It uses a pool to reduce allocations as
// this is a frequent occurrence.
1437 func (t *Torrent) withUnclosedConns(f func([]*PeerConn)) { 1438 sl := t.appendUnclosedConns(getPeerConnSlice(len(t.conns))) 1439 f(sl) 1440 peerConnSlices.Put(sl) 1441 } 1442 1443 func (t *Torrent) worstBadConnFromSlice(opts worseConnLensOpts, sl []*PeerConn) *PeerConn { 1444 wcs := worseConnSlice{conns: sl} 1445 wcs.initKeys(opts) 1446 heap.Init(&wcs) 1447 for wcs.Len() != 0 { 1448 c := heap.Pop(&wcs).(*PeerConn) 1449 if opts.incomingIsBad && !c.outgoing { 1450 return c 1451 } 1452 if opts.outgoingIsBad && c.outgoing { 1453 return c 1454 } 1455 if c._stats.ChunksReadWasted.Int64() >= 6 && c._stats.ChunksReadWasted.Int64() > c._stats.ChunksReadUseful.Int64() { 1456 return c 1457 } 1458 // If the connection is in the worst half of the established 1459 // connection quota and is older than a minute. 1460 if wcs.Len() >= (t.maxEstablishedConns+1)/2 { 1461 // Give connections 1 minute to prove themselves. 1462 if time.Since(c.completedHandshake) > time.Minute { 1463 return c 1464 } 1465 } 1466 } 1467 return nil 1468 } 1469 1470 // The worst connection is one that hasn't been sent, or sent anything useful for the longest. A bad 1471 // connection is one that usually sends us unwanted pieces, or has been in the worse half of the 1472 // established connections for more than a minute. This is O(n log n). If there was a way to not 1473 // consider the position of a conn relative to the total number, it could be reduced to O(n). 
func (t *Torrent) worstBadConn(opts worseConnLensOpts) (ret *PeerConn) {
	t.withUnclosedConns(func(ucs []*PeerConn) {
		ret = t.worstBadConnFromSlice(opts, ucs)
	})
	return
}

// Published through pieceStateChanges when a piece's state changes.
type PieceStateChange struct {
	Index int
	PieceState
}

// Queues a piece state change to be published when the client lock is released.
func (t *Torrent) deferPublishPieceStateChange(piece pieceIndex) {
	p := t.piece(piece)
	t.cl.unlockHandlers.changedPieceStates[p] = struct{}{}
}

func (t *Torrent) pieceNumPendingChunks(piece pieceIndex) pp.Integer {
	if t.pieceComplete(piece) {
		return 0
	}
	return pp.Integer(t.pieceNumChunks(piece) - t.pieces[piece].numDirtyChunks())
}

func (t *Torrent) pieceAllDirty(piece pieceIndex) bool {
	return t.pieces[piece].allChunksDirty()
}

// Recomputes reader-derived piece sets and all piece priorities after the reader set changes.
func (t *Torrent) readersChanged() {
	t.updateReaderPieces()
	t.updateAllPiecePriorities("Torrent.readersChanged")
}

func (t *Torrent) updateReaderPieces() {
	t._readerNowPieces, t._readerReadaheadPieces = t.readerPiecePriorities()
}

// Called when a reader's piece range moves; re-prioritizes only the affected ranges.
func (t *Torrent) readerPosChanged(from, to pieceRange) {
	if from == to {
		return
	}
	t.updateReaderPieces()
	// Order the ranges, high and low.
	l, h := from, to
	if l.begin > h.begin {
		l, h = h, l
	}
	if l.end < h.begin {
		// Two distinct ranges.
		t.updatePiecePriorities(l.begin, l.end, "Torrent.readerPosChanged")
		t.updatePiecePriorities(h.begin, h.end, "Torrent.readerPosChanged")
	} else {
		// Ranges overlap.
		t.updatePiecePriorities(l.begin, max(l.end, h.end), "Torrent.readerPosChanged")
	}
}

func (t *Torrent) maybeNewConns() {
	// Tickle the accept routine.
	t.cl.event.Broadcast()
	t.openNewConns()
}

// Asks conns that could serve the piece, and are low on requests, to refresh their requests.
func (t *Torrent) updatePeerRequestsForPiece(piece pieceIndex, reason updateRequestReason) {
	if !t._pendingPieces.Contains(uint32(piece)) {
		// Non-pending pieces are usually cancelled more synchronously.
		return
	}
	for c := range t.conns {
		// This is a lot of effort to avoid using continue...
		func() {
			if !c.isLowOnRequests() {
				return
			}
			if !c.peerHasPiece(piece) {
				return
			}
			// Interested but choked and not allowed-fast: requesting would be futile.
			if c.requestState.Interested && c.peerChoking && !c.peerAllowedFast.Contains(piece) {
				return
			}
			c.onNeedUpdateRequests(reason)
		}()
	}
}

// Stuff to do when pending pieces changes. We avoid running this in some benchmarks.
func (t *Torrent) onPiecePendingTriggers(piece pieceIndex) {
	t.maybeNewConns()
	t.deferPublishPieceStateChange(piece)
	t.deferUpdateRegularTrackerAnnouncing()
}

// Pending pieces is an old bitmap of stuff we want. I think it's more nuanced than that now with
// storage caps and cross-Torrent priorities. Returns whether the pending set actually changed.
func (t *Torrent) updatePendingPieces(piece pieceIndex) bool {
	p := t.piece(piece)
	newPrio := p.effectivePriority()
	if newPrio == PiecePriorityNone && p.haveHash() {
		return t._pendingPieces.CheckedRemove(uint32(piece))
	} else {
		return t._pendingPieces.CheckedAdd(uint32(piece))
	}
}

// Maybe return whether peer requests should be updated so reason doesn't have to be passed?
func (t *Torrent) updatePiecePriorityNoRequests(piece pieceIndex) (updateRequests bool) {
	// I think because the piece request order gets removed at close.
	if !t.closed.IsSet() {
		// It would be possible to filter on pure-priority changes here to avoid churning the piece
		// request order. If there's a storage cap then it's possible that pieces are moved around
		// so that new requests can be issued.
		updateRequests = t.updatePieceRequestOrderPiece(piece) && t.hasStorageCap()
	}
	if t.updatePendingPieces(piece) {
		if !t.disableTriggers {
			// This used to happen after updating requests, but I don't think the order matters.
			t.onPiecePendingTriggers(piece)
		}
		// Something was added or removed.
		updateRequests = true
	}
	return
}

func (t *Torrent) updatePiecePriority(piece pieceIndex, reason updateRequestReason) {
	//t.logger.Slogger().Debug("updatePiecePriority", "piece", piece, "reason", reason)
	if t.updatePiecePriorityNoRequests(piece) && !t.disableTriggers {
		t.updatePeerRequestsForPiece(piece, reason)
	}
}

func (t *Torrent) updateAllPiecePriorities(reason updateRequestReason) {
	if !t.haveInfo() {
		return
	}
	t.updatePiecePriorities(0, t.numPieces(), reason)
}

// Update all piece priorities in one hit. This function should have the same output as
// updatePiecePriority, but across all pieces.
func (t *Torrent) updatePiecePriorities(begin, end pieceIndex, reason updateRequestReason) {
	t.logger.Slogger().Debug("updating piece priorities", "begin", begin, "end", end)
	for i := begin; i < end; i++ {
		t.updatePiecePriority(i, reason)
	}
	t.logPieceRequestOrder()
}

// Helps debug piece priorities for capped storage.
func (t *Torrent) logPieceRequestOrder() {
	level := slog.LevelDebug
	logger := t.slogger()
	if !logger.Enabled(context.Background(), level) {
		return
	}
	pro := t.getPieceRequestOrder()
	// This might require some optimization around Record to avoid performance issues when
	// benchmarking. Disabled for now.
	if false {
		if pro != nil {
			for item := range pro.Iter {
				t.slogger().Debug("piece request order item",
					"infohash", item.Key.InfoHash,
					"piece", item.Key.Index,
					"state", item.State)
			}
		}
	}
}

// Returns the range of pieces [begin, end) that contains the extent of bytes.
func (t *Torrent) byteRegionPieces(off, size int64) (begin, end pieceIndex) {
	if off >= t.length() {
		return
	}
	if off < 0 {
		// Clamp the region to start at zero, shrinking its size accordingly.
		size += off
		off = 0
	}
	if size <= 0 {
		return
	}
	begin = pieceIndex(off / t.info.PieceLength)
	end = pieceIndex((off + size + t.info.PieceLength - 1) / t.info.PieceLength)
	if end > t.info.NumPieces() {
		end = t.info.NumPieces()
	}
	return
}

// Returns true if all iterations complete without breaking. Returns the read regions for all
// readers. The reader regions should not be merged as some callers depend on this method to
// enumerate readers.
func (t *Torrent) forReaderOffsetPieces(f func(begin, end pieceIndex) (more bool)) (all bool) {
	for r := range t.readers {
		p := r.pieces
		if p.begin >= p.end {
			// Empty range; skip without invoking f.
			continue
		}
		if !f(p.begin, p.end) {
			return false
		}
	}
	return true
}

// Marks the chunk for the given request as pending (not received) again.
func (t *Torrent) pendRequest(req RequestIndex) {
	t.piece(t.pieceIndexOfRequestIndex(req)).pendChunkIndex(req % t.chunksPerRegularPiece())
}

// Runs completion/incompletion hooks and re-prioritizes the piece after its completion state
// flips.
func (t *Torrent) pieceCompletionChanged(piece pieceIndex, reason updateRequestReason) {
	t.cl.event.Broadcast()
	if t.pieceComplete(piece) {
		t.onPieceCompleted(piece)
	} else {
		t.onIncompletePiece(piece)
	}
	t.updatePiecePriority(piece, reason)
}

// Count of conns that were accepted rather than dialed.
func (t *Torrent) numReceivedConns() (ret int) {
	for c := range t.conns {
		if c.Discovery == PeerSourceIncoming {
			ret++
		}
	}
	return
}

func (t *Torrent) numOutgoingConns() (ret int) {
	for c := range t.conns {
		if c.outgoing {
			ret++
		}
	}
	return
}

// The budget of half-open (still dialing) conns for this torrent.
func (t *Torrent) maxHalfOpen() int {
	// Note that if we somehow exceed the maximum established conns, we want
	// the negative value to have an effect.
	establishedHeadroom := int64(t.maxEstablishedConns - len(t.conns))
	extraIncoming := int64(t.numReceivedConns() - t.maxEstablishedConns/2)
	// We want to allow some experimentation with new peers, and to try to
	// upset an oversupply of received connections.
	return int(min(
		max(5, extraIncoming)+establishedHeadroom,
		int64(t.cl.config.HalfOpenConnsPerTorrent),
	))
}

// Dials out to known peers, bounded by the per-torrent and client-wide half-open limits. Returns
// how many dials were initiated.
func (t *Torrent) openNewConns() (initiated int) {
	if len(t.cl.dialers) == 0 {
		return
	}
	if !t.wantOutgoingConns() {
		return
	}
	numPeers := t.peers.Len()
	for numPeers != 0 {
		if len(t.halfOpen) >= t.maxHalfOpen() {
			return
		}
		if t.cl.numHalfOpen >= t.cl.config.TotalHalfOpenConns {
			return
		}
		p := t.peers.PopMax()
		numPeers--
		if numPeers == t.cl.config.TorrentPeersLowWater {
			// Crossed the low-water mark: prompt the announcers for more peers.
			t.deferUpdateRegularTrackerAnnouncing()
		}
		opts := outgoingConnOpts{
			peerInfo:                 p,
			t:                        t,
			requireRendezvous:        false,
			skipHolepunchRendezvous:  false,
			receivedHolepunchConnect: false,
			HeaderObfuscationPolicy:  t.cl.config.HeaderObfuscationPolicy,
		}
		initiateConn(opts, false)
		initiated++
		if t.cl.check.Try() {
			// Internal check: our manual counter must track the peers heap size.
			panicif.NotEq(numPeers, t.peers.Len())
		}
	}
	return
}

func (t *Torrent) setPieceCompletion(piece pieceIndex, uncached g.Option[bool]) {
	changed := t.setCachedPieceCompletion(piece, uncached)
	t.afterSetPieceCompletion(piece, changed)
}

// Returns whether the cached completion changed.
func (t *Torrent) setPieceCompletionFromStorage(piece pieceIndex) bool {
	changed := t.setCachedPieceCompletionFromStorage(piece)
	t.afterSetPieceCompletion(piece, changed)
	return changed
}

// Like setPieceCompletionFromStorage but always runs the after-set updates, for first load.
func (t *Torrent) setInitialPieceCompletionFromStorage(piece pieceIndex) {
	t.setCachedPieceCompletionFromStorage(piece)
	t.afterSetPieceCompletion(piece, true)
}

// Sets the cached piece completion directly from storage.
func (t *Torrent) setCachedPieceCompletionFromStorage(piece pieceIndex) bool {
	uncached := t.pieceCompleteUncached(piece)
	if uncached.Err != nil {
		// Storage is misbehaving; stop downloading rather than thrash against it.
		t.slogger().Error("error getting piece completion", "err", uncached.Err)
		t.disallowDataDownloadLocked()
	}
	return t.setCachedPieceCompletion(piece, g.OptionFromTuple(uncached.Complete, uncached.Ok))
}

// Returns true if the value was changed.
func (t *Torrent) setCachedPieceCompletion(piece int, uncached g.Option[bool]) bool {
	p := t.piece(piece)
	// TODO: Here we should probably be storing Option[bool] for completion and filtering out
	// errors.
	cached := p.completion()
	cachedOpt := g.OptionFromTuple(cached.Complete, cached.Ok)
	changed := cachedOpt != uncached
	// First definitive "incomplete" observation for this piece: remember it torrent-wide.
	if !p.storageCompletionHasBeenOk && uncached.Ok && !uncached.Value {
		t.sawInitiallyIncompleteData = true
		// TODO: Possibly update other types of trackers too?
		t.deferUpdateRegularTrackerAnnouncing()
	}
	p.storageCompletionOk = uncached.Ok
	if !p.storageCompletionHasBeenOk {
		// Latches to true the first time storage completion reads Ok.
		p.storageCompletionHasBeenOk = p.storageCompletionOk
	}
	x := uint32(piece)
	if uncached.Ok && uncached.Value {
		if t._completedPieces.CheckedAdd(x) {
			// This is missing conditions... do we care?
			if t.haveAllPieces() {
				// We may be able to send Completed event.
				t.deferUpdateRegularTrackerAnnouncing()
			}
		}
	} else {
		t._completedPieces.Remove(x)
	}
	return changed
}

// Pulls piece completion state from storage and performs any state updates if it changes.
func (t *Torrent) updatePieceCompletion(piece pieceIndex) bool {
	return t.setPieceCompletionFromStorage(piece)
}

// Runs the follow-up state updates after a piece's cached completion has been set: refreshes the
// piece request order and completion bookkeeping, and fires change/completion triggers. (Does not
// itself read storage.)
func (t *Torrent) afterSetPieceCompletion(piece pieceIndex, changed bool) {
	p := t.piece(piece)
	cmpl := p.completion()
	complete := cmpl.Ok && cmpl.Complete
	p.t.updatePieceRequestOrderPiece(piece)
	t.deferUpdateComplete()
	if complete && len(p.dirtiers) != 0 {
		t.logger.Printf("marked piece %v complete but still has dirtiers", piece)
	}
	if changed {
		t.pieceCompletionChanged(piece, "Torrent.updatePieceCompletion")
	}
	if complete {
		// A newly complete piece may make us useful to more peers.
		t.openNewConns()
	}
}

// Non-blocking read. Client lock is not required.
func (t *Torrent) readAt(b []byte, off int64) (n int, err error) {
	r := t.storageReader()
	n, err = r.ReadAt(b, off)
	panicif.Err(r.Close())
	return
}

// Returns an error if the metadata was completed, but couldn't be set for some reason. Blame it on
// the last peer to contribute. TODO: Actually we shouldn't blame peers for failure to open storage
// etc. Also we should probably cached metadata pieces per-Peer, to isolate failure appropriately.
func (t *Torrent) maybeCompleteMetadata() error {
	if t.haveInfo() {
		// Nothing to do.
		return nil
	}
	if !t.haveAllMetadataPieces() {
		// Don't have enough metadata pieces.
		return nil
	}
	err := t.setInfoBytesLocked(t.metadataBytes)
	if err != nil {
		t.invalidateMetadata()
		return fmt.Errorf("error setting info bytes: %w", err)
	}
	if t.cl.config.Debug {
		t.logger.Printf("%s: got metadata from peers", t)
	}
	return nil
}

// Bitmaps of pieces currently at a reader position ("now") and within reader readahead windows.
func (t *Torrent) readerPiecePriorities() (now, readahead bitmap.Bitmap) {
	t.forReaderOffsetPieces(func(begin, end pieceIndex) bool {
		if end > begin {
			now.Add(bitmap.BitIndex(begin))
			readahead.AddRange(bitmap.BitRange(begin)+1, bitmap.BitRange(end))
		}
		return true
	})
	return
}

// Whether the torrent still wants data, or might once the info arrives.
func (t *Torrent) needData() bool {
	if t.closed.IsSet() {
		return false
	}
	if !t.haveInfo() {
		// Assume we'll want data until the info proves otherwise.
		return true
	}
	t.checkPendingPiecesMatchesRequestOrder()
	return !t._pendingPieces.IsEmpty()
}

// Appends entries from new that aren't already present in old, preserving order.
func appendMissingStrings(old, new []string) (ret []string) {
	ret = old
new:
	for _, n := range new {
		for _, o := range old {
			if o == n {
				continue new
			}
		}
		ret = append(ret, n)
	}
	return
}

// Pads existing with empty tiers until it has at least minNumTiers entries.
func appendMissingTrackerTiers(existing [][]string, minNumTiers int) (ret [][]string) {
	ret = existing
	for minNumTiers > len(ret) {
		ret = append(ret, nil)
	}
	return
}

// Merges the given tiers of tracker URLs into the torrent's announce list, then starts any
// missing scrapers.
func (t *Torrent) addTrackers(announceList [][]string) {
	if t.isDropped() {
		// Can't alter dropped Torrent because it may have skipped registering announce states with
		// the announce dispatcher.
		return
	}
	fullAnnounceList := &t.announceList
	t.announceList = appendMissingTrackerTiers(*fullAnnounceList, len(announceList))
	for tierIndex, trackerURLs := range announceList {
		(*fullAnnounceList)[tierIndex] = appendMissingStrings((*fullAnnounceList)[tierIndex], trackerURLs)
	}
	t.startMissingTrackerScrapers()
	t.updateWantPeersEvent()
}

// Replaces the announce list: stops all current announcers concurrently, then installs the new
// trackers.
func (t *Torrent) modifyTrackers(announceList [][]string) {
	var workers errgroup.Group
	for _, v := range t.trackerAnnouncers {
		workers.Go(func() error {
			v.Stop()
			return nil
		})
	}
	workers.Wait()
	t.announceList = nil
	t.addTrackers(announceList)
}

// Don't call this before the info is available.
func (t *Torrent) bytesCompleted() int64 {
	if !t.haveInfo() {
		return 0
	}
	return t.length() - t.bytesLeft()
}

// Sets the info bytes under the client lock, refusing if the torrent is closed.
func (t *Torrent) SetInfoBytes(b []byte) (err error) {
	t.cl.lock()
	defer t.cl.unlock()
	err = t.getClosedErr()
	if err != nil {
		return
	}
	return t.setInfoBytesLocked(b)
}

// Returns true if connection is removed from torrent.Conns.
func (t *Torrent) deletePeerConn(c *PeerConn) (ret bool) {
	if !c.closed.IsSet() {
		panic("connection is not closed")
		// There are behaviours prevented by the closed state that will fail
		// if the connection has been deleted.
	}
	_, ret = t.conns[c]
	delete(t.conns, c)
	// Avoid adding a drop event more than once. Probably we should track whether we've generated
	// the drop event against the PexConnState instead.
	if ret {
		if !t.cl.config.DisablePEX {
			t.pex.Drop(c)
		}
	}
	torrent.Add("deleted connections", 1)
	c.deleteAllRequests("Torrent.deletePeerConn")
	if len(t.conns) == 0 {
		// No conns left implies no outstanding requests.
		panicif.NotZero(len(t.requestState))
	}
	t.assertPendingRequests()
	if t.numActivePeers() == 0 && len(t.connsWithAllPieces) != 0 {
		panic(t.connsWithAllPieces)
	}
	return
}

// Undoes the peer's contribution to piece availability counts.
func (t *Torrent) decPeerPieceAvailability(p *Peer) {
	if t.deleteConnWithAllPieces(p) {
		// Tracked in aggregate, not per piece.
		return
	}
	if !t.haveInfo() {
		return
	}
	p.peerPieces().Iterate(func(i uint32) bool {
		p.t.decPieceAvailability(pieceIndex(i))
		return true
	})
}

// Invariant check; active only when internal checks are enabled. The body is currently disabled.
func (t *Torrent) assertPendingRequests() {
	if !check.Enabled {
		return
	}
	// var actual pendingRequests
	// if t.haveInfo() {
	// 	actual.m = make([]int, t.numChunks())
	// }
	// t.iterPeers(func(p *Peer) {
	// 	p.requestState.Requests.Iterate(func(x uint32) bool {
	// 		actual.Inc(x)
	// 		return true
	// 	})
	// })
	// diff := cmp.Diff(actual.m, t.pendingRequests.m)
	// if diff != "" {
	// 	panic(diff)
	// }
}

// Closes the conn, notifies StatusUpdated callbacks, and removes it from the torrent, replacing
// it with a new dial if it was still registered.
func (t *Torrent) dropConnection(c *PeerConn) {
	t.cl.event.Broadcast()
	c.close()

	for _, cb := range c.callbacks.StatusUpdated {
		cb(StatusUpdatedEvent{
			Event:  PeerDisconnected,
			PeerId: c.PeerID,
		})
	}
	t.logger.WithDefaultLevel(log.Debug).Printf("dropping connection to %+q, sent peerconn update", c.PeerID)

	if t.deletePeerConn(c) {
		t.openNewConns()
	}
}

// Peers as in contact information for dialing out. We should try to get some peers, even if we
// don't currently need them so as not to waste announces or have unnecessary latency or events.
2045 func (t *Torrent) wantPeers() bool { 2046 if t.closed.IsSet() { 2047 return false 2048 } 2049 return t.peers.Len() <= t.cl.config.TorrentPeersLowWater 2050 } 2051 2052 // This used to update a chansync/event for wanting peers for the per-Torrent tracker announcers, 2053 // but now those are client-level and not persistent. 2054 func (t *Torrent) updateWantPeersEvent() { 2055 t.deferUpdateRegularTrackerAnnouncing() 2056 } 2057 2058 // Regular tracker announcing is dispatched as a single "actor". Probably needs to incorporate all 2059 // tracker types at some point. 2060 func (t *Torrent) deferUpdateRegularTrackerAnnouncing() { 2061 t.cl.unlockHandlers.deferUpdateTorrentRegularTrackerAnnouncing(t) 2062 } 2063 2064 func (t *Torrent) updateRegularTrackerAnnouncing() { 2065 // Note this uses the map that only contains regular tracker URLs. 2066 t.cl.regularTrackerAnnounceDispatcher.updateTorrentInput(t) 2067 } 2068 2069 // Returns whether the client should make effort to seed the torrent. 
func (t *Torrent) seeding() bool {
	cl := t.cl
	if t.closed.IsSet() {
		return false
	}
	if t.dataUploadDisallowed {
		return false
	}
	if cl.config.NoUpload {
		return false
	}
	if !cl.config.Seed {
		return false
	}
	// Without aggressive upload, don't bother seeding while we still want data
	// for ourselves.
	if cl.config.DisableAggressiveUpload && t.needData() {
		return false
	}
	return true
}

// Handles a WebRTC data channel established for this Torrent: runs protocol
// handshakes over it and, on success, registers the resulting PeerConn under
// the client lock. The conn is closed when this returns.
func (t *Torrent) onWebRtcConn(
	c webtorrent.DataChannelConn,
	dcc webtorrent.DataChannelContext,
) {
	defer c.Close()
	netConn := webrtcNetConn{
		ReadWriteCloser:    c,
		DataChannelContext: dcc,
	}
	peerRemoteAddr := netConn.RemoteAddr()
	//t.logger.Levelf(log.Critical, "onWebRtcConn remote addr: %v", peerRemoteAddr)
	if t.cl.badPeerAddr(peerRemoteAddr) {
		return
	}
	localAddrIpPort := missinggo.IpPortFromNetAddr(netConn.LocalAddr())

	pc, err := t.cl.initiateProtocolHandshakes(
		context.Background(),
		netConn,
		t,
		false,
		newConnectionOpts{
			// If we made the WebRTC offer, this is effectively an outgoing connection.
			outgoing:        dcc.LocalOffered,
			remoteAddr:      peerRemoteAddr,
			localPublicAddr: localAddrIpPort,
			network:         webrtcNetwork,
			connString:      fmt.Sprintf("webrtc offer_id %x: %v", dcc.OfferId, regularNetConnPeerConnConnString(netConn)),
		},
	)
	if err != nil {
		t.logger.WithDefaultLevel(log.Error).Printf("error in handshaking webrtc connection: %v", err)
		return
	}
	if dcc.LocalOffered {
		pc.Discovery = PeerSourceTracker
	} else {
		pc.Discovery = PeerSourceIncoming
	}
	// Clear any write deadline left over from the handshake phase.
	pc.conn.SetWriteDeadline(time.Time{})
	t.cl.lock()
	defer t.cl.unlock()
	err = t.runHandshookConn(pc)
	if err != nil {
		t.logger.WithDefaultLevel(log.Debug).Printf("error running handshook webrtc conn: %v", err)
	}
}

// Runs the handshook conn and logs any error. With logAll, a nil error is
// logged too (log.ErrorLevel picks the level from err; level is the fallback).
func (t *Torrent) logRunHandshookConn(pc *PeerConn, logAll bool, level log.Level) {
	err := t.runHandshookConn(pc)
	if err != nil || logAll {
		t.logger.WithDefaultLevel(level).Levelf(log.ErrorLevel(err), "error running handshook conn: %v", err)
	}
}

// Convenience wrapper: run the conn, logging only actual errors at debug level.
func (t *Torrent) runHandshookConnLoggingErr(pc *PeerConn) {
	t.logRunHandshookConn(pc, false, log.Debug)
}

// Starts a websocket (WebTorrent) tracker announcer for the given tracker URL
// and short infohash. The initial announce happens in a background goroutine.
func (t *Torrent) startWebsocketAnnouncer(u url.URL, shortInfohash [20]byte) torrentTrackerAnnouncer {
	wtc, release := t.cl.websocketTrackers.Get(u.String(), shortInfohash)
	// This needs to run before the Torrent is dropped from the Client, to prevent a new
	// webtorrent.TrackerClient for the same info hash before the old one is cleaned up.
	t.onClose = append(t.onClose, release)
	wst := websocketTrackerStatus{u, wtc}
	go func() {
		err := wtc.Announce(tracker.Started, shortInfohash)
		if err != nil {
			// Downgrade to debug if the torrent was closed; the failure is then expected.
			level := log.Warning
			if t.closed.IsSet() {
				level = log.Debug
			}
			t.logger.Levelf(level, "error doing initial announce to %q: %v", u.String(), err)
		}
	}()
	return wst
}

// Starts tracker scraping for a single raw tracker URL. Bare "udp" URLs are
// expanded into both "udp4" and "udp6" via recursion, and each of the
// torrent's short infohashes (v1 and truncated v2) gets its own announcer.
func (t *Torrent) startScrapingTracker(_url string) {
	if _url == "" {
		return
	}
	u, err := url.Parse(_url)
	if err != nil {
		// URLs with a leading '*' appear to be a uTorrent convention to disable trackers.
		// Indexing _url[0] is safe: the empty string was rejected above.
		if _url[0] != '*' {
			t.logger.Levelf(log.Warning, "error parsing tracker url: %v", err)
		}
		return
	}
	if u.Scheme == "udp" {
		u.Scheme = "udp4"
		t.startScrapingTracker(u.String())
		u.Scheme = "udp6"
		t.startScrapingTracker(u.String())
		return
	}
	// Note the key is the original URL string, not the (possibly rewritten) parsed form.
	announcerKey := trackerAnnouncerKey(_url)
	for ih := range t.iterShortInfohashes() {
		t.startScrapingTrackerWithInfohash(u, announcerKey, ih)
	}
}

// Registers an announcer for the (tracker URL, short infohash) pair, unless
// one already exists or the relevant network/feature is disabled in config.
func (t *Torrent) startScrapingTrackerWithInfohash(u *url.URL, urlStr trackerAnnouncerKey, shortInfohash [20]byte) {
	announcerKey := torrentTrackerAnnouncerKey{
		ShortInfohash: shortInfohash,
		url:           urlStr,
	}
	if _, ok := t.trackerAnnouncers[announcerKey]; ok {
		return
	}
	sl := func() torrentTrackerAnnouncer {
		switch u.Scheme {
		case "ws", "wss":
			if t.cl.config.DisableWebtorrent {
				return nil
			}
			return t.startWebsocketAnnouncer(*u, shortInfohash)
		case "udp4":
			if t.cl.config.DisableIPv4Peers || t.cl.config.DisableIPv4 {
				return nil
			}
		case "udp6":
			if t.cl.config.DisableIPv6 {
				return nil
			}
		}
		// Non-websocket schemes fall through to the client-level regular announcer.
		t.cl.startTrackerAnnouncer(u, urlStr)
		t.initRegularTrackerAnnounceState(announcerKey)
		return torrentRegularTrackerAnnouncer{
			u: u,
			getAnnounceState: func() announceState {
				// Valid because initRegularTrackerAnnounceState populated this key above.
				return *t.regularTrackerAnnounceState[announcerKey]
			},
		}
	}()
	if sl == nil {
		return
	}
	g.MakeMapIfNil(&t.trackerAnnouncers)
	if g.MapInsert(t.trackerAnnouncers, announcerKey, sl).Ok {
		// We checked for an existing entry above, so a prior value is a logic error.
		panic("tracker announcer already exists")
	}
}

// We need a key in regularTrackerAnnounceState to ensure we propagate next announce state values.
2235 func (t *Torrent) initRegularTrackerAnnounceState(key torrentTrackerAnnouncerKey) { 2236 g.MakeMapIfNil(&t.regularTrackerAnnounceState) 2237 t.cl.regularTrackerAnnounceDispatcher.addKey(key) 2238 t.regularTrackerAnnounceState[key] = t.cl.regularTrackerAnnounceDispatcher.announceStates[key] 2239 t.deferUpdateRegularTrackerAnnouncing() 2240 } 2241 2242 // Adds and starts tracker scrapers for tracker URLs that aren't already 2243 // running. 2244 func (t *Torrent) startMissingTrackerScrapers() { 2245 if t.cl.config.DisableTrackers { 2246 return 2247 } 2248 for _, tier := range t.announceList { 2249 for _, url := range tier { 2250 t.startScrapingTracker(url) 2251 } 2252 } 2253 } 2254 2255 // Returns an AnnounceRequest with fields filled out to defaults and current 2256 // values. 2257 func (t *Torrent) announceRequest( 2258 event tracker.AnnounceEvent, 2259 shortInfohash [20]byte, 2260 ) tracker.AnnounceRequest { 2261 // Note that IPAddress is not set. It's set for UDP inside the tracker code, since it's 2262 // dependent on the network in use. 2263 return tracker.AnnounceRequest{ 2264 Event: event, 2265 NumWant: func() int32 { 2266 if t.wantPeers() && len(t.cl.dialers) > 0 { 2267 // Windozer has UDP packet limit. See: 2268 // https://github.com/anacrolix/torrent/issues/764 2269 return 200 2270 } else { 2271 return 0 2272 } 2273 }(), 2274 Port: uint16(t.cl.incomingPeerPort()), 2275 PeerId: t.cl.peerID, 2276 InfoHash: shortInfohash, 2277 Key: t.cl.announceKey(), 2278 2279 // The following are vaguely described in BEP 3. 2280 2281 Left: t.bytesLeftAnnounce(), 2282 Uploaded: t.connStats.BytesWrittenData.Int64(), 2283 // There's no mention of wasted or unwanted download in the BEP. 2284 Downloaded: t.connStats.BytesReadUsefulData.Int64(), 2285 } 2286 } 2287 2288 // Adds peers revealed in an announce until the announce ends, or we have 2289 // enough peers. 
func (t *Torrent) consumeDhtAnnouncePeers(pvs <-chan dht.PeersValues) {
	cl := t.cl
	for v := range pvs {
		peerInfos := make([]PeerInfo, 0, len(v.Peers))
		for _, cp := range v.Peers {
			if cp.Port == 0 {
				// Can't do anything with this.
				continue
			}
			peerInfos = append(peerInfos, PeerInfo{
				Addr:   ipPortAddr{cp.IP, cp.Port},
				Source: PeerSourceDhtGetPeers,
			})
		}
		// Only take the client lock when there's actually something to add.
		if len(peerInfos) > 0 {
			cl.lock()
			t.addPeers(peerInfos)
			cl.unlock()

		}
	}
}

// Announce using the provided DHT server. Peers are consumed automatically. done is closed when the
// announce ends. stop will force the announce to end. This interface is really old-school, and
// calls a private one that is much more modern. Both v1 and v2 info hashes are announced if they
// exist.
func (t *Torrent) AnnounceToDht(s DhtServer) (done <-chan struct{}, stop func(), err error) {
	// Snapshot the short infohashes under the client lock before spawning anything.
	var ihs [][20]byte
	t.cl.lock()
	t.eachShortInfohash(func(short [20]byte) {
		ihs = append(ihs, short)
	})
	t.cl.unlock()
	ctx, stop := context.WithCancel(context.Background())
	eg, ctx := errgroup.WithContext(ctx)
	for _, ih := range ihs {
		var ann DhtAnnounce
		// Assigns the named return err so callers see the failure after stop().
		ann, err = s.Announce(ih, t.cl.incomingPeerPort(), true)
		if err != nil {
			stop()
			return
		}
		eg.Go(func() error {
			return t.dhtAnnounceConsumer(ctx, ann)
		})
	}
	_done := make(chan struct{})
	done = _done
	go func() {
		defer stop()
		defer close(_done)
		// Errors are surfaced per-consumer; here we only care about completion.
		eg.Wait()
	}()
	return
}

// Announce using the provided DHT server. Peers are consumed automatically. done is closed when the
// announce ends. stop will force the announce to end.
func (t *Torrent) dhtAnnounceConsumer(
	ctx context.Context,
	ps DhtAnnounce,
) (
	err error,
) {
	defer ps.Close()
	done := make(chan struct{})
	go func() {
		defer close(done)
		t.consumeDhtAnnouncePeers(ps.Peers())
	}()
	// Return either the cancellation cause or nil once the peer stream drains.
	select {
	case <-ctx.Done():
		return context.Cause(ctx)
	case <-done:
		return nil
	}
}

// Runs a DHT announce, stopping it when the Torrent closes or after a fixed
// 15-minute timeout.
func (t *Torrent) timeboxedAnnounceToDht(s DhtServer) error {
	_, stop, err := t.AnnounceToDht(s)
	if err != nil {
		return err
	}
	select {
	case <-t.closed.Done():
		// Arbitrary, but reported in
		// https://github.com/anacrolix/torrent/issues/1005#issuecomment-2856881633. Should able to
		// remove timeboxing entirely at some point.
	case <-time.After(15 * time.Minute):
	}
	stop()
	return nil
}

// Long-running per-DhtServer announce loop. Called with the client lock NOT
// held; it takes the lock itself and releases it around each announce.
func (t *Torrent) dhtAnnouncer(s DhtServer) {
	cl := t.cl
	cl.lock()
	defer cl.unlock()
	for {
		// Inner loop: wait (on the client event) until announcing is worthwhile.
		for {
			if t.closed.IsSet() {
				return
			}
			// We're also announcing ourselves as a listener, so we don't just want peer addresses.
			// TODO: We can include the announce_peer step depending on whether we can receive
			// inbound connections. We should probably only announce once every 15 mins too.
			if !t.wantAnyConns() {
				goto wait
			}
			// TODO: Determine if there's a listener on the port we're announcing.
			if len(cl.dialers) == 0 && len(cl.listeners) == 0 {
				goto wait
			}
			break
		wait:
			cl.event.Wait()
		}
		func() {
			t.numDHTAnnounces++
			// Release the client lock for the duration of the (long) announce.
			cl.unlock()
			defer cl.lock()
			err := t.timeboxedAnnounceToDht(s)
			if err != nil {
				t.logger.WithDefaultLevel(log.Warning).Printf("error announcing %q to DHT: %s", t, err)
				// Assume DNS issues. This is hacky, but DHT announcing needs be overhauled and
				// managed at a client level without unnecessary goroutines, just like with regular
				// trackers. Works around https://github.com/anacrolix/torrent/issues/1029.
				time.Sleep(5 * time.Minute)
			}
		}()
	}
}

// Adds the given peers, returning how many were actually added. Client lock
// must be held.
func (t *Torrent) addPeers(peers []PeerInfo) (added int) {
	return t.addPeersIter(slices.Values(peers))
}

// Adds peers from an iterator. If our want-peers status flipped as a result,
// schedules a regular-tracker announcing update.
func (t *Torrent) addPeersIter(peers iter.Seq[PeerInfo]) (added int) {
	wantPeers := t.wantPeers()
	for p := range peers {
		if t.addPeer(p) {
			added++
		}
	}
	if t.wantPeers() != wantPeers {
		t.deferUpdateRegularTrackerAnnouncing()
	}
	return
}

// The returned TorrentStats may require alignment in memory. See
// https://github.com/anacrolix/torrent/issues/383.
func (t *Torrent) Stats() TorrentStats {
	t.cl.rLock()
	defer t.cl.rUnlock()
	return t.statsLocked()
}

// Snapshot of instantaneous gauge values. Called with the client lock held
// (see statsLocked/Stats).
func (t *Torrent) gauges() (ret TorrentGauges) {
	ret.ActivePeers = len(t.conns)
	ret.HalfOpenPeers = len(t.halfOpen)
	ret.PendingPeers = t.peers.Len()
	ret.TotalPeers = t.numTotalPeers()
	ret.ConnectedSeeders = 0
	for c := range t.conns {
		if all, ok := c.peerHasAllPieces(); all && ok {
			ret.ConnectedSeeders++
		}
	}
	ret.PiecesComplete = t.numPiecesCompleted()
	return
}

// Assembles TorrentStats from counters and gauges. Client lock must be held.
func (t *Torrent) statsLocked() (ret TorrentStats) {
	ret.AllConnStats = t.connStats.Copy()
	ret.TorrentStatCounters = copyCountFields(&t.counters)
	ret.TorrentGauges = t.gauges()
	return
}

// The total number of peers in the torrent.
func (t *Torrent) numTotalPeers() int {
	// Deduplicate by address string across established, half-open, and pending peers.
	peers := make(map[string]struct{})
	for conn := range t.conns {
		ra := conn.RemoteAddr
		if ra == nil {
			// It's been closed and doesn't support RemoteAddr.
			continue
		}
		peers[ra.String()] = struct{}{}
	}
	for addr := range t.halfOpen {
		peers[addr] = struct{}{}
	}
	t.peers.Each(func(peer PeerInfo) {
		peers[peer.Addr.String()] = struct{}{}
	})
	return len(peers)
}

// Returns true if the connection is added.
func (t *Torrent) addPeerConn(c *PeerConn) (err error) {
	defer func() {
		if err == nil {
			torrent.Add("added connections", 1)
		}
	}()
	if t.closed.IsSet() {
		return errTorrentClosed
	}
	// Resolve duplicate peer IDs, preferring the connection on the better network.
	for c0 := range t.conns {
		if c.PeerID != c0.PeerID {
			continue
		}
		if !t.cl.config.DropDuplicatePeerIds {
			continue
		}
		if c.hasPreferredNetworkOver(c0) {
			c0.close()
			t.deletePeerConn(c0)
		} else {
			return errors.New("existing connection preferred")
		}
	}
	// At the connection cap: try to evict a worse existing conn to make room.
	if len(t.conns) >= t.maxEstablishedConns {
		numOutgoing := t.numOutgoingConns()
		numIncoming := len(t.conns) - numOutgoing
		c := t.worstBadConn(worseConnLensOpts{
			// We've already established that we have too many connections at this point, so we just
			// need to match what kind we have too many of vs. what we're trying to add now.
			incomingIsBad: (numIncoming-numOutgoing > 1) && c.outgoing,
			outgoingIsBad: (numOutgoing-numIncoming > 1) && !c.outgoing,
		})
		if c == nil {
			return errors.New("don't want conn")
		}
		c.close()
		t.deletePeerConn(c)
	}
	if len(t.conns) >= t.maxEstablishedConns {
		// Eviction above must have freed a slot; anything else is a logic error.
		panic(len(t.conns))
	}
	t.conns[c] = struct{}{}
	t.cl.event.Broadcast()
	// We'll never receive the "p" extended handshake parameter.
	if !t.cl.config.DisablePEX && !c.PeerExtensionBytes.SupportsExtended() {
		t.pex.Add(c)
	}
	return nil
}

// Whether any new connection (either direction) is acceptable in principle.
func (t *Torrent) newConnsAllowed() bool {
	if !t.networkingEnabled.Bool() {
		return false
	}
	if t.closed.IsSet() {
		return false
	}
	// Don't accrue conns while the download rate limiter is exhausted.
	if rl := t.cl.config.DownloadRateLimiter; rl != nil && rl.Tokens() <= 0 {
		return false
	}
	if t.needData() {
		return true
	}
	return t.seeding() && t.haveAnyPieces()
}

// Whether we want any more connections at all (below the established cap).
func (t *Torrent) wantAnyConns() bool {
	if !t.newConnsAllowed() {
		return false
	}
	return len(t.conns) < t.maxEstablishedConns
}

// Whether an outgoing connection would be accepted, possibly by evicting a
// worse incoming one.
func (t *Torrent) wantOutgoingConns() bool {
	if !t.newConnsAllowed() {
		return false
	}
	if len(t.conns) < t.maxEstablishedConns {
		// Shortcut: We can take any connection direction right now.
		return true
	}
	numIncomingConns := len(t.conns) - t.numOutgoingConns()
	return t.worstBadConn(worseConnLensOpts{
		incomingIsBad: numIncomingConns-t.numOutgoingConns() > 1,
		outgoingIsBad: false,
	}) != nil
}

// Whether an incoming connection would be accepted, possibly by evicting a
// worse outgoing one. Mirrors wantOutgoingConns.
func (t *Torrent) wantIncomingConns() bool {
	if !t.newConnsAllowed() {
		return false
	}
	if len(t.conns) < t.maxEstablishedConns {
		// Shortcut: We can take any connection direction right now.
		return true
	}
	numIncomingConns := len(t.conns) - t.numOutgoingConns()
	return t.worstBadConn(worseConnLensOpts{
		incomingIsBad: false,
		outgoingIsBad: t.numOutgoingConns()-numIncomingConns > 1,
	}) != nil
}

// Sets the established connection cap, dropping the worst conns until under
// the new limit, and returns the previous cap.
func (t *Torrent) SetMaxEstablishedConns(max int) (oldMax int) {
	t.cl.lock()
	defer t.cl.unlock()
	oldMax = t.maxEstablishedConns
	t.maxEstablishedConns = max
	wcs := worseConnSlice{
		conns: t.appendConns(nil, func(*PeerConn) bool {
			return true
		}),
	}
	wcs.initKeys(worseConnLensOpts{})
	heap.Init(&wcs)
	// Pop the worst conns until we're within the new cap.
	for len(t.conns) > t.maxEstablishedConns && wcs.Len() > 0 {
		t.dropConnection(heap.Pop(&wcs).(*PeerConn))
	}
	t.openNewConns()
	return oldMax
}

// Records the outcome of a piece hash: updates completion in storage, scores
// contributing peers, and possibly bans a sole bad contributor. Client lock is
// held on entry and exit, but is released around the storage Mark(Not)Complete
// calls.
func (t *Torrent) pieceHashed(piece pieceIndex, passed bool, hashIoErr error) {
	p := t.piece(piece)
	p.numVerifies++
	p.numVerifiesCond.Broadcast()
	t.cl.event.Broadcast()
	if t.closed.IsSet() {
		return
	}

	// Don't score the first time a piece is hashed, it could be an initial check.
	if t.initialPieceCheckDisabled || p.numVerifies != 1 {
		if passed {
			pieceHashedCorrect.Add(1)
		} else {
			log.Fmsg(
				"piece %d failed hash: %d connections contributed", piece, len(p.dirtiers),
			).AddValues(t, p).LogLevel(log.Info, t.logger)
			pieceHashedNotCorrect.Add(1)
		}
	}

	// Mark the piece as being committed to storage for the duration.
	p.marking = true
	t.deferPublishPieceStateChange(piece)
	defer func() {
		p.marking = false
		t.deferPublishPieceStateChange(piece)
	}()

	if passed {
		t.incrementPiecesDirtiedStats(p, (*ConnStats).incrementPiecesDirtiedGood)
		t.clearPieceTouchers(piece)
		// Release the client lock around storage I/O; p.race detects concurrent mutation.
		t.cl.unlock()
		p.race++
		err := p.Storage().MarkComplete()
		if err != nil {
			t.slogger().Error("error marking piece complete", "piece", piece, "err", err)
		}
		t.cl.lock()

		if t.closed.IsSet() {
			return
		}
		t.pendAllChunkSpecs(piece)
		t.setPieceCompletion(piece, g.Some(true))
	} else {
		if len(p.dirtiers) != 0 && p.allChunksDirty() && hashIoErr == nil {
			// Peers contributed to all the data for this piece hash failure, and the failure was
			// not due to errors in the storage (such as data being dropped in a cache).
			t.incrementPiecesDirtiedStats(p, (*ConnStats).incrementPiecesDirtiedBad)

			bannableTouchers := make([]*Peer, 0, len(p.dirtiers))
			for c := range p.dirtiers {
				if !c.trusted {
					bannableTouchers = append(bannableTouchers, c)
				}
			}
			t.clearPieceTouchers(piece)
			slices.SortFunc(bannableTouchers, comparePeerTrust)

			if t.cl.config.Debug {
				t.logger.Printf(
					"bannable conns by trust for piece %d: %v",
					piece,
					func() (ret []connectionTrust) {
						for _, c := range bannableTouchers {
							ret = append(ret, c.trust())
						}
						return
					}(),
				)
			}

			if len(bannableTouchers) >= 1 {
				c := bannableTouchers[0]
				if len(bannableTouchers) != 1 {
					t.logger.Levelf(log.Debug, "would have banned %v for touching piece %v after failed piece check", c.remoteIp(), piece)
				} else {
					// Turns out it's still useful to ban peers like this because if there's only a
					// single peer for a piece, and we never progress that piece to completion, we
					// will never smart-ban them. Discovered in
					// https://github.com/anacrolix/torrent/issues/715.
					t.slogger().Info(
						"piece failed hash. banning peer",
						"piece", piece,
						"peer", c)
					c.providedBadData()
					// TODO: Check if we now have no available peers for pieces we want.
				}
			}
		}

		// This pattern is copied from MarkComplete above. Note the pattern.
		t.cl.unlock()
		p.race++
		err := p.Storage().MarkNotComplete()
		if err != nil {
			t.slogger().Error("error marking piece not complete", "piece", piece, "err", err)
		}
		t.cl.lock()
		if t.closed.IsSet() {
			return
		}
		t.onIncompletePiece(piece)
		// Set it directly without querying storage again. It makes no difference if the lock is
		// held since it can be clobbered right after again anyway. This comes after inCompletePiece
		// because that's how it was before.
		t.setPieceCompletion(p.index, g.Some(false))
	}
}

// Cancels all outstanding requests covering the piece's chunk range.
func (t *Torrent) cancelRequestsForPiece(piece pieceIndex) {
	start := t.pieceRequestIndexBegin(piece)
	end := start + t.pieceNumChunks(piece)
	for ri := start; ri < end; ri++ {
		t.cancelRequest(ri)
	}
}

// Called when a piece transitions to complete: drops pending requests, wakes
// readers, and notifies peers.
func (t *Torrent) onPieceCompleted(piece pieceIndex) {
	t.pendAllChunkSpecs(piece)
	t.cancelRequestsForPiece(piece)
	t.piece(piece).readerCond.Broadcast()
	for conn := range t.conns {
		conn.have(piece)
		t.maybeDropMutuallyCompletePeer(conn)
	}
}

// Called when a piece is found to be not complete.
func (t *Torrent) onIncompletePiece(piece pieceIndex) {
	if t.pieceAllDirty(piece) {
		t.pendAllChunkSpecs(piece)
	}
	if t.ignorePieceForRequests(piece) {
		// t.logger.Printf("piece %d incomplete and unwanted", piece)
		return
	}
	// We could drop any connections that we told we have a piece that we
	// don't here. But there's a test failure, and it seems clients don't care
	// if you request pieces that you already claim to have. Pruning bad
	// connections might just remove any connections that aren't treating us
	// favourably anyway.

	// for c := range t.conns {
	// 	if c.sentHave(piece) {
	// 		c.drop()
	// 	}
	// }
	t.iterPeers(func(conn *Peer) {
		if conn.peerHasPiece(piece) {
			conn.onNeedUpdateRequests("piece incomplete")
		}
	})
}

// Torrent piece hashers are sticky and will try to keep hashing pieces in the same Torrent to keep
// the storage hot.
func (t *Torrent) startPieceHashers() error {
	if t.closed.IsSet() {
		return errTorrentClosed
	}
	// Keep spawning hashers while the client-level budget allows and there's work.
	for t.considerStartingHashers() {
		if !t.startSinglePieceHasher() {
			break
		}
	}
	return nil
}

// Claims one queued piece and spawns a hasher goroutine for it. Returns false
// if nothing was available to hash.
func (t *Torrent) startSinglePieceHasher() bool {
	pi := t.getPieceToHash()
	if !pi.Ok {
		return false
	}
	t.startHash(pi.Value)
	go t.pieceHasher(pi.Value)
	return true
}

// Sticky to a Torrent. Might as well since that keeps the storage hot.
// finishHash returns with the client lock held; we release it between pieces
// and on exit.
func (t *Torrent) pieceHasher(initial pieceIndex) {
	t.finishHash(initial)
	for {
		piOpt := t.getPieceToHash()
		if !piOpt.Ok {
			break
		}
		pi := piOpt.Value
		t.startHash(pi)
		t.cl.unlock()
		t.finishHash(pi)
	}
	// Give the client a chance to start hashers on other torrents.
	t.cl.startPieceHashers()
	t.cl.unlock()
}

// Transitions a piece into the hashing state. Takes the storage read-lock,
// which the matching finishHash call releases.
func (t *Torrent) startHash(pi pieceIndex) {
	p := t.piece(pi)
	t.piecesQueuedForHash.Remove(pi)
	p.hashing = true
	t.deferPublishPieceStateChange(pi)
	t.updatePiecePriority(pi, "Torrent.startHash")
	t.storageLock.RLock()
	t.activePieceHashes++
	t.cl.activePieceHashers++
}

// Picks a queued piece that isn't already being hashed or marked.
func (t *Torrent) getPieceToHash() (_ g.Option[pieceIndex]) {
	for i := range t.piecesQueuedForHash.Iterate {
		p := t.piece(i)
		if p.hashing || p.marking {
			continue
		}
		return g.Some(i)
	}
	return
}

// Drops any current peers whose remote IP is in the client's bad-peer set.
func (t *Torrent) dropBannedPeers() {
	t.iterPeers(func(p *Peer) {
		remoteIp := p.remoteIp()
		if remoteIp == nil {
			if p.bannableAddr.Ok {
				t.logger.WithDefaultLevel(log.Debug).Printf("can't get remote ip for peer %v", p)
			}
			return
		}
		netipAddr := netip.MustParseAddr(remoteIp.String())
		// Sanity check: the bannable addr recorded for the peer should match its remote IP.
		if Some(netipAddr) != p.bannableAddr {
			t.logger.WithDefaultLevel(log.Debug).Printf(
				"peer remote ip does not match its bannable addr [peer=%v, remote ip=%v, bannable addr=%v]",
				p, remoteIp, p.bannableAddr)
		}
		if _, ok := t.cl.badPeerIPs[netipAddr]; ok {
			// Should this be a close?
			p.drop()
			t.logger.WithDefaultLevel(log.Debug).Printf("dropped %v for banned remote IP %v", p, netipAddr)
		}
	})
}

// Storage lock is held. Release storage lock after we're done reading and relock Client. Return
// with Client lock still held.
func (t *Torrent) finishHash(index pieceIndex) {
	p := t.piece(index)
	// Do we really need to spell out that it's a copy error? If it's a failure to hash the hash
	// will just be wrong.
	correct, failedPeers, copyErr := t.hashPiece(index)
	t.storageLock.RUnlock()
	// Only warn for real copy errors; EOF just means the data ran short.
	level := slog.LevelDebug
	switch copyErr {
	case nil, io.EOF:
	default:
		level = slog.LevelWarn
	}
	t.cl.lock()
	t.slogger().Log(context.Background(), level, "finished hashing piece",
		"piece", index,
		"correct", correct,
		"failedPeers", failedPeers,
		"err", copyErr)
	if correct {
		// Smart-ban peers whose blocks didn't match, then forget the piece's blocks.
		if len(failedPeers) > 0 {
			for peer := range failedPeers {
				t.cl.banPeerIP(peer.AsSlice())
				t.slogger().Info("smart banned peer", "peer", peer, "piece", index)
			}
			t.dropBannedPeers()
		}
		t.smartBanCache.ForgetBlockSeq(iterRange(t.pieceRequestIndexBegin(index), t.pieceRequestIndexBegin(index+1)))
	}
	p.hashing = false
	t.pieceHashed(index, correct, copyErr)
	t.updatePiecePriority(index, "Torrent.finishHash")
	t.activePieceHashes--
	if t.activePieceHashes == 0 {
		t.deferUpdateComplete()
	}
	t.cl.activePieceHashers--
}

// Return the connections that touched a piece, and clear the entries while doing it.
func (t *Torrent) clearPieceTouchers(pi pieceIndex) {
	p := t.piece(pi)
	for c := range p.dirtiers {
		delete(c.peerTouchedPieces, pi)
		delete(p.dirtiers, c)
	}
}

// Queue a check if one hasn't occurred before for the piece, and the completion state is unknown.
func (t *Torrent) queueInitialPieceCheck(i pieceIndex) {
	if t.initialPieceCheckDisabled {
		return
	}
	p := t.piece(i)
	// Already verified at least once: nothing to do.
	if p.numVerifies != 0 {
		return
	}
	// If a hash is already occurring we've satisfied the initial piece check condition.
	if p.hashing {
		return
	}
	// Completion state is already known from storage; no check needed.
	if p.storageCompletionOk {
		return
	}
	// Should only get closed or missing hash errors here which are ok.
	_, _ = t.queuePieceCheck(i)
}

// Queues a hash check for the piece, returning the verify count that will
// correspond to this check. No-op (beyond the returned count) if the piece is
// already queued.
func (t *Torrent) queuePieceCheck(pieceIndex pieceIndex) (targetVerifies pieceVerifyCount, err error) {
	piece := t.piece(pieceIndex)
	if !piece.haveHash() {
		// Should we just queue the hash anyway?
		err = errors.New("piece hash unknown")
		return
	}
	targetVerifies = piece.nextNovelHashCount()
	if piece.queuedForHash() {
		return
	}
	t.piecesQueuedForHash.Add(pieceIndex)
	t.deferUpdateComplete()
	t.deferPublishPieceStateChange(pieceIndex)
	t.updatePiecePriority(pieceIndex, "Torrent.queuePieceCheck")
	err = t.startPieceHashers()
	return
}

// Deprecated: Use Torrent.VerifyDataContext.
func (t *Torrent) VerifyData() error {
	return t.VerifyDataContext(context.Background())
}

// Forces all the pieces to be re-hashed. See also Piece.VerifyDataContext. This
// should not be called before the Info is available. TODO: Make this operate
// concurrently within the configured piece hashers limit.
2951 func (t *Torrent) VerifyDataContext(ctx context.Context) error { 2952 for i := 0; i < t.NumPieces(); i++ { 2953 err := t.Piece(i).VerifyDataContext(ctx) 2954 if err != nil { 2955 err = fmt.Errorf("verifying piece %v: %w", i, err) 2956 return err 2957 } 2958 } 2959 return nil 2960 } 2961 2962 func (t *Torrent) connectingToPeerAddr(addrStr string) bool { 2963 return len(t.halfOpen[addrStr]) != 0 2964 } 2965 2966 func (t *Torrent) hasPeerConnForAddr(x PeerRemoteAddr) bool { 2967 addrStr := x.String() 2968 for c := range t.conns { 2969 ra := c.RemoteAddr 2970 if ra.String() == addrStr { 2971 return true 2972 } 2973 } 2974 return false 2975 } 2976 2977 func (t *Torrent) getHalfOpenPath( 2978 addrStr string, 2979 attemptKey outgoingConnAttemptKey, 2980 ) nestedmaps.Path[*PeerInfo] { 2981 return nestedmaps.Next(nestedmaps.Next(nestedmaps.Begin(&t.halfOpen), addrStr), attemptKey) 2982 } 2983 2984 func (t *Torrent) addHalfOpen(addrStr string, attemptKey *PeerInfo) { 2985 path := t.getHalfOpenPath(addrStr, attemptKey) 2986 if path.Exists() { 2987 panic("should be unique") 2988 } 2989 path.Set(attemptKey) 2990 t.cl.numHalfOpen++ 2991 } 2992 2993 // Start the process of connecting to the given peer for the given torrent if appropriate. I'm not 2994 // sure all the PeerInfo fields are being used. 2995 func initiateConn( 2996 opts outgoingConnOpts, 2997 ignoreLimits bool, 2998 ) { 2999 t := opts.t 3000 peer := opts.peerInfo 3001 if peer.Id == t.cl.peerID { 3002 return 3003 } 3004 if t.cl.badPeerAddr(peer.Addr) && !peer.Trusted { 3005 return 3006 } 3007 addr := peer.Addr 3008 addrStr := addr.String() 3009 if !ignoreLimits { 3010 if t.connectingToPeerAddr(addrStr) { 3011 return 3012 } 3013 } 3014 if t.hasPeerConnForAddr(addr) { 3015 return 3016 } 3017 attemptKey := &peer 3018 t.addHalfOpen(addrStr, attemptKey) 3019 go t.cl.outgoingConnection( 3020 opts, 3021 attemptKey, 3022 ) 3023 } 3024 3025 // Adds a trusted, pending peer for each of the given Client's addresses. 
Typically used in tests to 3026 // quickly make one Client visible to the Torrent of another Client. 3027 func (t *Torrent) AddClientPeer(cl *Client) int { 3028 return t.AddPeers(func() (ps []PeerInfo) { 3029 for _, la := range cl.ListenAddrs() { 3030 ps = append(ps, PeerInfo{ 3031 Addr: la, 3032 Trusted: true, 3033 }) 3034 } 3035 return 3036 }()) 3037 } 3038 3039 func (t *Torrent) hashingPiece(i pieceIndex) bool { 3040 return t.pieces[i].hashing 3041 } 3042 3043 func (t *Torrent) pieceQueuedForHash(i pieceIndex) bool { 3044 return t.piecesQueuedForHash.Contains(i) 3045 } 3046 3047 func (t *Torrent) dialTimeout() time.Duration { 3048 return reducedDialTimeout(t.cl.config.MinDialTimeout, t.cl.config.NominalDialTimeout, t.cl.config.HalfOpenConnsPerTorrent, t.peers.Len()) 3049 } 3050 3051 func (t *Torrent) piece(i int) *Piece { 3052 return &t.pieces[i] 3053 } 3054 3055 func (t *Torrent) pieceForOffset(off int64) *Piece { 3056 // Avoid conversion to int by doing indexing directly. Should we check the offset is allowed for 3057 // that piece? 3058 return &t.pieces[off/t.info.PieceLength] 3059 } 3060 3061 func (t *Torrent) onWriteChunkErr(err error) { 3062 if t.userOnWriteChunkErr != nil { 3063 go t.userOnWriteChunkErr(err) 3064 return 3065 } 3066 t.logger.WithDefaultLevel(log.Critical).Printf("default chunk write error handler: disabling data download") 3067 t.disallowDataDownloadLocked() 3068 } 3069 3070 func (t *Torrent) DisallowDataDownload() { 3071 t.cl.lock() 3072 defer t.cl.unlock() 3073 t.disallowDataDownloadLocked() 3074 } 3075 3076 func (t *Torrent) disallowDataDownloadLocked() { 3077 t.dataDownloadDisallowed.Set() 3078 t.iterPeers(func(p *Peer) { 3079 // Could check if peer request state is empty/not interested? 
3080 p.onNeedUpdateRequests("disallow data download") 3081 p.cancelAllRequests() 3082 }) 3083 } 3084 3085 func (t *Torrent) AllowDataDownload() { 3086 t.cl.lock() 3087 defer t.cl.unlock() 3088 // Can't move this outside the lock because other users require it to be unchanged while the 3089 // Client lock is held? 3090 if !t.dataDownloadDisallowed.Clear() { 3091 return 3092 } 3093 t.updateAllPiecePriorities("data download allowed") 3094 t.iterPeers(func(p *Peer) { 3095 p.onNeedUpdateRequests("allow data download") 3096 }) 3097 } 3098 3099 // Enables uploading data, if it was disabled. 3100 func (t *Torrent) AllowDataUpload() { 3101 t.cl.lock() 3102 defer t.cl.unlock() 3103 if !t.dataUploadDisallowed { 3104 return 3105 } 3106 t.dataUploadDisallowed = false 3107 t.iterPeers(func(p *Peer) { 3108 p.onNeedUpdateRequests("allow data upload") 3109 }) 3110 } 3111 3112 // Disables uploading data, if it was enabled. 3113 func (t *Torrent) DisallowDataUpload() { 3114 t.cl.lock() 3115 defer t.cl.unlock() 3116 t.dataUploadDisallowed = true 3117 for c := range t.conns { 3118 // TODO: This doesn't look right. Shouldn't we tickle writers to choke peers or something instead? 3119 c.onNeedUpdateRequests("disallow data upload") 3120 } 3121 } 3122 3123 // Sets a handler that is called if there's an error writing a chunk to local storage. By default, 3124 // or if nil, a critical message is logged, and data download is disabled. 3125 func (t *Torrent) SetOnWriteChunkError(f func(error)) { 3126 t.cl.lock() 3127 defer t.cl.unlock() 3128 t.userOnWriteChunkErr = f 3129 } 3130 3131 func (t *Torrent) iterPeers(f func(p *Peer)) { 3132 for pc := range t.conns { 3133 f(&pc.Peer) 3134 } 3135 for _, ws := range t.webSeeds { 3136 f(&ws.peer) 3137 } 3138 } 3139 3140 func (t *Torrent) callbacks() *Callbacks { 3141 return &t.cl.config.Callbacks 3142 } 3143 3144 type AddWebSeedsOpt func(*webseed.Client) 3145 3146 // TODO: Add a webseed http.Client option. 
// Sets the WebSeed trailing path escaper for a webseed.Client.
func WebSeedPathEscaper(custom webseed.PathEscaper) AddWebSeedsOpt {
	return func(c *webseed.Client) {
		c.PathEscaper = custom
	}
}

// The rules for ClientConfig.DownloadRateLimiter apply here.
func WebSeedResponseBodyRateLimiter(rl *rate.Limiter) AddWebSeedsOpt {
	return func(wc *webseed.Client) {
		wc.ResponseBodyRateLimiter = rl
	}
}

// AddWebSeeds adds the given WebSeed URLs to the Torrent, applying opts to each new
// webseed.Client. URLs that are already present are skipped.
func (t *Torrent) AddWebSeeds(urls []string, opts ...AddWebSeedsOpt) {
	t.cl.lock()
	defer t.cl.unlock()
	for _, u := range urls {
		t.addWebSeed(u, opts...)
	}
}

// Returns true if the WebSeed was newly added with the provided configuration.
func (t *Torrent) addWebSeed(url string, opts ...AddWebSeedsOpt) bool {
	if t.cl.config.DisableWebseeds {
		return false
	}
	// Deduplicate by interned URL.
	urlKey := webseedUrlKey(unique.Make(url))
	if _, ok := t.webSeeds[urlKey]; ok {
		return false
	}
	// I don't think Go http supports pipelining requests. However, we can have more ready to go
	// right away. This value should be some multiple of the number of connections to a host. This
	// number is based on keeping at least one connection actively downloading while another request
	// is fired off, and ensuring race detection works. Downloading Sintel
	// (08ada5a7a6183aae1e09d831df6748d566095a10) from "https://webtorrent.io/torrents/" is a good
	// test. Note since per-Torrent max requests for webseeds is not active, this doesn't do
	// anything.
	const defaultMaxRequests = 2
	ws := webseedPeer{
		peer: Peer{
			cl:       t.cl,
			t:        t,
			outgoing: true,
			Network:  "http",
			// TODO: Set ban prefix?
			RemoteAddr: remoteAddrFromUrl(url),
			callbacks:  t.callbacks(),
		},
		client: webseed.Client{
			HttpClient:              t.cl.httpClient,
			Url:                     url,
			MaxRequests:             defaultMaxRequests,
			ResponseBodyRateLimiter: t.cl.config.DownloadRateLimiter,
		},
		hostKey: t.deriveWebSeedHostKey(url),
		url:     urlKey,
	}
	ws.peer.initClosedCtx()
	// Options may override the defaults assigned above (e.g. the response body rate limiter).
	for _, opt := range opts {
		opt(&ws.client)
	}
	setDefaultDownloadRateLimiterBurstIfZero(ws.client.ResponseBodyRateLimiter)
	ws.client.ResponseBodyWrapper = func(r io.Reader, interrupt func()) io.Reader {
		// Make sure to rate limit *after* idle timing.
		r = newIdleTimeoutReader(r, 30*time.Second, interrupt)
		return newRateLimitedReader(r, ws.client.ResponseBodyRateLimiter)
	}
	g.MakeMapWithCap(&ws.activeRequests, ws.client.MaxRequests)
	ws.locker = t.cl.locker()
	for _, f := range t.callbacks().NewPeer {
		f(&ws.peer)
	}
	ws.peer.logger = t.logger.WithContextValue(&ws).WithNames("webseed")
	ws.peer.slogger = t.slogger().With("webseed", url)
	ws.client.Logger = ws.peer.slogger
	// TODO: Abstract out a common struct initializer for this...
	ws.peer.legacyPeerImpl = &ws
	ws.peer.peerImpl = &ws
	if t.haveInfo() {
		ws.onGotInfo(t.info)
	}
	g.MapMustAssignNew(t.webSeeds, urlKey, &ws)
	ws.peer.onNeedUpdateRequests("Torrent.addWebSeed")
	return true
}

// Interns a key identifying the WebSeed's host, used to group webseeds by host. Falls back to the
// whole URL string if it doesn't parse.
func (t *Torrent) deriveWebSeedHostKey(urlStr string) (ret webseedHostKeyHandle) {
	u, err := url.Parse(urlStr)
	if err != nil {
		t.slogger().Warn("error parsing webseed URL", "url", urlStr, "err", err)
		return unique.Make(webseedHostKey(urlStr))
	}
	return unique.Make(webseedHostKey(u.Hostname()))
}

// Reports whether p is still among the Torrent's current peers (conns or webseeds).
func (t *Torrent) peerIsActive(p *Peer) (active bool) {
	t.iterPeers(func(p1 *Peer) {
		if p1 == p {
			active = true
		}
	})
	return
}

// TODO: It's more of a RequestStruct really.
3254 func (t *Torrent) requestIndexToRequest(ri RequestIndex) Request { 3255 index := t.pieceIndexOfRequestIndex(ri) 3256 return Request{ 3257 pp.Integer(index), 3258 t.chunkIndexSpec(index, ri%t.chunksPerRegularPiece()), 3259 } 3260 } 3261 3262 func (t *Torrent) requestIndexFromRequest(r Request) RequestIndex { 3263 return t.pieceRequestIndexBegin(pieceIndex(r.Index)) + RequestIndex(r.Begin/t.chunkSize) 3264 } 3265 3266 // The first request index for the piece. 3267 func (t *Torrent) pieceRequestIndexBegin(piece pieceIndex) RequestIndex { 3268 return RequestIndex(piece) * t.chunksPerRegularPiece() 3269 } 3270 3271 // Run complete validation when lock is released. 3272 func (t *Torrent) deferUpdateComplete() { 3273 t.cl.unlockHandlers.addUpdateComplete(t) 3274 } 3275 3276 func (t *Torrent) updateComplete() { 3277 t.complete.SetBool(t.isComplete()) 3278 } 3279 3280 // TODO: I don't think having this flick back and forth while hashing is good. I think externally we 3281 // might want to wait until all hashing has completed, but the completion state shouldn't change 3282 // until we prove something is incorrect. 3283 func (t *Torrent) isComplete() bool { 3284 if t.activePieceHashes != 0 { 3285 return false 3286 } 3287 if !t.piecesQueuedForHash.IsEmpty() { 3288 return false 3289 } 3290 if !t.haveAllPieces() { 3291 return false 3292 } 3293 return true 3294 } 3295 3296 func (t *Torrent) cancelRequest(r RequestIndex) *PeerConn { 3297 p := t.requestingPeer(r) 3298 if p != nil { 3299 p.cancel(r) 3300 } 3301 // TODO: This is a check that an old invariant holds. It can be removed after some testing. 
3302 //delete(t.pendingRequests, r) 3303 if _, ok := t.requestState[r]; ok { 3304 panic("expected request state to be gone") 3305 } 3306 return p 3307 } 3308 3309 func (t *Torrent) requestingPeer(r RequestIndex) (ret *PeerConn) { 3310 state, ok := t.requestState[r] 3311 if !ok { 3312 return nil 3313 } 3314 ret = state.peer.Value() 3315 panicif.Nil(ret) 3316 return 3317 } 3318 3319 func (t *Torrent) addConnWithAllPieces(p *Peer) { 3320 if t.connsWithAllPieces == nil { 3321 t.connsWithAllPieces = make(map[*Peer]struct{}, t.maxEstablishedConns) 3322 } 3323 t.connsWithAllPieces[p] = struct{}{} 3324 } 3325 3326 func (t *Torrent) deleteConnWithAllPieces(p *Peer) bool { 3327 _, ok := t.connsWithAllPieces[p] 3328 delete(t.connsWithAllPieces, p) 3329 return ok 3330 } 3331 3332 func (t *Torrent) numActivePeers() int { 3333 // TODO: Webseeds are "active" if they can serve any data. That means we need to track what 3334 // pieces they're able to provide. 3335 return len(t.conns) + len(t.webSeeds) 3336 } 3337 3338 // Specifically, whether we can expect data to vanish while trying to read. 
3339 func (t *Torrent) hasStorageCap() bool { 3340 f := t.storage.Capacity 3341 if f == nil { 3342 return false 3343 } 3344 _, ok := (*f)() 3345 return ok 3346 } 3347 3348 func (t *Torrent) pieceIndexOfRequestIndex(ri RequestIndex) pieceIndex { 3349 return pieceIndex(ri / t.chunksPerRegularPiece()) 3350 } 3351 3352 func (t *Torrent) iterUndirtiedRequestIndexesInPiece( 3353 reuseIter *typedRoaring.Iterator[RequestIndex], 3354 piece pieceIndex, 3355 f func(RequestIndex), 3356 ) { 3357 reuseIter.Initialize(&t.dirtyChunks) 3358 pieceRequestIndexOffset := t.pieceRequestIndexBegin(piece) 3359 iterBitmapUnsetInRange( 3360 reuseIter, 3361 pieceRequestIndexOffset, pieceRequestIndexOffset+t.pieceNumChunks(piece), 3362 f, 3363 ) 3364 } 3365 3366 type webRtcStatsReports map[string]webrtc.StatsReport 3367 3368 func (t *Torrent) GetWebRtcPeerConnStats() map[string]webRtcStatsReports { 3369 stats := make(map[string]webRtcStatsReports) 3370 trackersMap := t.cl.websocketTrackers.clients 3371 for i, trackerClient := range trackersMap { 3372 ts := trackerClient.RtcPeerConnStats() 3373 stats[i] = ts 3374 } 3375 return stats 3376 } 3377 3378 type requestState struct { 3379 peer weak.Pointer[PeerConn] 3380 when time.Time 3381 } 3382 3383 // Returns an error if a received chunk is out of bounds in some way. 3384 func (t *Torrent) checkValidReceiveChunk(r Request) error { 3385 if !t.haveInfo() { 3386 return errors.New("torrent missing info") 3387 } 3388 if int(r.Index) >= t.numPieces() { 3389 return fmt.Errorf("chunk index %v, torrent num pieces %v", r.Index, t.numPieces()) 3390 } 3391 pieceLength := t.pieceLength(pieceIndex(r.Index)) 3392 if r.Begin >= pieceLength { 3393 return fmt.Errorf("chunk begins beyond end of piece (%v >= %v)", r.Begin, pieceLength) 3394 } 3395 // We could check chunk lengths here, but chunk request size is not changed often, and tricky 3396 // for peers to manipulate as they need to send potentially large buffers to begin with. 
There 3397 // should be considerable checks elsewhere for this case due to the network overhead. We should 3398 // catch most of the overflow manipulation stuff by checking index and begin above. 3399 return nil 3400 } 3401 3402 func (t *Torrent) peerConnsWithDialAddrPort(target netip.AddrPort) (ret []*PeerConn) { 3403 for pc := range t.conns { 3404 dialAddr, err := pc.remoteDialAddrPort() 3405 if err != nil { 3406 continue 3407 } 3408 if dialAddr != target { 3409 continue 3410 } 3411 ret = append(ret, pc) 3412 } 3413 return 3414 } 3415 3416 func wrapUtHolepunchMsgForPeerConn( 3417 recipient *PeerConn, 3418 msg utHolepunch.Msg, 3419 ) pp.Message { 3420 extendedPayload, err := msg.MarshalBinary() 3421 if err != nil { 3422 panic(err) 3423 } 3424 return pp.Message{ 3425 Type: pp.Extended, 3426 ExtendedID: MapMustGet(recipient.PeerExtensionIDs, utHolepunch.ExtensionName), 3427 ExtendedPayload: extendedPayload, 3428 } 3429 } 3430 3431 func sendUtHolepunchMsg( 3432 pc *PeerConn, 3433 msgType utHolepunch.MsgType, 3434 addrPort netip.AddrPort, 3435 errCode utHolepunch.ErrCode, 3436 ) { 3437 holepunchMsg := utHolepunch.Msg{ 3438 MsgType: msgType, 3439 AddrPort: addrPort, 3440 ErrCode: errCode, 3441 } 3442 incHolepunchMessagesSent(holepunchMsg) 3443 ppMsg := wrapUtHolepunchMsgForPeerConn(pc, holepunchMsg) 3444 pc.write(ppMsg) 3445 } 3446 3447 func incHolepunchMessages(msg utHolepunch.Msg, verb string) { 3448 torrent.Add( 3449 fmt.Sprintf( 3450 "holepunch %v %v messages %v", 3451 msg.MsgType, 3452 addrPortProtocolStr(msg.AddrPort), 3453 verb, 3454 ), 3455 1, 3456 ) 3457 } 3458 3459 func incHolepunchMessagesReceived(msg utHolepunch.Msg) { 3460 incHolepunchMessages(msg, "received") 3461 } 3462 3463 func incHolepunchMessagesSent(msg utHolepunch.Msg) { 3464 incHolepunchMessages(msg, "sent") 3465 } 3466 3467 func (t *Torrent) handleReceivedUtHolepunchMsg(msg utHolepunch.Msg, sender *PeerConn) error { 3468 incHolepunchMessagesReceived(msg) 3469 switch msg.MsgType { 3470 case 
utHolepunch.Rendezvous: 3471 t.logger.Printf("got holepunch rendezvous request for %v from %p", msg.AddrPort, sender) 3472 sendMsg := sendUtHolepunchMsg 3473 senderAddrPort, err := sender.remoteDialAddrPort() 3474 if err != nil { 3475 sender.logger.Levelf( 3476 log.Warning, 3477 "error getting ut_holepunch rendezvous sender's dial address: %v", 3478 err, 3479 ) 3480 // There's no better error code. The sender's address itself is invalid. I don't see 3481 // this error message being appropriate anywhere else anyway. 3482 sendMsg(sender, utHolepunch.Error, msg.AddrPort, utHolepunch.NoSuchPeer) 3483 } 3484 targets := t.peerConnsWithDialAddrPort(msg.AddrPort) 3485 if len(targets) == 0 { 3486 sendMsg(sender, utHolepunch.Error, msg.AddrPort, utHolepunch.NotConnected) 3487 return nil 3488 } 3489 for _, pc := range targets { 3490 if !pc.supportsExtension(utHolepunch.ExtensionName) { 3491 sendMsg(sender, utHolepunch.Error, msg.AddrPort, utHolepunch.NoSupport) 3492 continue 3493 } 3494 sendMsg(sender, utHolepunch.Connect, msg.AddrPort, 0) 3495 sendMsg(pc, utHolepunch.Connect, senderAddrPort, 0) 3496 } 3497 return nil 3498 case utHolepunch.Connect: 3499 holepunchAddr := msg.AddrPort 3500 t.logger.Printf("got holepunch connect request for %v from %p", holepunchAddr, sender) 3501 if g.MapContains(t.cl.undialableWithoutHolepunch, holepunchAddr) { 3502 setAdd(&t.cl.undialableWithoutHolepunchDialedAfterHolepunchConnect, holepunchAddr) 3503 if g.MapContains(t.cl.accepted, holepunchAddr) { 3504 setAdd(&t.cl.probablyOnlyConnectedDueToHolepunch, holepunchAddr) 3505 } 3506 } 3507 opts := outgoingConnOpts{ 3508 peerInfo: PeerInfo{ 3509 Addr: msg.AddrPort, 3510 Source: PeerSourceUtHolepunch, 3511 PexPeerFlags: sender.pex.remoteLiveConns[msg.AddrPort].UnwrapOrZeroValue(), 3512 }, 3513 t: t, 3514 // Don't attempt to start our own rendezvous if we fail to connect. 
3515 skipHolepunchRendezvous: true, 3516 receivedHolepunchConnect: true, 3517 // Assume that the other end initiated the rendezvous, and will use our preferred 3518 // encryption. So we will act normally. 3519 HeaderObfuscationPolicy: t.cl.config.HeaderObfuscationPolicy, 3520 } 3521 initiateConn(opts, true) 3522 return nil 3523 case utHolepunch.Error: 3524 torrent.Add("holepunch error messages received", 1) 3525 t.logger.Levelf(log.Debug, "received ut_holepunch error message from %v: %v", sender, msg.ErrCode) 3526 return nil 3527 default: 3528 return fmt.Errorf("unhandled msg type %v", msg.MsgType) 3529 } 3530 } 3531 3532 func addrPortProtocolStr(addrPort netip.AddrPort) string { 3533 addr := addrPort.Addr() 3534 switch { 3535 case addr.Is4(): 3536 return "ipv4" 3537 case addr.Is6(): 3538 return "ipv6" 3539 default: 3540 panic(addrPort) 3541 } 3542 } 3543 3544 func (t *Torrent) trySendHolepunchRendezvous(addrPort netip.AddrPort) error { 3545 rzsSent := 0 3546 for pc := range t.conns { 3547 if !pc.supportsExtension(utHolepunch.ExtensionName) { 3548 continue 3549 } 3550 if pc.supportsExtension(pp.ExtensionNamePex) { 3551 if !g.MapContains(pc.pex.remoteLiveConns, addrPort) { 3552 continue 3553 } 3554 } 3555 t.logger.Levelf(log.Debug, "sent ut_holepunch rendezvous message to %v for %v", pc, addrPort) 3556 sendUtHolepunchMsg(pc, utHolepunch.Rendezvous, addrPort, 0) 3557 rzsSent++ 3558 } 3559 if rzsSent == 0 { 3560 return errors.New("no eligible relays") 3561 } 3562 return nil 3563 } 3564 3565 func (t *Torrent) getDialTimeoutUnlocked() time.Duration { 3566 cl := t.cl 3567 cl.rLock() 3568 defer cl.rUnlock() 3569 return t.dialTimeout() 3570 } 3571 3572 func (t *Torrent) canonicalShortInfohash() *infohash.T { 3573 if t.infoHash.Ok { 3574 return &t.infoHash.Value 3575 } 3576 return t.infoHashV2.UnwrapPtr().ToShort() 3577 } 3578 3579 func (t *Torrent) iterShortInfohashes() iter.Seq[shortInfohash] { 3580 return func(yield func(shortInfohash) bool) { 3581 
t.eachShortInfohash(func(short [20]byte) { 3582 yield(short) 3583 }) 3584 } 3585 } 3586 3587 func (t *Torrent) eachShortInfohash(each func(short [20]byte)) { 3588 if t.infoHash.Value == *t.infoHashV2.Value.ToShort() { 3589 // This includes zero values, since they both should not be zero. Plus Option should not 3590 // allow non-zero values for None. 3591 panic("v1 and v2 info hashes should not be the same") 3592 } 3593 if t.infoHash.Ok { 3594 each(t.infoHash.Value) 3595 } 3596 if t.infoHashV2.Ok { 3597 v2Short := *t.infoHashV2.Value.ToShort() 3598 each(v2Short) 3599 } 3600 } 3601 3602 func (t *Torrent) getFileByPiecesRoot(hash [32]byte) *File { 3603 for _, f := range *t.files { 3604 if f.piecesRoot.Unwrap() == hash { 3605 return f 3606 } 3607 } 3608 return nil 3609 } 3610 3611 func (t *Torrent) pieceLayers() (pieceLayers map[string]string) { 3612 if t.files == nil { 3613 return 3614 } 3615 files := *t.files 3616 g.MakeMapWithCap(&pieceLayers, len(files)) 3617 file: 3618 for _, f := range files { 3619 if !f.piecesRoot.Ok { 3620 continue 3621 } 3622 key := f.piecesRoot.Value 3623 var value strings.Builder 3624 for i := f.BeginPieceIndex(); i < f.EndPieceIndex(); i++ { 3625 hashOpt := t.piece(i).hashV2 3626 if !hashOpt.Ok { 3627 // All hashes must be present. This implementation should handle missing files, so 3628 // move on to the next file. 3629 continue file 3630 } 3631 value.Write(hashOpt.Value[:]) 3632 } 3633 if value.Len() == 0 { 3634 // Non-empty files are not recorded in piece layers. 3635 continue 3636 } 3637 // If multiple files have the same root that shouldn't matter. 3638 pieceLayers[string(key[:])] = value.String() 3639 } 3640 return 3641 } 3642 3643 // Is On when all pieces are complete. 
3644 func (t *Torrent) Complete() chansync.ReadOnlyFlag { 3645 return &t.complete 3646 } 3647 3648 func (t *Torrent) processSlogGroupInput(latest torrentSlogGroupInput) { 3649 t._slogGroup = slog.Group("torrent", 3650 "name", latest.name, 3651 "ih", latest.canonicalIh) 3652 t._slogger = t.baseSlogger.With(t._slogGroup) 3653 t.lastSlogGroupInput = latest 3654 } 3655 3656 func (t *Torrent) updateSlogGroup() { 3657 latest := t.makeSlogGroupInput() 3658 if t._slogger == nil || latest != t.lastSlogGroupInput { 3659 t.processSlogGroupInput(latest) 3660 } 3661 } 3662 3663 // NB: You may need to be holding client lock to call this now. 3664 func (t *Torrent) slogger() *slog.Logger { 3665 t.updateSlogGroup() 3666 return t._slogger 3667 } 3668 3669 func (t *Torrent) makeSlogGroupInput() torrentSlogGroupInput { 3670 var name any 3671 opt := t.bestName() 3672 if opt.Ok { 3673 name = opt.Value 3674 } 3675 return torrentSlogGroupInput{ 3676 name: name, 3677 canonicalIh: *t.canonicalShortInfohash(), 3678 } 3679 } 3680 3681 // Returns a group attr describing the Torrent. 3682 func (t *Torrent) slogGroup() slog.Attr { 3683 t.updateSlogGroup() 3684 return t._slogGroup 3685 } 3686 3687 // Get a chunk buffer from the pool. It should be returned when it's no longer in use. Do we 3688 // waste an allocation if we throw away the pointer it was stored with? 3689 func (t *Torrent) getChunkBuffer() []byte { 3690 b := *t.chunkPool.Get().(*[]byte) 3691 b = b[:t.chunkSize.Int()] 3692 return b 3693 } 3694 3695 func (t *Torrent) putChunkBuffer(b []byte) { 3696 panicif.NotEq(cap(b), t.chunkSize.Int()) 3697 // Does this allocate? Are we amortizing against the cost of a large buffer? 
3698 t.chunkPool.Put(&b) 3699 } 3700 3701 func (t *Torrent) withSlogger(base *slog.Logger) *slog.Logger { 3702 return base.With(slog.Group( 3703 "torrent", 3704 "ih", *t.canonicalShortInfohash())) 3705 } 3706 3707 func (t *Torrent) endRequestIndexForFileIndex(fileIndex int) RequestIndex { 3708 f := t.Files()[fileIndex] 3709 end := intCeilDiv(uint64(f.offset)+uint64(f.length), t.chunkSize.Uint64()) 3710 return RequestIndex(end) 3711 } 3712 3713 func (t *Torrent) wantReceiveChunk(reqIndex RequestIndex) bool { 3714 if t.checkValidReceiveChunk(t.requestIndexToRequest(reqIndex)) != nil { 3715 return false 3716 } 3717 pi := t.pieceIndexOfRequestIndex(reqIndex) 3718 if t.ignorePieceForRequests(pi) { 3719 return false 3720 } 3721 if t.haveRequestIndexChunk(reqIndex) { 3722 return false 3723 } 3724 return true 3725 } 3726 3727 func (t *Torrent) getClosedErr() error { 3728 if t.closed.IsSet() { 3729 return errTorrentClosed 3730 } 3731 return nil 3732 } 3733 3734 func (t *Torrent) considerStartingHashers() bool { 3735 if t.storage == nil { 3736 return false 3737 } 3738 if t.activePieceHashes >= t.cl.config.PieceHashersPerTorrent { 3739 return false 3740 } 3741 if !t.cl.canStartPieceHashers() { 3742 return false 3743 } 3744 if t.piecesQueuedForHash.IsEmpty() { 3745 return false 3746 } 3747 return true 3748 } 3749 3750 func (t *Torrent) getFile(fileIndex int) *File { 3751 return (*t.files)[fileIndex] 3752 } 3753 3754 func (t *Torrent) fileMightBePartial(fileIndex int) bool { 3755 f := t.getFile(fileIndex) 3756 return t.piecesMightBePartial(f.BeginPieceIndex(), f.EndPieceIndex()) 3757 } 3758 3759 // Expand the piece range to include all pieces of the files in the original range. 
3760 func (t *Torrent) expandPieceRangeToFullFiles(beginPieceIndex, endPieceIndex pieceIndex) (expandedBegin, expandedEnd pieceIndex) { 3761 if beginPieceIndex == endPieceIndex { 3762 return beginPieceIndex, endPieceIndex 3763 } 3764 firstFile := t.getFile(t.piece(beginPieceIndex).beginFile) 3765 lastFile := t.getFile(t.piece(endPieceIndex-1).endFile - 1) 3766 expandedBegin = firstFile.BeginPieceIndex() 3767 expandedEnd = lastFile.EndPieceIndex() 3768 return 3769 } 3770 3771 // Pieces in the range [begin, end) may have partially complete files. Note we only check for dirty chunks and either all or no pieces being complete. 3772 func (t *Torrent) filesInPieceRangeMightBePartial(begin, end pieceIndex) bool { 3773 begin, end = t.expandPieceRangeToFullFiles(begin, end) 3774 return t.piecesMightBePartial(begin, end) 3775 } 3776 3777 // Pieces in the range [begin, end) may have partially complete files. Note we only check for dirty chunks and either all or no pieces being complete. 3778 func (t *Torrent) filesInRequestRangeMightBePartial(beginRequest, endRequest RequestIndex) bool { 3779 if beginRequest >= endRequest { 3780 return false 3781 } 3782 beginPiece := t.pieceIndexOfRequestIndex(beginRequest) 3783 endPiece := pieceIndex(intCeilDiv(endRequest, t.chunksPerRegularPiece())) 3784 return t.filesInPieceRangeMightBePartial(beginPiece, endPiece) 3785 } 3786 3787 // Pieces in the range [begin, end) are dirty, or in a mixed completion state. 3788 func (t *Torrent) piecesMightBePartial(beginPieceIndex, endPieceIndex int) bool { 3789 // Check for dirtied chunks. 3790 if t.dirtyChunks.IntersectsWithInterval( 3791 uint64(t.pieceRequestIndexBegin(beginPieceIndex)), 3792 uint64(t.pieceRequestIndexBegin(endPieceIndex)), 3793 ) { 3794 // We have dirty chunks. Even if the file is complete, this could mean a partial file has 3795 // been started. 3796 return true 3797 } 3798 // Check for mixed completion. 
3799 var r roaring.Bitmap 3800 r.AddRange(uint64(beginPieceIndex), uint64(endPieceIndex)) 3801 switch t._completedPieces.AndCardinality(&r) { 3802 case 0, uint64(endPieceIndex - beginPieceIndex): 3803 // We have either no pieces or all pieces and no dirty chunks. 3804 return false 3805 default: 3806 // We're somewhere in-between. 3807 return true 3808 } 3809 } 3810 3811 func (t *Torrent) hasActiveWebseedRequests() bool { 3812 for _, p := range t.webSeeds { 3813 for req := range p.activeRequests { 3814 if !req.cancelled.Load() { 3815 return true 3816 } 3817 } 3818 } 3819 return false 3820 } 3821 3822 // Increment pieces dirtied for conns and aggregate upstreams. 3823 func (t *Torrent) incrementPiecesDirtiedStats(p *Piece, inc func(stats *ConnStats) bool) { 3824 if len(p.dirtiers) == 0 { 3825 // Avoid allocating map. 3826 return 3827 } 3828 // 4 == 2 peerImpls (PeerConn and webseedPeer) and 1 base * one AllConnStats for each of Torrent 3829 // and Client. 3830 distinctUpstreamConnStats := make(map[*ConnStats]struct{}, 6) 3831 for c := range p.dirtiers { 3832 // Apply directly for each peer to avoid allocation. 3833 inc(&c._stats) 3834 // Collect distinct upstream connection stats. 3835 count := 0 3836 for cs := range c.upstreamConnStats() { 3837 distinctUpstreamConnStats[cs] = struct{}{} 3838 count++ 3839 } 3840 // All dirtiers should have both Torrent and Client stats for both base and impl-ConnStats. 3841 panicif.NotEq(count, 4) 3842 } 3843 // TODO: Have a debug assert/dev logging version of this. 3844 panicif.GreaterThan(len(distinctUpstreamConnStats), 6) 3845 maps.Keys(distinctUpstreamConnStats)(inc) 3846 } 3847 3848 // Maximum end request index for the torrent (one past the last). There might be other requests that 3849 // don't make sense if padding files and v2 are in use. 
3850 func (t *Torrent) maxEndRequest() RequestIndex { 3851 return RequestIndex(intCeilDiv(uint64(t.length()), t.chunkSize.Uint64())) 3852 } 3853 3854 // Avoids needing or indexing the pieces slice. 3855 func (p *Torrent) chunkIndexSpec(piece pieceIndex, chunk chunkIndexType) ChunkSpec { 3856 return chunkIndexSpec(pp.Integer(chunk), p.pieceLength(piece), p.chunkSize) 3857 } 3858 3859 func (t *Torrent) progressUnitFloat() float64 { 3860 if !t.haveInfo() { 3861 return math.NaN() 3862 } 3863 return 1 - float64(t.bytesMissingLocked())/float64(t.info.TotalLength()) 3864 }