github.com/anacrolix/torrent@v1.61.0/request-strategy-impls_test.go (about)

     1  package torrent
     2  
     3  import (
     4  	"context"
     5  	"io"
     6  	"runtime"
     7  	"testing"
     8  
     9  	g "github.com/anacrolix/generics"
    10  	"github.com/anacrolix/missinggo/v2/iter"
    11  	"github.com/davecgh/go-spew/spew"
    12  	"github.com/go-quicktest/qt"
    13  
    14  	requestStrategy "github.com/anacrolix/torrent/internal/request-strategy"
    15  	"github.com/anacrolix/torrent/metainfo"
    16  	"github.com/anacrolix/torrent/storage"
    17  	infohash_v2 "github.com/anacrolix/torrent/types/infohash-v2"
    18  )
    19  
// makeRequestStrategyPiece fetches piece 0 through the requestStrategy.Torrent
// interface. It exists as a separate function so the interface-mediated call
// is the only work between the MemStats reads in
// TestRequestStrategyPieceDoesntAlloc, letting that test check the conversion
// doesn't heap-allocate.
func makeRequestStrategyPiece(t requestStrategy.Torrent) requestStrategy.Piece {
	return t.Piece(0)
}
    23  
    24  func TestRequestStrategyPieceDoesntAlloc(t *testing.T) {
    25  	akshalTorrent := &Torrent{pieces: make([]Piece, 1)}
    26  	rst := requestStrategyTorrent{akshalTorrent}
    27  	var before, after runtime.MemStats
    28  	runtime.ReadMemStats(&before)
    29  	p := makeRequestStrategyPiece(rst)
    30  	runtime.ReadMemStats(&after)
    31  	qt.Assert(t, qt.Equals(before.HeapAlloc, after.HeapAlloc))
    32  	// We have to use p, or it gets optimized away.
    33  	spew.Fdump(io.Discard, p)
    34  }
    35  
// storagePiece is a minimal in-memory storage.PieceImpl used by the
// benchmark below. Only Completion is functional; the I/O and mark methods
// panic if called.
type storagePiece struct {
	complete bool
}
    39  
// ReadAt is required by storage.PieceImpl but unused in this benchmark;
// reaching it indicates a test bug.
func (s storagePiece) ReadAt(p []byte, off int64) (n int, err error) {
	//TODO implement me
	panic("implement me")
}
    44  
// WriteAt is required by storage.PieceImpl but unused in this benchmark;
// reaching it indicates a test bug.
func (s storagePiece) WriteAt(p []byte, off int64) (n int, err error) {
	//TODO implement me
	panic("implement me")
}
    49  
// MarkComplete is required by storage.PieceImpl but unused in this benchmark;
// completion is driven by storageClient.completed instead.
func (s storagePiece) MarkComplete() error {
	//TODO implement me
	panic("implement me")
}
    54  
// MarkNotComplete is required by storage.PieceImpl but unused in this
// benchmark; completion is driven by storageClient.completed instead.
func (s storagePiece) MarkNotComplete() error {
	//TODO implement me
	panic("implement me")
}
    59  
    60  func (s storagePiece) Completion() storage.Completion {
    61  	return storage.Completion{Ok: true, Complete: s.complete}
    62  }
    63  
    64  var _ storage.PieceImpl = storagePiece{}
    65  
// storageClient is a test storage implementation for the benchmark: pieces
// with index < completed report as complete, everything else as incomplete.
type storageClient struct {
	completed int
}
    69  
    70  func (s *storageClient) OpenTorrent(
    71  	_ context.Context,
    72  	info *metainfo.Info,
    73  	infoHash metainfo.Hash,
    74  ) (storage.TorrentImpl, error) {
    75  	return storage.TorrentImpl{
    76  		Piece: func(p metainfo.Piece) storage.PieceImpl {
    77  			return storagePiece{complete: p.Index() < s.completed}
    78  		},
    79  	}, nil
    80  }
    81  
    82  func BenchmarkRequestStrategy(b *testing.B) {
    83  	cl := newTestingClient(b)
    84  	storageClient := storageClient{}
    85  	tor, new := cl.AddTorrentOpt(AddTorrentOpts{
    86  		InfoHash:   testingTorrentInfoHash,
    87  		InfoHashV2: g.Some(infohash_v2.FromHexString("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")),
    88  		Storage:    &storageClient,
    89  	})
    90  	tor.disableTriggers = true
    91  	qt.Assert(b, qt.IsTrue(new))
    92  	const pieceLength = 1 << 8 << 10
    93  	const numPieces = 10_000
    94  	err := tor.setInfoUnlocked(&metainfo.Info{
    95  		Pieces:      make([]byte, numPieces*metainfo.HashSize),
    96  		PieceLength: pieceLength,
    97  		Length:      pieceLength * numPieces,
    98  	})
    99  	qt.Assert(b, qt.IsNil(err))
   100  	peer := cl.newConnection(nil, newConnectionOpts{
   101  		network: "test",
   102  	})
   103  	peer.setTorrent(tor)
   104  	qt.Assert(b, qt.IsNotNil(tor.storage))
   105  	const chunkSize = defaultChunkSize
   106  	peer.onPeerHasAllPiecesNoTriggers()
   107  	for i := 0; i < tor.numPieces(); i++ {
   108  		tor.pieces[i].priority.Raise(PiecePriorityNormal)
   109  		tor.updatePiecePriorityNoRequests(i)
   110  	}
   111  	peer.peerChoking = false
   112  	for b.Loop() {
   113  		storageClient.completed = 0
   114  		for pieceIndex := range iter.N(numPieces) {
   115  			tor.cl.lock()
   116  			tor.updatePieceCompletion(pieceIndex)
   117  			tor.cl.unlock()
   118  		}
   119  		for completed := 0; completed <= numPieces; completed += 1 {
   120  			storageClient.completed = completed
   121  			if completed > 0 {
   122  				func() {
   123  					tor.cl.lock()
   124  					defer tor.cl.unlock()
   125  					tor.updatePieceCompletion(completed - 1)
   126  				}()
   127  			}
   128  			// Starting and stopping timers around this part causes lots of GC overhead.
   129  			rs := peer.getDesiredRequestState()
   130  			tor.cacheNextRequestIndexesForReuse(rs.Requests.requestIndexes)
   131  			// End of part that should be timed.
   132  			remainingChunks := (numPieces - completed) * (pieceLength / chunkSize)
   133  			qt.Assert(b, qt.HasLen(rs.Requests.requestIndexes, min(
   134  				remainingChunks,
   135  				int(cl.config.MaxUnverifiedBytes/chunkSize))))
   136  		}
   137  	}
   138  }