// Source: github.com/anacrolix/torrent@v1.61.0/storage/test/bench-piece-mark-complete.go

     1  package test_storage
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"math/rand"
     7  	"sync"
     8  	"testing"
     9  
    10  	qt "github.com/go-quicktest/qt"
    11  
    12  	"github.com/anacrolix/torrent/metainfo"
    13  	"github.com/anacrolix/torrent/storage"
    14  )
    15  
const (
	// ChunkSize is the size of each concurrent write: 16 KiB, the typical
	// BitTorrent request/chunk size.
	ChunkSize = 1 << 14
	// DefaultPieceSize is the piece length (2 MiB) used by callers that don't
	// need a specific size.
	DefaultPieceSize = 2 << 20
	// DefaultNumPieces is the piece count used by callers that don't need a
	// specific torrent size.
	DefaultNumPieces = 16
)
    21  
    22  // This writes chunks to the storage concurrently, and waits for them all to complete. This matches
    23  // the behaviour from the peer connection read loop.
    24  func BenchmarkPieceMarkComplete(
    25  	b *testing.B, ci storage.ClientImpl,
    26  	pieceSize int64, numPieces int,
    27  	// This drives any special handling around capacity that may be configured into the storage
    28  	// implementation.
    29  	capacity int64,
    30  ) {
    31  	info := &metainfo.Info{
    32  		Pieces:      make([]byte, numPieces*metainfo.HashSize),
    33  		PieceLength: pieceSize,
    34  		Length:      pieceSize * int64(numPieces),
    35  		Name:        "TorrentName",
    36  	}
    37  	ti, err := ci.OpenTorrent(context.Background(), info, metainfo.Hash{})
    38  	qt.Assert(b, qt.IsNil(err))
    39  	tw := storage.Torrent{ti}
    40  	defer tw.Close()
    41  	rand.Read(info.Pieces)
    42  	data := make([]byte, pieceSize)
    43  	readData := make([]byte, pieceSize)
    44  	b.SetBytes(int64(numPieces) * pieceSize)
    45  	oneIter := func() {
    46  		for pieceIndex := 0; pieceIndex < numPieces; pieceIndex += 1 {
    47  			pi := tw.Piece(info.Piece(pieceIndex))
    48  			rand.Read(data)
    49  			b.StartTimer()
    50  			var wg sync.WaitGroup
    51  			for off := int64(0); off < int64(len(data)); off += ChunkSize {
    52  				wg.Add(1)
    53  				go func(off int64) {
    54  					defer wg.Done()
    55  					n, err := pi.WriteAt(data[off:off+ChunkSize], off)
    56  					if err != nil {
    57  						panic(err)
    58  					}
    59  					if n != ChunkSize {
    60  						panic(n)
    61  					}
    62  				}(off)
    63  			}
    64  			wg.Wait()
    65  			if capacity == 0 {
    66  				pi.MarkNotComplete()
    67  			}
    68  			// This might not apply if users of this benchmark don't cache with the expected capacity.
    69  			qt.Assert(b, qt.Equals(pi.Completion(), storage.Completion{Complete: false, Ok: true}))
    70  			qt.Assert(b, qt.IsNil(pi.MarkComplete()))
    71  			qt.Assert(b, qt.Equals(pi.Completion(), storage.Completion{Complete: true, Ok: true}))
    72  			n, err := pi.WriteTo(bytes.NewBuffer(readData[:0]))
    73  			b.StopTimer()
    74  			qt.Check(b, qt.Equals(n, int64(len(data))))
    75  			qt.Assert(b, qt.IsNil(err))
    76  			qt.Assert(b, qt.IsTrue(bytes.Equal(readData[:n], data)))
    77  		}
    78  	}
    79  	// Fill the cache
    80  	if capacity > 0 {
    81  		iterN := int((capacity + info.TotalLength() - 1) / info.TotalLength())
    82  		for i := 0; i < iterN; i += 1 {
    83  			oneIter()
    84  		}
    85  	}
    86  	b.StopTimer()
    87  	b.ResetTimer()
    88  	for i := 0; i < b.N; i += 1 {
    89  		oneIter()
    90  	}
    91  }