gitlab.com/SkynetLabs/skyd@v1.6.9/skymodules/renter/projectdownloadchunk_test.go (about)

     1  package renter
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"encoding/binary"
     7  	"fmt"
     8  	"io"
     9  	"reflect"
    10  	"strings"
    11  	"sync/atomic"
    12  	"testing"
    13  	"time"
    14  	"unsafe"
    15  
    16  	"gitlab.com/NebulousLabs/errors"
    17  	"gitlab.com/NebulousLabs/fastrand"
    18  	"gitlab.com/SkynetLabs/skyd/skymodules"
    19  	"go.sia.tech/siad/crypto"
    20  	"go.sia.tech/siad/modules"
    21  	"go.sia.tech/siad/types"
    22  )
    23  
    24  // TestPDC is a collection of unit test that verify the functionality of the
    25  // project download chunk object.
    26  func TestPDC(t *testing.T) {
    27  	if testing.Short() {
    28  		t.SkipNow()
    29  	}
    30  	t.Parallel()
    31  
    32  	t.Run("handleJobResponse", testProjectDownloadChunkHandleJobResponse)
    33  	t.Run("finalize", testProjectDownloadChunkFinalize)
    34  	t.Run("finished", testProjectDownloadChunkFinished)
    35  	t.Run("launchWorker", testProjectDownloadChunkLaunchWorker)
    36  	t.Run("workers", testProjectDownloadChunkWorkers)
    37  }
    38  
    39  // TestMarshalUnmarshalDownloadedData is a unit test for the custom Marshal and
    40  // Unmarshal implementations of the downloadedData.
    41  func TestMarshalUnmarshalDownloadedData(t *testing.T) {
    42  	t.Parallel()
    43  
    44  	// NOTE: 0 length pieces are allowed, so make sure to test that as well.
    45  	dd := &downloadedData{
    46  		LogicalChunkData: [][]byte{{1, 2, 3}, {4, 5, 6}, {}},
    47  		Proofs:           [][]crypto.Hash{{{9}, {8}, {7}}, {}, {{3}, {2}, {1}}},
    48  		RecoveredBytes:   fastrand.Uint64n(100),
    49  		SkipLength:       fastrand.Intn(100),
    50  		DataPieces:       fastrand.Intn(100),
    51  		ParityPieces:     fastrand.Intn(100),
    52  	}
    53  
    54  	buf := bytes.NewBuffer(nil)
    55  	err := dd.Marshal(buf)
    56  	if err != nil {
    57  		t.Fatal(err)
    58  	}
    59  	b := buf.Bytes()
    60  
    61  	var dd2 downloadedData
    62  	err = dd2.Unmarshal(b)
    63  	if err != nil {
    64  		t.Fatal(err)
    65  	}
    66  
    67  	if !reflect.DeepEqual(*dd, dd2) {
    68  		fmt.Println(*dd)
    69  		fmt.Println(dd2)
    70  		t.Fatal("mismatch")
    71  	}
    72  
    73  	// Unmarshal the data again. Dropping one byte at a time from the end.
    74  	// This should always fail but not panic.
    75  	b = b[:len(b)-1]
    76  	for len(b) > 0 {
    77  		var dd3 downloadedData
    78  		if err := dd3.Unmarshal(b); err == nil {
    79  			t.Fatal("should fail")
    80  		}
    81  		b = b[:len(b)-1]
    82  	}
    83  }
    84  
    85  // testProjectDownloadChunkHandleJobResponse is a unit test that verifies the
    86  // functionality of the 'handleJobResponse' function on the ProjectDownloadChunk
    87  func testProjectDownloadChunkHandleJobResponse(t *testing.T) {
    88  	t.Parallel()
    89  
    90  	// create pcws
    91  	pcws := newTestProjectChunkWorkerSet()
    92  	ec := pcws.staticErasureCoder
    93  
    94  	// create data and erasure code
    95  	data := fastrand.Bytes(int(modules.SectorSize))
    96  	pieces, err := ec.Encode(data)
    97  	if err != nil {
    98  		t.Fatal(err)
    99  	}
   100  
   101  	// update piece roots
   102  	empty := crypto.Hash{}
   103  	pcws.staticPieceRoots = []crypto.Hash{
   104  		empty,
   105  		crypto.MerkleRoot(pieces[1]),
   106  		empty,
   107  		empty,
   108  		empty,
   109  	}
   110  
   111  	// create pdc
   112  	pdc := newTestProjectDownloadChunk(pcws, nil)
   113  	pdc.piecesInfo[1].available++
   114  	pdc.piecesInfo[2].available++
   115  
   116  	// create worker
   117  	worker := new(worker)
   118  
   119  	// mock state after launching a worker
   120  	workerKey := uint32(1)
   121  	pdc.workerProgressMap[workerKey] = workerProgress{
   122  		completedPieces: make(completedPieces),
   123  		launchedPieces:  make(launchedPieces),
   124  	}
   125  
   126  	lwi := &launchedWorkerInfo{staticLaunchTime: time.Now().Add(-time.Minute)}
   127  	pdc.launchedWorkers = []*launchedWorkerInfo{lwi}
   128  
   129  	// mock a successful read response for piece 1
   130  	success := &jobReadResponse{
   131  		staticData:    pieces[1],
   132  		staticErr:     nil,
   133  		staticJobTime: time.Duration(1),
   134  		staticMetadata: jobReadMetadata{
   135  			staticLaunchedWorkerIndex: 0,
   136  			staticPieceRootIndex:      1,
   137  			staticSectorRoot:          crypto.MerkleRoot(pieces[1]),
   138  			staticWorker:              worker,
   139  			staticWorkerIdentifier:    workerKey,
   140  		},
   141  	}
   142  	pdc.handleJobReadResponse(success)
   143  
   144  	// assert pieces info got updated
   145  	if !pdc.piecesInfo[1].downloaded {
   146  		t.Fatal("unexpected")
   147  	}
   148  	if pdc.piecesInfo[1].available != 0 {
   149  		t.Fatal("unexpected")
   150  	}
   151  
   152  	// assert pieces data got updated and that we've unset the data
   153  	if !bytes.Equal(pdc.piecesData[1], pieces[1]) {
   154  		t.Fatal("unexpected")
   155  	}
   156  	if success.staticData != nil {
   157  		t.Fatal("unexpected")
   158  	}
   159  
   160  	// assert the launched worker information got updated
   161  	if lwi.completeTime == (time.Time{}) ||
   162  		lwi.jobDuration == 0 ||
   163  		lwi.totalDuration == 0 ||
   164  		lwi.jobErr != nil {
   165  		t.Fatal("unexpected")
   166  	}
   167  
   168  	// mock a failed read response for piece 2
   169  	pdc.handleJobReadResponse(&jobReadResponse{
   170  		staticData:    nil,
   171  		staticErr:     errors.New("read failed"),
   172  		staticJobTime: time.Duration(1),
   173  		staticMetadata: jobReadMetadata{
   174  			staticPieceRootIndex:   2,
   175  			staticSectorRoot:       empty,
   176  			staticWorker:           worker,
   177  			staticWorkerIdentifier: workerKey,
   178  		},
   179  	})
   180  
   181  	// assert pieces info got updated
   182  	if pdc.piecesInfo[2].downloaded {
   183  		t.Fatal("unexpected")
   184  	}
   185  	if pdc.piecesInfo[2].available != 0 {
   186  		t.Fatal("unexpected")
   187  	}
   188  
   189  	// assert pieces data
   190  	if pdc.piecesData[2] != nil {
   191  		t.Fatal("unexpected")
   192  	}
   193  
   194  	// assert the launched worker information got updated
   195  	if lwi.completeTime == (time.Time{}) ||
   196  		lwi.jobDuration == 0 ||
   197  		lwi.totalDuration == 0 ||
   198  		lwi.jobErr == nil {
   199  		t.Fatal("unexpected", lwi)
   200  	}
   201  }
   202  
   203  // testProjectDownloadChunkFinalize is a unit test for the 'finalize' function
   204  // on the pdc. It verifies whether the returned data is properly offset to
   205  // include only the pieces requested by the user.
   206  func testProjectDownloadChunkFinalize(t *testing.T) {
   207  	t.Parallel()
   208  
   209  	// create PCWS
   210  	pcws := newTestProjectChunkWorkerSet()
   211  	ec := pcws.staticErasureCoder
   212  
   213  	// create data
   214  	originalData := fastrand.Bytes(int(modules.SectorSize))
   215  	sectorRoot := crypto.MerkleRoot(originalData)
   216  	pcws.staticPieceRoots = []crypto.Hash{sectorRoot}
   217  
   218  	// RS encode the data
   219  	data := make([]byte, modules.SectorSize)
   220  	copy(data, originalData)
   221  	pieces, err := ec.Encode(data)
   222  	if err != nil {
   223  		t.Fatal(err)
   224  	}
   225  
   226  	// download a random amount of data at random offset
   227  	length := (fastrand.Uint64n(5) + 1) * crypto.SegmentSize
   228  	offset := fastrand.Uint64n(modules.SectorSize - length)
   229  	pieceOffset, pieceLength := GetPieceOffsetAndLen(ec, offset, length)
   230  
   231  	sliced := make([][]byte, len(pieces))
   232  	for i, piece := range pieces {
   233  		sliced[i] = make([]byte, pieceLength)
   234  		copy(sliced[i], piece[pieceOffset:pieceOffset+pieceLength])
   235  	}
   236  
   237  	// create a pdc
   238  	responseChan := make(chan *downloadResponse, 1)
   239  	pdc := newTestProjectDownloadChunk(pcws, responseChan)
   240  	pdc.offsetInChunk = offset
   241  	pdc.lengthInChunk = length
   242  	pdc.pieceOffset = pieceOffset
   243  	pdc.pieceLength = pieceLength
   244  	pdc.piecesData = sliced
   245  	pdc.staticBaseSectorDownloadStats = skymodules.NewSectorDownloadStats()
   246  	pdc.staticFanoutSectorDownloadStats = skymodules.NewSectorDownloadStats()
   247  
   248  	pdc.launchedWorkers = append(pdc.launchedWorkers, &launchedWorkerInfo{
   249  		staticLaunchTime:           time.Now(),
   250  		staticExpectedCompleteTime: time.Now().Add(time.Minute),
   251  		staticExpectedDuration:     time.Minute,
   252  
   253  		staticPDC:    pdc,
   254  		staticWorker: new(worker),
   255  	})
   256  
   257  	// call finalize
   258  	pdc.finalize()
   259  
   260  	// verify the download
   261  	downloadResponse := <-responseChan
   262  	if downloadResponse.err != nil {
   263  		t.Fatal("unexpected error", downloadResponse.err)
   264  	}
   265  	dd, err := downloadResponse.Data()
   266  	if err != nil {
   267  		t.Fatal(err)
   268  	}
   269  	responseData, err := dd.Recover()
   270  	if err != nil {
   271  		t.Fatal(err)
   272  	}
   273  	if !bytes.Equal(responseData, originalData[offset:offset+length]) {
   274  		t.Log("offset", offset)
   275  		t.Log("length", length)
   276  		t.Log("bytes downloaded", len(responseData))
   277  
   278  		t.Log("actual:\n", responseData)
   279  		t.Log("expected:\n", originalData[offset:offset+length])
   280  		t.Fatal("unexpected data")
   281  	}
   282  	if downloadResponse.launchedWorkers == nil || len(downloadResponse.launchedWorkers) != 1 || downloadResponse.launchedWorkers[0].staticExpectedDuration != time.Minute {
   283  		t.Fatal("unexpected")
   284  	}
   285  
   286  	// call fail
   287  	pdc.fail(errors.New("failure"))
   288  	downloadResponse = <-responseChan
   289  	if downloadResponse.err == nil {
   290  		t.Fatal("unexpected error")
   291  	}
   292  	if downloadResponse.launchedWorkers == nil {
   293  		t.Fatal("unexpected")
   294  	}
   295  }
   296  
   297  // testProjectDownloadChunkFinished is a unit test for the 'finished' function
   298  // on the pdc. It verifies whether the hopeful and completed pieces are properly
   299  // counted and whether the return values are correct.
   300  func testProjectDownloadChunkFinished(t *testing.T) {
   301  	// create EC
   302  	ec, err := skymodules.NewRSCode(3, 5)
   303  	if err != nil {
   304  		t.Fatal("unexpected")
   305  	}
   306  
   307  	// create pdc
   308  	pcws := newCustomTestProjectChunkWorkerSet(ec)
   309  	pdc := newTestProjectDownloadChunk(pcws, nil)
   310  
   311  	// mock unresolved state with hope of successful download
   312  	pdc.unresolvedWorkersRemaining = 4
   313  	finished, err := pdc.finished()
   314  	if err != nil {
   315  		t.Fatal("unexpected error", err)
   316  	}
   317  	if finished {
   318  		t.Fatal("unexpected")
   319  	}
   320  
   321  	// mock one resolved piece - still unfinished but hopeful
   322  	pdc.unresolvedWorkersRemaining = 3
   323  	pdc.piecesInfo[0].available++
   324  	finished, err = pdc.finished()
   325  	if err != nil {
   326  		t.Fatal("unexpected error", err)
   327  	}
   328  	if finished {
   329  		t.Fatal("unexpected")
   330  	}
   331  
   332  	// mock all resolved, only 2 availables - should not be hopeful, need 3
   333  	pdc.unresolvedWorkersRemaining = 0
   334  	pdc.piecesInfo[1].available++
   335  	finished, err = pdc.finished()
   336  	if !errors.Contains(err, errNotEnoughPieces) {
   337  		t.Fatal("unexpected error", err)
   338  	}
   339  	if finished {
   340  		t.Fatal("unexpected")
   341  	}
   342  
   343  	// add one available - should be hopeful and unfinished
   344  	pdc.piecesInfo[2].available++
   345  	finished, err = pdc.finished()
   346  	if err != nil {
   347  		t.Fatal("unexpected error", err)
   348  	}
   349  	if finished {
   350  		t.Fatal("unexpected")
   351  	}
   352  
   353  	// mock all downloaded, should be finished
   354  	pdc.piecesInfo[0].available = 0
   355  	pdc.piecesInfo[0].downloaded = true
   356  	pdc.piecesInfo[1].available = 0
   357  	pdc.piecesInfo[1].downloaded = true
   358  	pdc.piecesInfo[2].available = 0
   359  	pdc.piecesInfo[2].downloaded = true
   360  	finished, err = pdc.finished()
   361  	if err != nil {
   362  		t.Fatal("unexpected error", err)
   363  	}
   364  	if !finished {
   365  		t.Fatal("unexpected")
   366  	}
   367  }
   368  
   369  // testProjectDownloadChunkLaunchWorker is a unit test for the 'launchWorker'
   370  // function on the pdc.
   371  func testProjectDownloadChunkLaunchWorker(t *testing.T) {
   372  	t.Parallel()
   373  
   374  	// mock a worker, ensure the readqueue returns a non zero time estimate
   375  	worker := mockWorker(100 * time.Millisecond)
   376  	workerIdentifier := uint32(1)
   377  	workerHostPubKeyStr := worker.staticHostPubKeyStr
   378  
   379  	// create pdc
   380  	pcws := newTestProjectChunkWorkerSet()
   381  	pdc := newTestProjectDownloadChunk(pcws, nil)
   382  	pdc.pieceLength = 1 << 16 // 64kb
   383  
   384  	// launch a worker and expect it to have enqueued a job and expect the
   385  	// complete time to be somewhere in the future
   386  	expectedCompleteTime, added := pdc.launchWorker(&individualWorker{
   387  		staticWorker:     worker,
   388  		staticIdentifier: workerIdentifier,
   389  	}, 0, false)
   390  	if !added {
   391  		t.Fatal("unexpected")
   392  	}
   393  	if expectedCompleteTime.Before(time.Now()) {
   394  		t.Fatal("unexpected")
   395  	}
   396  
   397  	// assert worker progress has been initialised
   398  	progress, exists := pdc.workerProgressMap[workerIdentifier]
   399  	if !exists {
   400  		t.Fatal("unexpected")
   401  	}
   402  
   403  	// verify one worker was launched without failure
   404  	launchTime := progress.launchedPieces[0]
   405  	if launchTime.IsZero() {
   406  		t.Fatal("unexpected")
   407  	}
   408  
   409  	// mention of the launched worker should be present in the PDC's launched
   410  	// worker map, which holds debug information about all workers that were
   411  	// launched.
   412  	if len(pdc.launchedWorkers) != 1 {
   413  		t.Fatal("unexpected")
   414  	}
   415  	lw := pdc.launchedWorkers[0]
   416  
   417  	// assert the launched worker info contains what we expect it to contain
   418  	if lw.staticLaunchTime == (time.Time{}) ||
   419  		lw.completeTime != (time.Time{}) ||
   420  		lw.staticExpectedCompleteTime == (time.Time{}) ||
   421  		lw.jobDuration != 0 ||
   422  		lw.totalDuration != 0 ||
   423  		lw.staticExpectedDuration == 0 ||
   424  		!bytes.Equal(lw.staticPDC.uid[:], pdc.uid[:]) ||
   425  		lw.staticWorker.staticHostPubKeyStr != workerHostPubKeyStr {
   426  		t.Fatal("unexpected")
   427  	}
   428  }
   429  
   430  // testProjectDownloadChunkWorkers is a unit test for the 'workers' function on
   431  // the pdc.
   432  func testProjectDownloadChunkWorkers(t *testing.T) {
   433  	t.Parallel()
   434  
   435  	// create pdc
   436  	pcws := newTestProjectChunkWorkerSet()
   437  	pdc := newTestProjectDownloadChunk(pcws, nil)
   438  	ws := pdc.workerState
   439  
   440  	// assert there are no workers
   441  	workers := pdc.workers()
   442  	if len(workers) != 0 {
   443  		t.Fatal("bad")
   444  	}
   445  
   446  	// mock some workers
   447  	w1 := mockWorker(0)
   448  	w2 := mockWorker(0)
   449  	w3 := mockWorker(0)
   450  
   451  	// mock two unresolved workers
   452  	ws.unresolvedWorkers["w1"] = &pcwsUnresolvedWorker{staticWorker: w1}
   453  	ws.unresolvedWorkers["w2"] = &pcwsUnresolvedWorker{staticWorker: w2}
   454  
   455  	// assert they're returned in the worker list
   456  	workers = pdc.workers()
   457  	if len(workers) != 2 {
   458  		t.Fatal("bad")
   459  	}
   460  
   461  	// mock a resolved worker
   462  	ws.resolvedWorkers = append(ws.resolvedWorkers, pcwsWorkerResponse{
   463  		worker:       w3,
   464  		pieceIndices: []uint64{0},
   465  	})
   466  
   467  	// assert they're returned in the worker list
   468  	workers = pdc.workers()
   469  	if len(workers) != 3 {
   470  		t.Fatal("bad")
   471  	}
   472  
   473  	// clear its piece indices and assert the worker is excluded
   474  	ws.resolvedWorkers[0].pieceIndices = nil
   475  	workers = pdc.workers()
   476  	if len(workers) != 2 {
   477  		t.Fatal("bad")
   478  	}
   479  
   480  	// mock w1 being on maintenance cooldown and assert the worker is excluded
   481  	w1.staticMaintenanceState.cooldownUntil = time.Now().Add(time.Minute)
   482  	workers = pdc.workers()
   483  	if len(workers) != 1 {
   484  		t.Fatal("bad")
   485  	}
   486  }
   487  
   488  // TestGetPieceOffsetAndLen is a unit test that probes the helper function
   489  // getPieceOffsetAndLength
   490  func TestGetPieceOffsetAndLen(t *testing.T) {
   491  	randOff := fastrand.Uint64n(modules.SectorSize)
   492  	randLen := fastrand.Uint64n(modules.SectorSize)
   493  
   494  	// verify an EC that does not support partials, defaults to a segemnt size
   495  	// that is equal to the sectorsize
   496  	ec := skymodules.NewRSCodeDefault()
   497  	pieceOff, pieceLen := GetPieceOffsetAndLen(ec, randOff, randLen)
   498  	if pieceOff != 0 || pieceLen%modules.SectorSize != 0 {
   499  		t.Fatal("unexpected", pieceOff, pieceLen)
   500  	}
   501  
   502  	// verify an EC that does support partials using the appropriate segment
   503  	// size and the offset are as we expect them to be
   504  	ec = skymodules.NewRSSubCodeDefault()
   505  	pieceOff, pieceLen = GetPieceOffsetAndLen(ec, randOff, randLen)
   506  	if pieceOff%crypto.SegmentSize != 0 || pieceLen%crypto.SegmentSize != 0 {
   507  		t.Fatal("unexpected", pieceOff, pieceLen)
   508  	}
   509  
   510  	// verify an EC with minPieces different from 1 that supports partials
   511  	// encoding ensures we are reading enough data
   512  	dataPieces := 2
   513  	segmentSize := crypto.SegmentSize
   514  	chunkSegmentSize := uint64(dataPieces * segmentSize)
   515  	ec, err := skymodules.NewRSSubCode(2, 5, uint64(segmentSize))
   516  	if err != nil {
   517  		t.Fatal(err)
   518  	}
   519  	pieceOff, pieceLen = GetPieceOffsetAndLen(ec, randOff, randLen)
   520  	if ((pieceOff+pieceLen)*uint64(ec.MinPieces()))%chunkSegmentSize != 0 {
   521  		t.Fatal("unexpected", pieceOff, pieceLen)
   522  	}
   523  
   524  	// verify an EC that returns a segment size of 0 is considered invalid
   525  	ec = &mockErasureCoder{}
   526  	defer func() {
   527  		if r := recover(); r == nil || !strings.Contains(fmt.Sprintf("%v", r), "pcws has a bad erasure coder") {
   528  			t.Fatal("Expected build.Critical", r)
   529  		}
   530  	}()
   531  	GetPieceOffsetAndLen(ec, 0, 0)
   532  }
   533  
   534  // TestGetPieceOffsetAndLenWithRecover is a unit test that isolates both
   535  // 'getPieceOffsetAndLen' in combination with the Recover function on the EC and
   536  // asserts we can properly encode and then recover at random offset and length
   537  func TestGetPieceOffsetAndLenWithRecover(t *testing.T) {
   538  	t.Parallel()
   539  
   540  	// create data
   541  	cntr := 0
   542  	originalData := make([]byte, modules.SectorSize)
   543  	for i := 0; i < int(modules.SectorSize); i += 2 {
   544  		binary.BigEndian.PutUint16(originalData[i:], uint16(cntr))
   545  		cntr += 1
   546  	}
   547  
   548  	// RS encode the data
   549  	data := make([]byte, modules.SectorSize)
   550  	copy(data, originalData)
   551  	ec := skymodules.NewRSSubCodeDefault()
   552  	pieces, err := ec.Encode(data)
   553  	if err != nil {
   554  		t.Fatal(err)
   555  	}
   556  
   557  	// Declare helper for testing.
   558  	run := func(offset, length uint64) {
   559  		pieceOffset, pieceLength := GetPieceOffsetAndLen(ec, offset, length)
   560  		skipLength := offset % (crypto.SegmentSize * uint64(ec.MinPieces()))
   561  
   562  		sliced := make([][]byte, len(pieces))
   563  		for i, piece := range pieces {
   564  			sliced[i] = make([]byte, pieceLength)
   565  			copy(sliced[i], piece[pieceOffset:pieceOffset+pieceLength])
   566  		}
   567  
   568  		buf := bytes.NewBuffer(nil)
   569  		skipWriter := &skipWriter{
   570  			writer: buf,
   571  			skip:   int(skipLength),
   572  		}
   573  		err = ec.Recover(sliced, length+uint64(skipLength), skipWriter)
   574  		if err != nil {
   575  			t.Fatal(err)
   576  		}
   577  		actual := buf.Bytes()
   578  
   579  		expected := originalData[offset : offset+length]
   580  		if !bytes.Equal(actual, expected) {
   581  			t.Log("Input       :", offset, length, pieceOffset, pieceLength)
   582  			t.Log("original    :", originalData[:crypto.SegmentSize*8])
   583  			t.Log("expected    :", expected)
   584  			t.Log("expected len:", len(expected))
   585  			t.Log("actual      :", actual)
   586  			t.Log("actual   len:", len(actual))
   587  			t.Fatal("unexpected")
   588  		}
   589  	}
   590  
   591  	// Test some cases manually.
   592  	run(0, crypto.SegmentSize)
   593  	run(crypto.SegmentSize, crypto.SegmentSize)
   594  	run(2*crypto.SegmentSize, crypto.SegmentSize)
   595  	run(crypto.SegmentSize, 2*crypto.SegmentSize)
   596  	run(1, crypto.SegmentSize)
   597  	run(0, crypto.SegmentSize-1)
   598  	run(0, crypto.SegmentSize+1)
   599  	run(crypto.SegmentSize-1, crypto.SegmentSize+1)
   600  
   601  	// Test random inputs.
   602  	for rounds := 0; rounds < 100; rounds++ {
   603  		// random length and offset
   604  		length := (fastrand.Uint64n(5*crypto.SegmentSize) + 1)
   605  		offset := fastrand.Uint64n(modules.SectorSize - length)
   606  		run(offset, length)
   607  	}
   608  }
   609  
   610  // TestLaunchedWorkerInfo_String is a small unit test that verifies the output
   611  // of the String implementation on the launched worker info object.
   612  func TestLaunchedWorkerInfo_String(t *testing.T) {
   613  	t.Parallel()
   614  
   615  	pdc := new(projectDownloadChunk)
   616  	fastrand.Read(pdc.uid[:])
   617  
   618  	w := new(worker)
   619  	w.staticHostPubKey = types.SiaPublicKey{
   620  		Algorithm: types.SignatureEd25519,
   621  		Key:       fastrand.Bytes(32),
   622  	}
   623  
   624  	lwi := &launchedWorkerInfo{
   625  		staticPieceIndex:        1,
   626  		staticIsOverdriveWorker: false,
   627  
   628  		staticLaunchTime:           time.Now().Add(-5 * time.Second),
   629  		staticExpectedCompleteTime: time.Now().Add(10 * time.Second),
   630  		staticExpectedDuration:     10 * time.Second,
   631  
   632  		staticPDC:    pdc,
   633  		staticWorker: w,
   634  	}
   635  
   636  	// assert output when download not complete
   637  	expectedWorkerInfo := "initial worker " + w.staticHostPubKey.ShortString()
   638  	expectedPieceInfo := "piece 1"
   639  	expectedEstInfo := "estimated complete 10000 ms"
   640  	expectedDurInfo := "not responded after 5000ms"
   641  	if !strings.Contains(lwi.String(), expectedWorkerInfo) ||
   642  		!strings.Contains(lwi.String(), expectedPieceInfo) ||
   643  		!strings.Contains(lwi.String(), expectedEstInfo) ||
   644  		!strings.Contains(lwi.String(), expectedDurInfo) {
   645  		t.Fatal("unexpected: ", lwi.String())
   646  	}
   647  
   648  	// assert output when download complete
   649  	lwi.completeTime = time.Now()
   650  	lwi.jobDuration = 20 * time.Second
   651  	lwi.totalDuration = time.Since(lwi.staticLaunchTime)
   652  
   653  	expectedDurInfo = "responded after 5000ms"
   654  	expectedJobInfo := "read job took 20000ms"
   655  	expectedErrInfo := "job completed successfully"
   656  	if !strings.Contains(lwi.String(), expectedWorkerInfo) ||
   657  		!strings.Contains(lwi.String(), expectedDurInfo) ||
   658  		!strings.Contains(lwi.String(), expectedJobInfo) ||
   659  		!strings.Contains(lwi.String(), expectedErrInfo) {
   660  		t.Fatal("unexpected", lwi.String())
   661  	}
   662  
   663  	// assert output when job errored out
   664  	lwi.jobErr = errors.New("some failure")
   665  
   666  	expectedErrInfo = "job failed with err: some failure"
   667  	if !strings.Contains(lwi.String(), expectedWorkerInfo) ||
   668  		!strings.Contains(lwi.String(), expectedDurInfo) ||
   669  		!strings.Contains(lwi.String(), expectedJobInfo) ||
   670  		!strings.Contains(lwi.String(), expectedErrInfo) {
   671  		t.Fatal("unexpected", lwi.String())
   672  	}
   673  
   674  	// assert output when worker is overdrive worker
   675  	lwi.staticIsOverdriveWorker = true
   676  	expectedWorkerInfo = "overdrive worker " + w.staticHostPubKey.ShortString()
   677  	if !strings.Contains(lwi.String(), expectedWorkerInfo) {
   678  		t.Fatal("unexpected", lwi.String())
   679  	}
   680  }
   681  
   682  // newTestProjectDownloadChunk returns a PDC used for testing
   683  func newTestProjectDownloadChunk(pcws *projectChunkWorkerSet, responseChan chan *downloadResponse) *projectDownloadChunk {
   684  	ec := pcws.staticErasureCoder
   685  
   686  	pieceIndices := make([]uint64, ec.NumPieces())
   687  	for i := 0; i < len(pieceIndices); i++ {
   688  		pieceIndices[i] = uint64(i)
   689  	}
   690  
   691  	if responseChan == nil {
   692  		responseChan = make(chan *downloadResponse, 1)
   693  	}
   694  
   695  	return &projectDownloadChunk{
   696  		piecesInfo:   make([]pieceInfo, ec.NumPieces()),
   697  		piecesData:   make([][]byte, ec.NumPieces()),
   698  		piecesProofs: make([][]crypto.Hash, ec.NumPieces()),
   699  
   700  		workerProgressMap: make(map[uint32]workerProgress),
   701  
   702  		downloadResponseChan: responseChan,
   703  		workerSet:            pcws,
   704  		workerState:          pcws.managedWorkerState(),
   705  
   706  		ctx: context.Background(),
   707  
   708  		staticLaunchTime:   time.Now(),
   709  		staticPieceIndices: pieceIndices,
   710  	}
   711  }
   712  
   713  // mockWorker is a helper function that returns a worker with a pricetable
   714  // and an initialised read queue that returns a non zero value for read
   715  // estimates depending on the given jobTime value.
   716  func mockWorker(jobTime time.Duration) *worker {
   717  	spk := types.SiaPublicKey{
   718  		Algorithm: types.SignatureEd25519,
   719  		Key:       fastrand.Bytes(crypto.PublicKeySize),
   720  	}
   721  
   722  	worker := new(worker)
   723  	worker.staticHostPubKey = spk
   724  	worker.staticHostPubKeyStr = spk.String()
   725  
   726  	worker.newMaintenanceState()
   727  
   728  	// init price table
   729  	worker.newPriceTable()
   730  	worker.staticPriceTable().staticPriceTable = newDefaultPriceTable()
   731  	worker.staticPriceTable().staticUpdateTime = time.Now()
   732  	worker.staticPriceTable().staticExpiryTime = time.Now().Add(5 * time.Minute)
   733  
   734  	// init worker cache
   735  	wc := new(workerCache)
   736  	atomic.StorePointer(&worker.atomicCache, unsafe.Pointer(wc))
   737  
   738  	jrs := NewJobReadStats()
   739  	jrs.weightedJobTime64k = float64(jobTime)
   740  
   741  	// init queues
   742  	worker.initJobHasSectorQueue()
   743  	worker.initJobUpdateRegistryQueue()
   744  	worker.initJobReadRegistryQueue()
   745  	worker.initJobReadQueue(jrs)
   746  	worker.initJobLowPrioReadQueue(jrs)
   747  
   748  	return worker
   749  }
   750  
   751  // mockErasureCoder implements the erasure coder interface, but is an invalid
   752  // erasure coder that returns a 0 segmentsize. It is used to test the critical
   753  // that is thrown when an invalid EC is passed to 'getPieceOffsetAndLen'
   754  type mockErasureCoder struct{}
   755  
   756  func (mec *mockErasureCoder) NumPieces() int                       { return 10 }
   757  func (mec *mockErasureCoder) MinPieces() int                       { return 1 }
   758  func (mec *mockErasureCoder) Encode(data []byte) ([][]byte, error) { return nil, nil }
   759  func (mec *mockErasureCoder) Identifier() skymodules.ErasureCoderIdentifier {
   760  	return skymodules.ErasureCoderIdentifier("mock")
   761  }
   762  func (mec *mockErasureCoder) EncodeShards(data [][]byte) ([][]byte, error)         { return nil, nil }
   763  func (mec *mockErasureCoder) Reconstruct(pieces [][]byte) error                    { return nil }
   764  func (mec *mockErasureCoder) Recover(pieces [][]byte, n uint64, w io.Writer) error { return nil }
   765  func (mec *mockErasureCoder) SupportsPartialEncoding() (uint64, bool)              { return 0, true }
   766  func (mec *mockErasureCoder) Type() skymodules.ErasureCoderType {
   767  	return skymodules.ErasureCoderType{9, 9, 9, 9}
   768  }