gitlab.com/SkynetLabs/skyd@v1.6.9/skymodules/renter/projectdownloadworker_test.go

     1  package renter
     2  
     3  import (
     4  	"math"
     5  	"reflect"
     6  	"sync/atomic"
     7  	"testing"
     8  	"time"
     9  	"unsafe"
    10  
    11  	"gitlab.com/NebulousLabs/fastrand"
    12  	"gitlab.com/SkynetLabs/skyd/skymodules"
    13  	"go.sia.tech/siad/crypto"
    14  	"go.sia.tech/siad/types"
    15  )
    16  
    17  // newTestDownloadState creates a new bufferedDownloadState for testing.
    18  func newTestDownloadState() *bufferedDownloadState {
    19  	return &bufferedDownloadState{
    20  		pieces: make(map[uint64]struct{}),
    21  		added:  make(map[uint32]struct{}),
    22  	}
    23  }
    24  
    25  // TestChimeraWorker verifies the NewChimeraWorker constructor.
    26  func TestChimeraWorker(t *testing.T) {
    27  	t.Parallel()
    28  
    29  	// newIndividualWorker is a helper function that creates a test worker
    30  	numWorkers := uint32(0)
    31  	newIndividualWorker := func(complete, avail, cost float64) *individualWorker {
    32  		numWorkers++
    33  		return &individualWorker{
    34  			resolved:               false,
    35  			cachedCompleteChance:   complete,
    36  			staticAvailabilityRate: avail,
    37  			staticCost:             cost,
    38  			staticIdentifier:       numWorkers,
    39  		}
    40  	}
    41  
    42  	// assert the complete chances and cost are averaged and the identifier is
    43  	// initialized
    44  	workers := []*individualWorker{
    45  		newIndividualWorker(.1, .5, .5),
    46  		newIndividualWorker(.2, .5, .6),
    47  		newIndividualWorker(.3, .5, .7),
    48  		newIndividualWorker(.4, .5, .8),
    49  	}
    50  	numWorkers++
    51  	cw := NewChimeraWorker(workers, numWorkers)
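        	// note: the asserted values below are consistent with averaging the
        	// workers' fields; with a common availability rate of .5 that is
        	// (.1+.2+.3+.4)/4*.5 = .125 for the complete chance and
        	// (.5+.6+.7+.8)/4 = .65 for the cost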
    52  	if cw.staticChanceComplete != .125 {
    53  		t.Fatal("bad", cw.staticChanceComplete)
    54  	}
    55  	if cw.staticCost != .65 {
    56  		t.Fatal("bad", cw.staticCost)
    57  	}
    58  	if cw.staticIdentifier == 0 {
    59  		t.Fatal("bad", cw.staticIdentifier)
    60  	}
    61  }
    62  
    63  // TestIndividualWorker runs a series of small unit tests that probe the methods
    64  // on the individual worker object.
    65  func TestIndividualWorker(t *testing.T) {
    66  	t.Parallel()
    67  	t.Run("cost", testIndividualWorker_cost)
    68  	t.Run("pieces", testIndividualWorker_pieces)
    69  	t.Run("isLaunched", testIndividualWorker_isLaunched)
    70  	t.Run("isResolved", testIndividualWorker_isResolved)
    71  	t.Run("worker", testIndividualWorker_worker)
    72  }
    73  
    74  // testIndividualWorker_cost is a unit test for the cost method
    75  func testIndividualWorker_cost(t *testing.T) {
    76  	t.Parallel()
    77  
    78  	iw := &individualWorker{staticCost: float64(fastrand.Uint64n(10) + 1)}
    79  
    80  	// cost should return the static cost if the worker is not launched
    81  	if iw.cost() != iw.staticCost {
    82  		t.Fatal("bad")
    83  	}
    84  
    85  	// cost should return zero if the worker is launched
    86  	iw.currentPieceLaunchedAt = time.Now()
    87  	if iw.cost() != 0 {
    88  		t.Fatal("bad")
    89  	}
    90  }
    91  
    92  // testIndividualWorker_pieces is a unit test for the pieces method
    93  func testIndividualWorker_pieces(t *testing.T) {
    94  	t.Parallel()
    95  
    96  	iw := &individualWorker{}
    97  	if iw.pieces(nil) != nil {
    98  		t.Fatal("bad")
    99  	}
   100  
   101  	iw.pieceIndices = []uint64{0, 1, 2}
   102  	if len(iw.pieces(nil)) != 3 {
   103  		t.Fatal("bad")
   104  	}
   105  	for i, piece := range []uint64{0, 1, 2} {
   106  		if iw.pieces(nil)[i] != piece {
   107  			t.Fatal("bad")
   108  		}
   109  	}
   110  }
   111  
   112  // testIndividualWorker_isLaunched is a unit test for the isLaunched method
   113  func testIndividualWorker_isLaunched(t *testing.T) {
   114  	t.Parallel()
   115  
   116  	iw := &individualWorker{}
   117  	if iw.isLaunched() {
   118  		t.Fatal("bad")
   119  	}
   120  
   121  	iw.currentPieceLaunchedAt = time.Now()
   122  	if !iw.isLaunched() {
   123  		t.Fatal("bad")
   124  	}
   125  }
   126  
   127  // testIndividualWorker_isResolved is a unit test for the isResolved method
   128  func testIndividualWorker_isResolved(t *testing.T) {
   129  	t.Parallel()
   130  
   131  	iw := &individualWorker{}
   132  	if iw.isResolved() {
   133  		t.Fatal("bad")
   134  	}
   135  
   136  	iw.resolved = true
   137  	if !iw.isResolved() {
   138  		t.Fatal("bad")
   139  	}
   140  }
   141  
   142  // testIndividualWorker_worker is a unit test for the worker method
   143  func testIndividualWorker_worker(t *testing.T) {
   144  	t.Parallel()
   145  
   146  	iw := &individualWorker{}
   147  	if iw.worker() != nil {
   148  		t.Fatal("bad")
   149  	}
   150  
   151  	w := new(worker)
   152  	iw.staticWorker = w
   153  	if iw.worker() != w {
   154  		t.Fatal("bad")
   155  	}
   156  }
   157  
   158  // TestWorkerSet is a set of unit tests that verify the functionality of the
   159  // worker set.
   160  func TestWorkerSet(t *testing.T) {
   161  	t.Parallel()
   162  
   163  	t.Run("AdjustedDuration", testWorkerSetAdjustedDuration)
   164  	t.Run("CheaperSetFromCandidate", testWorkerSetCheaperSetFromCandidate)
   165  	t.Run("Clone", testWorkerSetClone)
   166  	t.Run("Create", testWorkerSetCreate)
   167  	t.Run("GreaterThanHalf", testWorkerSetGreaterThanHalf)
   168  }
   169  
   170  // testWorkerSetAdjustedDuration is a unit test that verifies the functionality
   171  // of the AdjustedDuration method on the worker set.
   172  func testWorkerSetAdjustedDuration(t *testing.T) {
   173  	t.Parallel()
   174  
   175  	ws := &workerSet{
   176  		staticBucketDuration: 100 * time.Millisecond,
   177  		staticMinPieces:      1,
   178  	}
   179  
   180  	// default ppms
   181  	ppms := skymodules.DefaultSkynetPricePerMS
   182  
   183  	// expect no penalty if the ppms exceeds the job cost
   184  	if ws.adjustedDuration(ppms) != ws.staticBucketDuration {
   185  		t.Fatal("bad")
   186  	}
   187  
   188  	// create a worker and ensure its job cost is not zero
   189  	cost, _ := types.SiacoinPrecision.Float64()
   190  	iw1 := &individualWorker{
   191  		staticWorker: mockWorker(10 * time.Millisecond),
   192  		staticCost:   cost,
   193  	}
   194  	if iw1.cost() == 0 {
   195  		t.Fatal("bad")
   196  	}
   197  
   198  	// add the worker and calculate the adjusted duration, ensure a cost penalty
   199  	// has been applied. We don't have to cover the actual cost penalty as that
   200  	// is unit tested by TestAddCostPenalty.
   201  	ws.workers = append(ws.workers, iw1)
   202  	if ws.adjustedDuration(ppms) <= ws.staticBucketDuration {
   203  		t.Fatal("bad")
   204  	}
   205  }
   206  
   207  // testWorkerSetCheaperSetFromCandidate is a unit test that verifies the
   208  // functionality of the CheaperSetFromCandidate method on the worker set.
   209  func testWorkerSetCheaperSetFromCandidate(t *testing.T) {
   210  	t.Parallel()
   211  
   212  	sc, _ := types.SiacoinPrecision.Float64()
   213  
   214  	// updateReadCostWithFactor is a helper function that alters a worker's
   215  	// cost by multiplying the initial cost with the given factor
   216  	updateReadCostWithFactor := func(w *individualWorker, factor uint64) {
   217  		w.staticCost = sc * float64(factor)
   218  	}
   219  
   220  	// workerAt is a small helper function that returns the worker's identifier
   221  	// at the given index
   222  	workerAt := func(ws *workerSet, index int) string {
   223  		return ws.workers[index].worker().staticHostPubKeyStr
   224  	}
   225  
   226  	// create some workers
   227  	iw1 := newTestIndivualWorker("w1", 1, 1, 10*time.Millisecond, []uint64{1})
   228  	iw2 := newTestIndivualWorker("w2", 2, 1, 10*time.Millisecond, []uint64{2})
   229  
   230  	// have w1 and w2 download p1 and p2
   231  	iw1.markPieceForDownload(1)
   232  	iw2.markPieceForDownload(2)
   233  
   234  	// update the read cost of both workers
   235  	updateReadCostWithFactor(iw1, 10) // 10SC
   236  	updateReadCostWithFactor(iw2, 20) // 20SC
   237  
   238  	// assert the workers are increasingly more expensive
   239  	if iw1.cost() >= iw2.cost() {
   240  		t.Fatal("bad")
   241  	}
   242  
   243  	// build a worker set
   244  	pcws := newTestProjectChunkWorkerSet()
   245  	pdc := newTestProjectDownloadChunk(pcws, nil)
   246  	ws := &workerSet{
   247  		workers:              []downloadWorker{iw1, iw2},
   248  		staticBucketDuration: 100 * time.Millisecond,
   249  		staticMinPieces:      1,
   250  		staticPDC:            pdc,
   251  	}
   252  
   253  	// create w3 without pieces
   254  	iw3 := newTestIndivualWorker("w3", 3, 1, 10*time.Millisecond, []uint64{})
   255  
   256  	// assert the cheaper set is nil (candidate has no pieces)
   257  	if ws.cheaperSetFromCandidate(iw3) != nil {
   258  		t.Fatal("bad")
   259  	}
   260  
   261  	// have w3 download p3 but at a high cost, higher than w1 and w2
   262  	iw3.pieceIndices = append(iw3.pieceIndices, 3)
   263  	iw3.markPieceForDownload(3)
   264  	updateReadCostWithFactor(iw3, 30) // 30SC
   265  
   266  	// assert the cheaper set is nil (candidate is more expensive)
   267  	cheaperSet := ws.cheaperSetFromCandidate(iw3)
   268  	if cheaperSet != nil {
   269  		t.Fatal("bad")
   270  	}
   271  
   272  	// make w3 cheaper than w2
   273  	updateReadCostWithFactor(iw3, 15) // 15SC
   274  	cheaperSet = ws.cheaperSetFromCandidate(iw3)
   275  	if cheaperSet == nil {
   276  		t.Fatal("bad")
   277  	}
   278  	if len(cheaperSet.workers) != 2 {
   279  		t.Fatal("bad")
   280  	}
   281  	if workerAt(cheaperSet, 0) != "w1" || workerAt(cheaperSet, 1) != "w3" {
   282  		t.Fatal("bad")
   283  	}
   284  
   285  	// continue with the cheaper set as working set (w1 and w3)
   286  	ws = cheaperSet
   287  
   288  	// create w4 with all pieces
   289  	iw4 := newTestIndivualWorker("w4", 4, 1, 10*time.Millisecond, []uint64{1, 3})
   290  
   291  	// make w4 more expensive
   292  	updateReadCostWithFactor(iw4, 40) // 40SC
   293  	cheaperSet = ws.cheaperSetFromCandidate(iw4)
   294  	if cheaperSet != nil {
   295  		t.Fatal("bad")
   296  	}
   297  
   298  	// make w4 cheaper than w1
   299  	updateReadCostWithFactor(iw4, 4) // 4SC
   300  	cheaperSet = ws.cheaperSetFromCandidate(iw4)
   301  	if cheaperSet == nil {
   302  		t.Fatal("bad")
   303  	}
   304  	if len(cheaperSet.workers) != 2 {
   305  		t.Fatal("bad")
   306  	}
   307  
   308  	// assert we swapped out w3, and not w1, because w3 was more expensive
   309  	if workerAt(cheaperSet, 0) != "w1" || workerAt(cheaperSet, 1) != "w4" {
   310  		t.Fatal("bad", workerAt(cheaperSet, 0), workerAt(cheaperSet, 1))
   311  	}
   312  
   313  	// continue with the cheaper set as working set
   314  	ws = cheaperSet
   315  
   316  	// create w5 capable of resolving p1, but make it more expensive
   317  	iw5 := newTestIndivualWorker("w5", 5, .1, 10*time.Millisecond, []uint64{1})
   318  	updateReadCostWithFactor(iw5, 50) // 50SC
   319  
   320  	// assert the cheaper set is nil (candidate is more expensive)
   321  	cheaperSet = ws.cheaperSetFromCandidate(iw5)
   322  	if cheaperSet != nil {
   323  		t.Fatal("bad")
   324  	}
   325  
   326  	// make w5 cheaper than w1
   327  	updateReadCostWithFactor(iw5, 5) // 5SC
   328  	cheaperSet = ws.cheaperSetFromCandidate(iw5)
   329  	if cheaperSet == nil {
   330  		t.Fatal("bad")
   331  	}
   332  	if len(cheaperSet.workers) != 2 {
   333  		t.Fatal("bad")
   334  	}
   335  
   336  	// assert we swapped out w1 for w5 because it was cheaper
   337  	if workerAt(cheaperSet, 0) != "w5" || workerAt(cheaperSet, 1) != "w4" {
   338  		t.Fatal("bad", workerAt(cheaperSet, 0), workerAt(cheaperSet, 1))
   339  	}
   340  
   341  	// set a launch time for w1
   342  	ws.workers[0].(*individualWorker).currentPieceLaunchedAt = time.Now()
   343  
   344  	// assert we did not swap out w1 for w5 because the cost is now 0
   345  	cheaperSet = ws.cheaperSetFromCandidate(iw5)
   346  	if cheaperSet != nil {
   347  		t.Fatal("bad")
   348  	}
   349  }
   350  
   351  // testWorkerSetClone is a unit test that verifies the
   352  // functionality of the Clone method on the worker set.
   353  func testWorkerSetClone(t *testing.T) {
   354  	t.Parallel()
   355  
   356  	// create some workers
   357  	iw1 := newTestIndivualWorker("w1", 1, 0, 0, nil)
   358  	iw2 := newTestIndivualWorker("w2", 2, 0, 0, nil)
   359  	iw3 := newTestIndivualWorker("w3", 3, 0, 0, nil)
   360  
   361  	// build a worker set
   362  	ws := &workerSet{
   363  		workers:              []downloadWorker{iw1, iw2, iw3},
   364  		staticBucketDuration: 100 * time.Millisecond,
   365  		staticMinPieces:      1,
   366  	}
   367  
   368  	// use reflection to assert "clone" returns an identical object
   369  	clone := ws.clone()
   370  	if !reflect.DeepEqual(clone, ws) {
   371  		t.Fatal("bad")
   372  	}
   373  }
   374  
   375  // testWorkerSetCreate is a unit test that verifies the creation of a worker set
   376  func testWorkerSetCreate(t *testing.T) {
   377  	t.Parallel()
   378  
   379  	bI := 11
   380  	bDur := skymodules.DistributionDurationForBucketIndex(bI) // 44ms
   381  	minPieces := 1
   382  	numOD := 0
   383  	workersNeeded := minPieces + numOD
   384  
   385  	// create pdc
   386  	pcws := newTestProjectChunkWorkerSet()
   387  	pdc := newTestProjectDownloadChunk(pcws, nil)
   388  
   389  	// mock a launched worker
   390  	readDT := skymodules.NewDistribution(time.Minute)
   391  	readDT.AddDataPoint(30 * time.Millisecond)
   392  	readDT.AddDataPoint(40 * time.Millisecond)
   393  	readDT.AddDataPoint(60 * time.Millisecond)
   394  	launchedWorkerIdentifier := uint32(1)
   395  	lw := &individualWorker{
   396  		pieceIndices: []uint64{0},
   397  		resolved:     true,
   398  
   399  		currentPiece:           0,
   400  		currentPieceLaunchedAt: time.Now().Add(-20 * time.Millisecond),
   401  		staticReadDistribution: *readDT,
   402  		staticIdentifier:       launchedWorkerIdentifier,
   403  		staticCost:             1,
   404  	}
   405  
   406  	// mock a resolved worker
   407  	readDT = skymodules.NewDistribution(time.Minute)
   408  	readDT.AddDataPoint(20 * time.Millisecond)
   409  	readDT.AddDataPoint(30 * time.Millisecond)
   410  	readDT.AddDataPoint(40 * time.Millisecond)
   411  	readDT.AddDataPoint(60 * time.Millisecond)
   412  	resolvedWorkerIdentifier := uint32(2)
   413  	rw := &individualWorker{
   414  		pieceIndices: []uint64{0},
   415  		resolved:     true,
   416  
   417  		staticReadDistribution: *readDT,
   418  		staticIdentifier:       resolvedWorkerIdentifier,
   419  		staticCost:             1,
   420  	}
   421  
   422  	// recalculate the distribution chances
   423  	lw.recalculateDistributionChances()
   424  	rw.recalculateDistributionChances()
   425  
   426  	// cache the complete chance at the bucket index
   427  	lw.recalculateCompleteChance(bI)
   428  	rw.recalculateCompleteChance(bI)
   429  
   430  	// assert the launched worker's chance is lower than the resolved worker,
   431  	// but both are higher than 50% so they can sustain a worker set on their
   432  	// own
   433  	lwcc := lw.completeChanceCached()
   434  	rwcc := rw.completeChanceCached()
   435  	if lwcc >= rwcc || lwcc <= .5 || rwcc <= .5 {
   436  		t.Fatal("unexpected", lwcc, rwcc)
   437  	}
   438  
   439  	// create a worker set
   440  	workers := []*individualWorker{lw, rw}
   441  	ws, _ := pdc.createWorkerSetInner(workers, minPieces, numOD, bI, bDur, newTestDownloadState())
   442  	if ws == nil || len(ws.workers) != 1 {
   443  		t.Fatal("unexpected")
   444  	}
   445  
   446  	// assert the launched worker got selected
   447  	selected := ws.workers[0]
   448  	if selected.identifier() != launchedWorkerIdentifier {
   449  		t.Fatal("unexpected", selected.identifier())
   450  	}
   451  
   452  	// assert the resolved worker was selected as most likely, proving that the
   453  	// launched worker beat it because it's cheaper
   454  	dlw := pdc.buildDownloadWorkers(workers, newTestDownloadState())
   455  	mostLikely, lessLikely := pdc.splitMostlikelyLessLikely(dlw, workersNeeded, newTestDownloadState())
   456  	if len(mostLikely) != 1 || len(lessLikely) != 1 {
   457  		t.Fatal("unexpected")
   458  	}
   459  	if mostLikely[0].identifier() != resolvedWorkerIdentifier || lessLikely[0].identifier() != launchedWorkerIdentifier {
   460  		t.Fatal("unexpected")
   461  	}
   462  
   463  	// now bump the overdrive workers, and assert both workers are in the set
   464  	ws, _ = pdc.createWorkerSetInner(workers, minPieces, numOD+1, bI, bDur, newTestDownloadState())
   465  	if ws == nil || len(ws.workers) != 2 {
   466  		t.Fatal("unexpected", ws)
   467  	}
   468  }
   469  
   470  // testWorkerSetGreaterThanHalf is a unit test that verifies the functionality
   471  // of the GreaterThanHalf method on the worker set.
   472  func testWorkerSetGreaterThanHalf(t *testing.T) {
   473  	t.Parallel()
   474  
   475  	// convenience variables
   476  	DistributionDurationForBucketIndex := skymodules.DistributionDurationForBucketIndex
   477  	distributionTotalBuckets := skymodules.DistributionTrackerTotalBuckets
   478  
   479  	// create some workers
   480  	iw1 := newTestIndivualWorker("w1", 1, 0, 0, nil)
   481  	iw2 := newTestIndivualWorker("w2", 2, 0, 0, nil)
   482  	iw3 := newTestIndivualWorker("w3", 3, 0, 0, nil)
   483  	iw4 := newTestIndivualWorker("w4", 4, 0, 0, nil)
   484  	iw5 := newTestIndivualWorker("w5", 5, 0, 0, nil)
   485  
   486  	// populate their distributions in a way that the output of the distribution
   487  	// becomes predictable by adding a single datapoint to every bucket
   488  	for _, w := range []*individualWorker{iw1, iw2, iw3, iw4, iw5} {
   489  		// mimic resolved workers, that ensures we only take the read DTs into
   490  		// account, which is sufficient for the goal of this unit test
   491  		w.resolved = true
   492  		for i := 0; i < distributionTotalBuckets; i++ {
   493  			point := DistributionDurationForBucketIndex(i)
   494  			w.staticReadDistribution.AddDataPoint(point)
   495  		}
   496  		w.recalculateDistributionChances()
   497  	}
   498  
   499  	// recalculateCompleteChance is a helper function that mimics the download
   500  	// algorithm recalculating complete chances at a certain bucket index, we
   501  	// use percentages here because we're reasoning about complete chances
   502  	recalculateCompleteChance := func(pct float64) {
   503  		index := distributionTotalBuckets * int(pct*100) / 100
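        		// since every bucket holds exactly one data point, the complete
        		// chance recalculated at this index is roughly equal to 'pct'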
   504  		for _, w := range []*individualWorker{iw1, iw2, iw3, iw4, iw5} {
   505  			w.recalculateCompleteChance(index)
   506  		}
   507  	}
   508  
   509  	// build a worker set with 0 overdrive workers
   510  	ws := &workerSet{
   511  		workers:              []downloadWorker{iw1},
   512  		staticBucketDuration: 100 * time.Millisecond,
   513  		staticMinPieces:      2,
   514  	}
   515  
   516  	// assert chance is not greater than .5
   517  	recalculateCompleteChance(.5)
   518  	if ws.chanceGreaterThanHalf() {
   519  		t.Fatal("bad")
   520  	}
   521  
   522  	// push it right over the 50% mark
   523  	recalculateCompleteChance(.51)
   524  	if !ws.chanceGreaterThanHalf() {
   525  		t.Fatal("bad")
   526  	}
   527  
   528  	// add another worker, seeing as the chances are multiplied and both workers
   529  	// have a chance ~= 50% the total chance should not be greater than half
   530  	recalculateCompleteChance(.5)
   531  	ws.workers = append(ws.workers, iw2)
   532  	if ws.chanceGreaterThanHalf() {
   533  		t.Fatal("bad")
   534  	}
   535  
   536  	// at 75% chance per worker the total chance should be ~56% which is greater
   537  	// than half
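        	// (0.75 * 0.75 = 0.5625)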
   538  	recalculateCompleteChance(.75)
   539  	if !ws.chanceGreaterThanHalf() {
   540  		t.Fatal("bad")
   541  	}
   542  
   543  	// add another worker, this makes it so the worker set has one overdrive
   544  	// worker, which influences the 'chanceGreaterThanHalf' because now we have
   545  	// one worker to spare, increasing our total chance
   546  	ws.workers = append(ws.workers, iw3)
   547  	ws.staticNumOverdrive = 1
   548  
   549  	// when all workers have exactly a 50% chance, the total chance does NOT
   550  	// exceed 0.5 because it is exactly equal to 0.5
   551  	//
   552  	// 0.5*0.5*0.5 = 0.125 is the chance they're all able to complete after dur
   553  	// 0.125/0.5*0.5 = 0.125 is the chance one is tails and the others are heads
   554  	// 0.125+(0.125*3) = 0.5 because every worker can be the one that's tails
   555  	recalculateCompleteChance(.5)
   556  	if ws.chanceGreaterThanHalf() {
   557  		t.Fatal("bad")
   558  	}
   559  
   560  	// now redo the calculation with a duration that's just over half of the
   561  	// distribution and assert the chance is now greater than half
   562  	recalculateCompleteChance(.51)
   563  	if !ws.chanceGreaterThanHalf() {
   564  		t.Fatal("bad")
   565  	}
   566  
   567  	// add another worker, this makes it so the worker set has two overdrive
   568  	// workers
   569  	ws.workers = append(ws.workers, iw4)
   570  	ws.staticNumOverdrive = 2
   571  
   572  	// when we have two overdrive workers, we have a similar situation but now
   573  	// there are essentially two workers to spare, increasing our chances. We
   574  	// again have the situation where exactly one of them is unable to download
   575  	// the piece but all others are, but also the situation where exactly one
   576  	// pair of workers fails while all other workers complete
   577  	//
   578  	// assert that at a duration that corresponds with the 33% mark the total
   579  	// chance is not greater than half:
   580  	//
   581  	// chance they all complete the DL is:
   582  	// 0,33^4 ~= 0.012
   583  	//
   584  	// chance one of them fails and others complete is:
   585  	// (0,67*0,33^3)*4 ~= 0,09
   586  	//
   587  	// chance each pair becomes tails and other workers complete is:
   588  	// (0,67^2*0,33^2)*6 ~= 0,3
   589  	//
   590  	// there are 6 unique pairs for 4 workers and 2 OD workers
   591  	// which is n choose k and equal to n!/(k!(n-k)!) or in our case 4!/(2!*2!) = 6
   592  	//
   593  	// the total chance is thus ~= 0.4 which is not greater than half
   594  	recalculateCompleteChance(.33)
   595  	if ws.chanceGreaterThanHalf() {
   596  		t.Fatal("bad")
   597  	}
   598  
   599  	// doing the same calculations with 0.4 results in:
   600  	// 0,4^4 ~= 0,025
   601  	// (0,6*0,4^3)*4 ~= 0,15
   602  	// (0,6^2*0,4^2)*6 ~= 0,35
   603  	//
   604  	// the total chance is thus ~= 0.525 which is greater than half
   605  	recalculateCompleteChance(.4)
   606  	if !ws.chanceGreaterThanHalf() {
   607  		t.Fatal("bad")
   608  	}
   609  
   610  	// add another worker; we limit n choose k to 2 because of computational
   611  	// complexity, so with more than 2 overdrive workers we fall back to an
   612  	// approximation that returns true if the sum of all chances is greater
   613  	// than minpieces
   614  	//
   615  	// we have two minpieces and 5 workers, so every worker has to exceed 40%
   616  	ws.workers = append(ws.workers, iw5)
   617  	ws.staticNumOverdrive = 3
   618  	if ws.chanceGreaterThanHalf() {
   619  		t.Fatal("bad")
   620  	}
   621  	recalculateCompleteChance(.41)
   622  	if !ws.chanceGreaterThanHalf() {
   623  		t.Fatal("bad")
   624  	}
   625  }
   626  
   627  // TestCoinflips wraps a set of unit tests that verify the functionality of the
   628  // coinflips type.
   629  func TestCoinflips(t *testing.T) {
   630  	t.Parallel()
   631  	t.Run("AllHeads", testCoinflipsAllHeads)
   632  	t.Run("HeadsAllowOneTails", testCoinflipsHeadsAllowOneTails)
   633  	t.Run("HeadsAllowTwoTails", testCoinflipsHeadsAllowTwoTails)
   634  	t.Run("Sum", testCoinflipsSum)
   635  }
   636  
   637  // testCoinflipsAllHeads is a unit test to verify the functionality of
   638  // 'chanceAllHeads' on the coinflips type.
   639  func testCoinflipsAllHeads(t *testing.T) {
   640  	t.Parallel()
   641  
   642  	// almostEqual compares floats up to a precision threshold of 1e-9, this is
   643  	// necessary due to floating point errors that arise when multiplying floats
   644  	almostEqual := func(a, b float64) bool {
   645  		return math.Abs(a-b) <= 1e-9
   646  	}
   647  
   648  	tests := []struct {
   649  		name   string
   650  		flips  coinflips
   651  		chance float64
   652  	}{
   653  		{"no_flips", []float64{}, 0},
   654  		{"one_pure_flip", []float64{.5}, .5},
   655  		{"two_pure_flips", []float64{.5, .5}, math.Pow(.5, 2)},
   656  		{"multiple_diff_flips", []float64{.1, .3, .2}, .1 * .2 * .3},
   657  		{"one_heads", []float64{1}, 1},
   658  		{"multiple_heads", []float64{1, 1}, 1},
   659  	}
   660  
   661  	for _, test := range tests {
   662  		actual := test.flips.chanceAllHeads()
   663  		if !almostEqual(actual, test.chance) {
   664  			t.Error("bad", test.name, actual, test.chance)
   665  		}
   666  	}
   667  }
   668  
   669  // testCoinflipsHeadsAllowOneTails is a unit test to verify the functionality of
   670  // 'chanceHeadsAllowOneTails' on the coinflips type.
   671  func testCoinflipsHeadsAllowOneTails(t *testing.T) {
   672  	t.Parallel()
   673  
   674  	// almostEqual compares floats up to a precision threshold of 1e-9, this is
   675  	// necessary due to floating point errors that arise when multiplying floats
   676  	almostEqual := func(a, b float64) bool {
   677  		return math.Abs(a-b) <= 1e-9
   678  	}
   679  
   680  	tests := []struct {
   681  		name   string
   682  		flips  coinflips
   683  		chance float64
   684  	}{
   685  		{"no_flips", []float64{}, 0},
   686  		{"one_pure_flip", []float64{.5}, 1},
   687  		{"two_pure_flips", []float64{.5, .5}, 0.75},
   688  		{"multiple_diff_flips", []float64{.25, .5, .75}, (.25 * .5 * .75) + (0.75 * .5 * .75) + (.5 * .25 * .75) + (.25 * .25 * .5)},
   689  		{"one_heads", []float64{1}, 1},
   690  		{"multiple_heads", []float64{1, 1}, 1},
   691  	}
   692  
   693  	for _, test := range tests {
   694  		actual := test.flips.chanceHeadsAllowOneTails()
   695  		if !almostEqual(actual, test.chance) {
   696  			t.Error("bad", test.name, actual, test.chance)
   697  		}
   698  	}
   699  }
   700  
   701  // testCoinflipsHeadsAllowTwoTails is a unit test to verify the functionality of
   702  // 'chanceHeadsAllowTwoTails' on the coinflips type.
   703  func testCoinflipsHeadsAllowTwoTails(t *testing.T) {
   704  	t.Parallel()
   705  
   706  	// almostEqual compares floats up to a precision threshold of 1e-9, this is
   707  	// necessary due to floating point errors that arise when multiplying floats
   708  	almostEqual := func(a, b float64) bool {
   709  		return math.Abs(a-b) <= 1e-9
   710  	}
   711  
   712  	tests := []struct {
   713  		name   string
   714  		flips  coinflips
   715  		chance float64
   716  	}{
   717  		{"no_flips", []float64{}, 0},
   718  		{"one_pure_flip", []float64{.5}, 1},
   719  		{"two_pure_flips", []float64{.5, .5}, 1},
   720  		{"multiple_diff_flips", []float64{.25, .5, .75}, (.25 * .5 * .75) + (0.75 * .5 * .75) + (.5 * .25 * .75) + (.25 * .25 * .5) + (.75 * .5 * .75) + (.75 * .5 * .25) + (.25 * .5 * .25)},
   721  		{"one_heads", []float64{1}, 1},
   722  		{"multiple_heads", []float64{1, 1}, 1},
   723  	}
   724  
   725  	for _, test := range tests {
   726  		actual := test.flips.chanceHeadsAllowTwoTails()
   727  		if !almostEqual(actual, test.chance) {
   728  			t.Error("bad", test.name, actual, test.chance)
   729  		}
   730  	}
   731  }
   732  
   733  // testCoinflipsSum is a unit test to verify the functionality of
   734  // 'chanceSum' on the coinflips type.
   735  func testCoinflipsSum(t *testing.T) {
   736  	t.Parallel()
   737  
   738  	// almostEqual compares floats up to a precision threshold of 1e-9, this is
   739  	// necessary due to floating point errors that arise when multiplying floats
   740  	almostEqual := func(a, b float64) bool {
   741  		return math.Abs(a-b) <= 1e-9
   742  	}
   743  
   744  	tests := []struct {
   745  		name   string
   746  		flips  coinflips
   747  		chance float64
   748  	}{
   749  		{"no_flips", []float64{}, 0},
   750  		{"one_pure_flip", []float64{.5}, .5},
   751  		{"two_pure_flips", []float64{.5, .5}, 1},
   752  		{"multiple_diff_flips", []float64{.25, .5, .75}, 1.5},
   753  		{"one_heads", []float64{1}, 1},
   754  		{"multiple_heads", []float64{1, 1}, 2},
   755  	}
   756  
   757  	for _, test := range tests {
   758  		actual := test.flips.chanceSum()
   759  		if !almostEqual(actual, test.chance) {
   760  			t.Error("bad", test.name, actual, test.chance)
   761  		}
   762  	}
   763  }
   764  
   765  // TestAddCostPenalty is a unit test that covers the `addCostPenalty` helper
   766  // function.
   767  //
   768  // NOTE: should this file get removed due to introducing a new version of the
   769  // overdrive, this test has to move to projectdownloadworker_test as the
   770  // `addCostPenalty` function is used there as well.
   771  func TestAddCostPenalty(t *testing.T) {
   772  	t.Parallel()
   773  
   774  	// verify happy case
   775  	jt := time.Duration(fastrand.Intn(10) + 1)
   776  	jc := types.NewCurrency64(fastrand.Uint64n(100) + 10)
   777  	pricePerMS := types.NewCurrency64(5)
   778  
   779  	// calculate the expected outcome
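        	// (addCostPenalty is expected to add jobCost/pricePerMS milliseconds
        	// on top of the job time)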
   780  	penalty, err := jc.Div(pricePerMS).Uint64()
   781  	if err != nil {
   782  		t.Fatal(err)
   783  	}
   784  	expected := jt + (time.Duration(penalty) * time.Millisecond)
   785  	adjusted := addCostPenalty(jt, jc, pricePerMS)
   786  	if adjusted != expected {
   787  		t.Error("unexpected", adjusted, expected)
   788  	}
   789  
   790  	// verify no penalty if pricePerMS is higher than the cost of the job
   791  	adjusted = addCostPenalty(jt, jc, types.SiacoinPrecision)
   792  	if adjusted != jt {
   793  		t.Error("unexpected")
   794  	}
   795  
   796  	// verify penalty equal to MaxInt64 and job time of 1
   797  	jt = time.Duration(1)
   798  	jc = types.NewCurrency64(math.MaxInt64)
   799  	pricePerMS = types.NewCurrency64(1)
   800  	jt = addCostPenalty(jt, jc, pricePerMS)
   801  	if jt != time.Duration(math.MaxInt64) {
   802  		t.Error("unexpected")
   803  	}
   804  
   805  	// verify penalty equal to MaxInt64 and job time of 0
   806  	jt = time.Duration(0)
   807  	jc = types.NewCurrency64(math.MaxInt64)
   808  	pricePerMS = types.NewCurrency64(1)
   809  	jt = addCostPenalty(jt, jc, pricePerMS)
   810  	if jt != time.Duration(math.MaxInt64) {
   811  		t.Error("unexpected")
   812  	}
   813  
   814  	// verify overflow
   815  	jt = time.Duration(1)
   816  	jc = types.NewCurrency64(math.MaxUint64).Mul64(10)
   817  	pricePerMS = types.NewCurrency64(2)
   818  	jt = addCostPenalty(jt, jc, pricePerMS)
   819  	if jt != time.Duration(math.MaxInt64) {
   820  		t.Error("Expected job time to be adjusted to MaxInt64 on overflow")
   821  	}
   822  
   823  	// verify penalty higher than MaxInt64
   824  	jt = time.Duration(1)
   825  	jc = types.NewCurrency64(math.MaxInt64).Add64(1)
   826  	pricePerMS = types.NewCurrency64(1)
   827  	jt = addCostPenalty(jt, jc, pricePerMS)
   828  	if jt != time.Duration(math.MaxInt64) {
   829  		t.Error("Expected job time to be adjusted to MaxInt64 when penalty exceeds MaxInt64")
   830  	}
   831  
   832  	// verify high job time overflowing after adding penalty
   833  	jc = types.NewCurrency64(10)
   834  	pricePerMS = types.NewCurrency64(1)   // penalty is 10
   835  	jt = time.Duration(math.MaxInt64 - 5) // job time + penalty exceeds MaxInt64
   836  	jt = addCostPenalty(jt, jc, pricePerMS)
   837  	if jt != time.Duration(math.MaxInt64) {
   838  		t.Error("Expected job time to be adjusted to MaxInt64 when job time + penalty exceeds MaxInt64")
   839  	}
   840  }
   841  
   842  // TestBuildChimeraWorkers is a unit test that covers the 'buildChimeraWorkers'
   843  // helper function.
   844  func TestBuildChimeraWorkers(t *testing.T) {
   845  	t.Parallel()
   846  
   847  	// newIndividualWorker wraps newTestIndivualWorker
   848  	numWorkers := uint32(0)
   849  	newIndividualWorker := func(availabilityRate float64) *individualWorker {
   850  		numWorkers++
   851  		iw := newTestIndivualWorker("w", numWorkers, availabilityRate, 0, []uint64{0})
   852  		iw.resolved = false
   853  		return iw
   854  	}
   855  
   856  	// create pdc
   857  	pcws := newTestProjectChunkWorkerSet()
   858  	pdc := newTestProjectDownloadChunk(pcws, nil)
   859  
   860  	// empty case
   861  	numWorkers++
   862  	chimeras := pdc.buildChimeraWorkers([]*individualWorker{}, numWorkers)
   863  	if len(chimeras) != 0 {
   864  		t.Fatal("bad")
   865  	}
   866  
   867  	// add some workers that do not add up to the availability rate threshold
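        	// (the rates below sum to 0.9; judging by the comments further down,
        	// a chimera appears to need a summed availability rate of about 2)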
   868  	workers := []*individualWorker{
   869  		newIndividualWorker(0.3),
   870  		newIndividualWorker(0.1),
   871  		newIndividualWorker(0.5),
   872  	}
   873  
   874  	// check we still don't have a full chimera
   875  	numWorkers++
   876  	chimeras = pdc.buildChimeraWorkers(workers, numWorkers)
   877  	if len(chimeras) != 0 {
   878  		t.Fatal("bad")
   879  	}
   880  
   881  	// add more workers; we should end up with 2 chimeras
   882  	workers = append(
   883  		workers,
   884  		newIndividualWorker(0.6),
   885  		newIndividualWorker(0.6),
   886  		// chimera 1 complete
   887  		newIndividualWorker(0.8),
   888  		newIndividualWorker(0.3),
   889  		newIndividualWorker(0.6),
   890  		newIndividualWorker(0.3),
   891  		// chimera 2 complete, .1 remainder
   892  	)
   893  
   894  	// assert we have two chimeras
   895  	numWorkers++
   896  	chimeras = pdc.buildChimeraWorkers(workers, numWorkers)
   897  	if len(chimeras) != 2 {
   898  		t.Fatal("bad", len(chimeras))
   899  	}
   900  }
   901  
   902  // TestBuildDownloadWorkers is a unit test that covers the
   903  // 'buildDownloadWorkers' helper function.
   904  func TestBuildDownloadWorkers(t *testing.T) {
   905  	t.Parallel()
   906  
   907  	// newIndividualWorker wraps newTestIndivualWorker
   908  	numWorkers := uint32(0)
   909  	newIndividualWorker := func(availabilityRate float64, resolved bool) *individualWorker {
   910  		numWorkers++
   911  		iw := newTestIndivualWorker("w", numWorkers, availabilityRate, 0, []uint64{0})
   912  		iw.cachedCompleteChance = .1
   913  		iw.resolved = resolved
   914  		return iw
   915  	}
   916  
   917  	// create pdc
   918  	pcws := newTestProjectChunkWorkerSet()
   919  	pdc := newTestProjectDownloadChunk(pcws, nil)
   920  
   921  	// empty case
   922  	downloadWorkers := pdc.buildDownloadWorkers([]*individualWorker{}, newTestDownloadState())
   923  	if len(downloadWorkers) != 0 {
   924  		t.Fatal("bad")
   925  	}
   926  
   927  	// add a couple of workers that do not add up to the availability rate threshold
   928  	workers := []*individualWorker{
   929  		newIndividualWorker(0.3, false),
   930  		newIndividualWorker(0.1, false),
   931  		newIndividualWorker(0.5, false),
   932  		// 0.9 avail rate
   933  	}
   934  
   935  	// assert we still don't have a chimera
   936  	downloadWorkers = pdc.buildDownloadWorkers(workers, newTestDownloadState())
   937  	if len(downloadWorkers) != 0 {
   938  		t.Fatal("bad")
   939  	}
   940  
   941  	// add more workers; we should end up with 2 chimeras
   942  	workers = append(
   943  		workers,
   944  		newIndividualWorker(0.6, false),
   945  		newIndividualWorker(0.6, false),
   946  		// chimera 1 complete (.1 extra)
   947  		newIndividualWorker(0.9, false),
   948  		newIndividualWorker(0.3, false),
   949  		newIndividualWorker(0.6, false),
   950  		newIndividualWorker(0.3, false),
   951  		// chimera 2 complete (.1 extra)
   952  	)
   953  
   954  	// assert we have two download workers, both chimera workers
   955  	downloadWorkers = pdc.buildDownloadWorkers(workers, newTestDownloadState())
   956  	if len(downloadWorkers) != 2 {
   957  		t.Fatal("bad", len(downloadWorkers))
   958  	}
   959  	_, w1IsChimera := downloadWorkers[0].(*chimeraWorker)
   960  	_, w2IsChimera := downloadWorkers[1].(*chimeraWorker)
   961  	if !(w1IsChimera && w2IsChimera) {
   962  		t.Fatal("bad")
   963  	}
   964  
   965  	// add two resolved workers + enough unresolved to complete another chimera
   966  	workers = append(
   967  		workers,
   968  		newIndividualWorker(0.4, true),
   969  		newIndividualWorker(0.3, true),
   970  		newIndividualWorker(0.9, false),
   971  		newIndividualWorker(0.5, false),
   972  		newIndividualWorker(0.6, false),
   973  		newIndividualWorker(0.6, false),
   974  	)
   975  
   976  	downloadWorkers = pdc.buildDownloadWorkers(workers, newTestDownloadState())
   977  	if len(downloadWorkers) != 5 {
   978  		t.Fatal("bad", len(downloadWorkers))
   979  	}
   980  	_, w1IsIndividual := downloadWorkers[0].(*individualWorker)
   981  	_, w2IsIndividual := downloadWorkers[1].(*individualWorker)
   982  	_, w3IsChimera := downloadWorkers[2].(*chimeraWorker)
   983  	_, w4IsChimera := downloadWorkers[3].(*chimeraWorker)
   984  	_, w5IsChimera := downloadWorkers[4].(*chimeraWorker)
   985  	if !(w1IsIndividual && w2IsIndividual && w3IsChimera && w4IsChimera && w5IsChimera) {
   986  		t.Fatal("bad")
   987  	}
   988  }
   989  
   990  // TestSplitMostLikelyLessLikely is a unit test that covers
   991  // 'splitMostlikelyLessLikely' on the projectDownloadChunk
   992  func TestSplitMostLikelyLessLikely(t *testing.T) {
   993  	t.Parallel()
   994  
   995  	// define some variables
   996  	workersNeeded := 2
   997  
   998  	// create pdc
   999  	pcws := newTestProjectChunkWorkerSet()
  1000  	pdc := newTestProjectDownloadChunk(pcws, nil)
  1001  	pdc.staticPieceIndices = []uint64{0, 1}
  1002  
  1003  	// mock the worker
  1004  	_, pk := crypto.GenerateKeyPair()
  1005  	spk := types.SiaPublicKey{
  1006  		Algorithm: types.SignatureEd25519,
  1007  		Key:       pk[:],
  1008  	}
  1009  	worker := new(worker)
  1010  	worker.staticHostPubKey = spk
  1011  
  1012  	// we will have 3 workers, one resolved one and two chimeras
  1013  	iw1 := &individualWorker{staticIdentifier: 1}
  1014  	cw1 := NewChimeraWorker(nil, 2)
  1015  	cw2 := NewChimeraWorker(nil, 3)
  1016  
  1017  	// mock the complete chances, make sure the chimeras have a higher chance,
  1018  	// meaning they are more likely to end up in the most likely set
  1019  	iw1.cachedCompleteChance = .1
  1020  	cw1.staticChanceComplete = .2
  1021  	cw2.staticChanceComplete = .3
  1022  
  1023  	// helper variables
  1024  	iw1Key := iw1.identifier()
  1025  	cw1Key := cw1.identifier()
  1026  	cw2Key := cw2.identifier()
  1027  
  1028  	// split the workers in most likely and less likely
  1029  	workers := []downloadWorker{iw1, cw1, cw2}
  1030  	mostLikely, lessLikely := pdc.splitMostlikelyLessLikely(workers, workersNeeded, newTestDownloadState())
  1031  
  1032  	// expect the most likely to consist of the 2 chimeras
  1033  	if len(mostLikely) != workersNeeded {
  1034  		t.Fatal("bad", len(mostLikely))
  1035  	}
  1036  	mostLikelyKey1 := mostLikely[0].identifier()
  1037  	mostLikelyKey2 := mostLikely[1].identifier()
  1038  	if !(mostLikelyKey1 == cw2Key && mostLikelyKey2 == cw1Key) {
  1039  		t.Fatal("bad")
  1040  	}
  1041  
  1042  	// assert the less likely set is empty
  1043  	if len(lessLikely) != 0 {
  1044  		t.Fatal("bad")
  1045  	}
  1046  
  1047  	// add a piece to the individual worker
  1048  	iw1.pieceIndices = append(iw1.pieceIndices, 2)
  1049  
  1050  	// assert it's now in the less likely set
  1051  	workers = []downloadWorker{iw1, cw1, cw2}
  1052  	_, lessLikely = pdc.splitMostlikelyLessLikely(workers, workersNeeded, newTestDownloadState())
  1053  	if len(lessLikely) != 1 {
  1054  		t.Fatal("bad")
  1055  	}
  1056  	lessLikelyKey1 := lessLikely[0].identifier()
  1057  	if lessLikelyKey1 != iw1Key {
  1058  		t.Fatal("bad")
  1059  	}
  1060  
  1061  	// reconfigure the pdc to only have 1 static piece
  1062  	pdc.staticPieceIndices = []uint64{0}
  1063  
  1064  	// assert most likely and less likely
  1065  	mostLikely, lessLikely = pdc.splitMostlikelyLessLikely(workers, workersNeeded, newTestDownloadState())
  1066  	if len(mostLikely) != 2 {
  1067  		t.Fatal("bad")
  1068  	}
  1069  	if len(lessLikely) != 1 {
  1070  		t.Fatal("bad")
  1071  	}
  1072  
  1073  	// we want to assert that despite its lower chance, the individual worker
  1074  	// now made it in the most likely set as it can resolve a piece the chimeras
  1075  	// can't - and both chimeras are present in the returned slices as well
  1076  	mostLikelyKey1 = mostLikely[0].identifier()
  1077  	mostLikelyKey2 = mostLikely[1].identifier()
  1078  	lessLikelyKey1 = lessLikely[0].identifier()
  1079  	if mostLikelyKey1 != cw2Key || mostLikelyKey2 != iw1Key || lessLikelyKey1 != cw1Key {
  1080  		t.Fatal("bad")
  1081  	}
  1082  }
  1083  
  1084  // TestBucketIndexRange is a unit test that verifies the functionality of the
  1085  // helper function BucketIndexRange
  1086  func TestBucketIndexRange(t *testing.T) {
  1087  	t.Parallel()
  1088  
  1089  	maxBI := skymodules.DistributionTrackerTotalBuckets - 1
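        	// bucketIndexRange is expected to return a window of one
        	// bucketIndexScanStep on either side of the given index, clamped to
        	// the [0, maxBI] range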
  1090  
  1091  	min, max := bucketIndexRange(0)
  1092  	if min != 0 || max != bucketIndexScanStep {
  1093  		t.Fatal("bad")
  1094  	}
  1095  
  1096  	min, max = bucketIndexRange(maxBI - bucketIndexScanStep)
  1097  	if min != maxBI-2*bucketIndexScanStep || max != maxBI {
  1098  		t.Fatal("bad")
  1099  	}
  1100  
  1101  	min, max = bucketIndexRange(bucketIndexScanStep)
  1102  	if min != 0 || max != 2*bucketIndexScanStep {
  1103  		t.Fatal("bad")
  1104  	}
  1105  
  1106  	// randomly generate a bucket index that's constructed in a way that the
  1107  	// range is not below 0 or above the max
  1108  	maxBII := uint64(skymodules.DistributionTrackerTotalBuckets - 2*bucketIndexScanStep)
  1109  	random := fastrand.Uint64n(maxBII) + bucketIndexScanStep
  1110  	min, max = bucketIndexRange(int(random))
  1111  	if min != int(random)-bucketIndexScanStep || max != int(random)+bucketIndexScanStep {
  1112  		t.Fatal("bad", random)
  1113  	}
  1114  }
  1115  
  1116  // TestIsGoodForDownload is a unit test that verifies the functionality of the
  1117  // helper function IsGoodForDownload
  1118  func TestIsGoodForDownload(t *testing.T) {
  1119  	t.Parallel()
  1120  
  1121  	w := mockWorker(0)
  1122  	sc := types.SiacoinPrecision
  1123  
  1124  	// assert happy case
  1125  	gfd := isGoodForDownload(w, []uint64{0})
  1126  	if !gfd {
  1127  		t.Fatal("bad")
  1128  	}
  1129  
  1130  	// assert workers with no pieces are not good for download
  1131  	gfd = isGoodForDownload(w, nil)
  1132  	if gfd {
  1133  		t.Fatal("bad")
  1134  	}
  1135  	gfd = isGoodForDownload(w, []uint64{})
  1136  	if gfd {
  1137  		t.Fatal("bad")
  1138  	}
  1139  
  1140  	// assert workers on maintenance cooldown are not good for download
  1141  	w.staticMaintenanceState.cooldownUntil = time.Now().Add(time.Minute)
  1142  	gfd = isGoodForDownload(w, []uint64{0})
  1143  	if gfd {
  1144  		t.Fatal("bad")
  1145  	}
  1146  	w.staticMaintenanceState.cooldownUntil = time.Time{} // reset
  1147  
  1148  	// assert workers that are not async ready are not good for download
  1149  	w.staticPriceTable().staticExpiryTime = time.Now().Add(-time.Minute)
  1150  	gfd = isGoodForDownload(w, []uint64{0})
  1151  	if gfd {
  1152  		t.Fatal("bad")
  1153  	}
  1154  	w.staticPriceTable().staticExpiryTime = time.Now().Add(time.Minute) // reset
  1155  
  1156  	// assert workers that are considered gouging are not good for download (we
  1157  	// trigger gouging detection by pushing the download bandwidth cost over the max)
  1158  	wc := new(workerCache)
  1159  	wc.staticRenterAllowance.MaxDownloadBandwidthPrice = sc
  1160  	atomic.StorePointer(&w.atomicCache, unsafe.Pointer(wc))
  1161  	w.staticPriceTable().staticPriceTable.DownloadBandwidthCost = sc.Mul64(2)
  1162  	gfd = isGoodForDownload(w, []uint64{0})
  1163  	if gfd {
  1164  		t.Fatal("bad")
  1165  	}
  1166  }
  1167  
  1168  // newTestIndivualWorker is a helper function that returns an individualWorker
  1169  // for testing purposes.
  1170  func newTestIndivualWorker(hostPubKeyStr string, identifier uint32, availabilityRate float64, readDuration time.Duration, pieceIndices []uint64) *individualWorker {
  1171  	w := mockWorker(readDuration)
  1172  	w.staticHostPubKeyStr = hostPubKeyStr
  1173  
  1174  	sc, _ := types.SiacoinPrecision.Float64()
  1175  	iw := &individualWorker{
  1176  		pieceIndices:             pieceIndices,
  1177  		staticAvailabilityRate:   availabilityRate,
  1178  		staticCost:               sc,
  1179  		staticDownloadLaunchTime: time.Now(),
  1180  		staticIdentifier:         identifier,
  1181  		staticLookupDistribution: *skymodules.NewDistribution(15 * time.Minute),
  1182  		staticReadDistribution:   *skymodules.NewDistribution(15 * time.Minute),
  1183  		staticWorker:             w,
  1184  	}
  1185  	return iw
  1186  }
  1187  
  1188  // TestPartitionWorkers is a unit test for partitionWorkers.
  1189  func TestPartitionWorkers(t *testing.T) {
  1190  	w1 := &individualWorker{cachedCompleteChance: 0.1, staticIdentifier: 1}
  1191  	w2 := &individualWorker{cachedCompleteChance: 0.2, staticIdentifier: 2}
  1192  	w3 := &individualWorker{cachedCompleteChance: 0.3, staticIdentifier: 3}
  1193  	w4 := &individualWorker{cachedCompleteChance: 0.4, staticIdentifier: 4}
  1194  	w5 := &individualWorker{cachedCompleteChance: 0.5, staticIdentifier: 5}
  1195  
  1196  	// Partition all workers with a chance of <= 0.3 to the left and the
  1197  	// others to the right.
  1198  	iws := []*individualWorker{w1, w2, w3, w4, w5}
  1199  	left, right := partitionWorkers(iws, func(i int) bool { return iws[i].cachedCompleteChance <= 0.3 })
  1200  
  1201  	expectedLeft := []*individualWorker{w1, w2, w3}
  1202  	expectedRight := []*individualWorker{w5, w4}
  1203  	for i := range left {
  1204  		if left[i] != expectedLeft[i] {
  1205  			t.Fatal("wrong left", i)
  1206  		}
  1207  	}
  1208  	for i := range right {
  1209  		if right[i] != expectedRight[i] {
  1210  			t.Fatal("wrong right", i)
  1211  		}
  1212  	}
  1213  
  1214  	// iws will be a bit scrambled at the end since the algorithm only
  1215  	// partitions but doesn't preserve the order.
  1216  	expectedIWS := []*individualWorker{w1, w2, w3, w5, w4}
  1217  	for i := range iws {
  1218  		if iws[i] != expectedIWS[i] {
  1219  			t.Fatal("wrong iws", i)
  1220  		}
  1221  	}
  1222  
  1223  	// Partition the right partition again using <0.5 this time.
  1224  	left, right = partitionWorkers(right, func(i int) bool { return right[i].cachedCompleteChance < 0.5 })
  1225  
  1226  	expectedLeft = []*individualWorker{w4}
  1227  	expectedRight = []*individualWorker{w5}
  1228  	for i := range left {
  1229  		if left[i] != expectedLeft[i] {
  1230  			t.Fatal("wrong left", i)
  1231  		}
  1232  	}
  1233  	for i := range right {
  1234  		if right[i] != expectedRight[i] {
  1235  			t.Fatal("wrong right", i)
  1236  		}
  1237  	}
  1238  
  1239  	// Since all of that happened in place, iws should be sorted again.
  1240  	expectedIWS = []*individualWorker{w1, w2, w3, w4, w5}
  1241  	for i := range iws {
  1242  		if iws[i] != expectedIWS[i] {
  1243  			t.Fatal("wrong iws", i)
  1244  		}
  1245  	}
  1246  }