gitlab.com/SiaPrime/SiaPrime@v1.4.1/modules/renter/uploadheap_test.go

package renter

import (
	"fmt"
	"math"
	"os"
	"testing"

	"gitlab.com/SiaPrime/SiaPrime/crypto"
	"gitlab.com/SiaPrime/SiaPrime/modules"
	"gitlab.com/SiaPrime/SiaPrime/modules/renter/siadir"
	"gitlab.com/SiaPrime/SiaPrime/modules/renter/siafile"
	"gitlab.com/SiaPrime/SiaPrime/siatest/dependencies"
)

// TestBuildUnfinishedChunks probes managedBuildUnfinishedChunks to make sure
// that the correct chunks are being returned for repair
func TestBuildUnfinishedChunks(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()

	// Create Renter
	rt, err := newRenterTesterWithDependency(t.Name(), &dependencies.DependencyDisableRepairAndHealthLoops{})
	if err != nil {
		t.Fatal(err)
	}
	defer rt.Close()

	// Create file on disk
	path, err := rt.createZeroByteFileOnDisk()
	if err != nil {
		t.Fatal(err)
	}
	// Create file with more than 1 chunk and mark the first chunk as stuck
	rsc, _ := siafile.NewRSCode(1, 1)
	siaPath, err := modules.NewSiaPath("stuckFile")
	if err != nil {
		t.Fatal(err)
	}
	up := modules.FileUploadParams{
		Source:      path,
		SiaPath:     siaPath,
		ErasureCode: rsc,
	}
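	// The size passed to NewSiaFile (10e3) is chosen so that the file spans
	// more than one chunk; the check below enforces this.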
	f, err := rt.renter.staticFileSet.NewSiaFile(up, crypto.GenerateSiaKey(crypto.RandomCipherType()), 10e3, 0777)
	if err != nil {
		t.Fatal(err)
	}
	if f.NumChunks() <= 1 {
		t.Fatalf("File created with not enough chunks for test, have %v need at least 2", f.NumChunks())
	}
	if err = f.SetStuck(uint64(0), true); err != nil {
		t.Fatal(err)
	}

	// Create maps to pass into methods
	hosts := make(map[string]struct{})
	offline := make(map[string]bool)
	goodForRenew := make(map[string]bool)

	// Manually add workers to worker pool
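	// One worker per chunk is enough here; each worker only needs its kill
	// channel initialized.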
	for i := 0; i < int(f.NumChunks()); i++ {
		rt.renter.staticWorkerPool.workers[fmt.Sprint(i)] = &worker{
			killChan: make(chan struct{}),
		}
	}

	// Call managedBuildUnfinishedChunks as the non-stuck loop; all unstuck
	// chunks should be returned
	uucs := rt.renter.managedBuildUnfinishedChunks(f, hosts, targetUnstuckChunks, offline, goodForRenew)
	if len(uucs) != int(f.NumChunks())-1 {
		t.Fatalf("Incorrect number of chunks returned, expected %v got %v", int(f.NumChunks())-1, len(uucs))
	}
	for _, c := range uucs {
		if c.stuck {
			t.Fatal("Found stuck chunk when expecting only unstuck chunks")
		}
	}

	// Call managedBuildUnfinishedChunks as the stuck loop; all stuck chunks
	// should be returned
	uucs = rt.renter.managedBuildUnfinishedChunks(f, hosts, targetStuckChunks, offline, goodForRenew)
	if len(uucs) != 1 {
		t.Fatalf("Incorrect number of chunks returned, expected 1 got %v", len(uucs))
	}
	for _, c := range uucs {
		if !c.stuck {
			t.Fatal("Found unstuck chunk when expecting only stuck chunks")
		}
	}

	// Remove file on disk to make file not repairable
	err = os.Remove(path)
	if err != nil {
		t.Fatal(err)
	}

	// Call managedBuildUnfinishedChunks as the non-stuck loop; since the file
	// is now not repairable it should return no chunks
	uucs = rt.renter.managedBuildUnfinishedChunks(f, hosts, targetUnstuckChunks, offline, goodForRenew)
	if len(uucs) != 0 {
		t.Fatalf("Incorrect number of chunks returned, expected 0 got %v", len(uucs))
	}

	// Call managedBuildUnfinishedChunks as the stuck loop. All chunks should
	// be returned because they should have been marked as stuck by the
	// previous call, and stuck chunks are still returned even if the file is
	// not repairable.
	uucs = rt.renter.managedBuildUnfinishedChunks(f, hosts, targetStuckChunks, offline, goodForRenew)
	if len(uucs) != int(f.NumChunks()) {
		t.Fatalf("Incorrect number of chunks returned, expected %v got %v", f.NumChunks(), len(uucs))
	}
	for _, c := range uucs {
		if !c.stuck {
			t.Fatal("Found unstuck chunk when expecting only stuck chunks")
		}
	}
}

// TestBuildChunkHeap probes managedBuildChunkHeap to make sure that the correct
// chunks are being added to the heap
func TestBuildChunkHeap(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()

	// Create Renter
	rt, err := newRenterTesterWithDependency(t.Name(), &dependencies.DependencyDisableRepairAndHealthLoops{})
	if err != nil {
		t.Fatal(err)
	}
	defer rt.Close()

	// Create 2 files
	rsc, _ := siafile.NewRSCode(1, 1)
	up := modules.FileUploadParams{
		Source:      "",
		SiaPath:     modules.RandomSiaPath(),
		ErasureCode: rsc,
	}
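	// Source is left empty; the files created here are never uploaded, they
	// only need to exist in the renter's file set for the heap-building calls
	// below.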
	f1, err := rt.renter.staticFileSet.NewSiaFile(up, crypto.GenerateSiaKey(crypto.RandomCipherType()), 10e3, 0777)
	if err != nil {
		t.Fatal(err)
	}
	up.SiaPath = modules.RandomSiaPath()
	f2, err := rt.renter.staticFileSet.NewSiaFile(up, crypto.GenerateSiaKey(crypto.RandomCipherType()), 10e3, 0777)
	if err != nil {
		t.Fatal(err)
	}

	// Manually add workers to worker pool and create host map
	hosts := make(map[string]struct{})
	for i := 0; i < int(f1.NumChunks()+f2.NumChunks()); i++ {
		rt.renter.staticWorkerPool.workers[fmt.Sprint(i)] = &worker{
			killChan: make(chan struct{}),
		}
	}

	// Call managedBuildChunkHeap as the stuck loop; since there are no stuck
	// chunks there should be no chunks in the upload heap
	rt.renter.managedBuildChunkHeap(modules.RootSiaPath(), hosts, targetStuckChunks)
	if rt.renter.uploadHeap.managedLen() != 0 {
		t.Fatalf("Expected heap length of %v but got %v", 0, rt.renter.uploadHeap.managedLen())
	}

	// Call managedBuildChunkHeap as the non-stuck loop. Since we didn't upload
	// the files we created, nor do we have contracts, all the chunks will be
	// viewed as not downloadable because they have a health of >1. Therefore
	// we shouldn't see any chunks in the heap.
	rt.renter.managedBuildChunkHeap(modules.RootSiaPath(), hosts, targetUnstuckChunks)
	if rt.renter.uploadHeap.managedLen() != 0 {
		t.Fatalf("Expected heap length of %v but got %v", 0, rt.renter.uploadHeap.managedLen())
	}

	// Call managedBuildChunkHeap again as the stuck loop. Since the previous
	// call saw all the chunks as not downloadable, it will have marked them as
	// stuck.
	//
	// For the stuck loop, managedBuildChunkHeap will randomly grab one chunk
	// from up to maxChunksInHeap files to add to the heap. There are two files
	// created in the test, so we expect min(2, maxStuckChunksInHeap) chunks to
	// be added to the heap.
	rt.renter.managedBuildChunkHeap(modules.RootSiaPath(), hosts, targetStuckChunks)
	expectedChunks := math.Min(2, float64(maxStuckChunksInHeap))
	if rt.renter.uploadHeap.managedLen() != int(expectedChunks) {
		t.Fatalf("Expected heap length of %v but got %v", expectedChunks, rt.renter.uploadHeap.managedLen())
	}

	// Pop all chunks off and confirm they are stuck and marked as stuckRepair
	chunk := rt.renter.uploadHeap.managedPop()
	for chunk != nil {
		if !chunk.stuck || !chunk.stuckRepair {
			t.Log("Stuck:", chunk.stuck)
			t.Log("StuckRepair:", chunk.stuckRepair)
			t.Fatal("Chunk has incorrect stuck fields")
		}
		chunk = rt.renter.uploadHeap.managedPop()
	}
}

// TestUploadHeap probes the upload heap to make sure chunks are sorted
// correctly
func TestUploadHeap(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()

	// Create renter
	rt, err := newRenterTesterWithDependency(t.Name(), &dependencies.DependencyDisableRepairAndHealthLoops{})
	if err != nil {
		t.Fatal(err)
	}
	defer rt.Close()

	// Add chunks to heap. Chunks are prioritized by stuck status first and
	// then by piecesCompleted/piecesNeeded.
	//
	// Add 2 stuck chunks and then 2 unstuck chunks; each set has one chunk
	// with 1 piece completed and one with 2 pieces completed. If the heap
	// didn't sort itself, this would put an unstuck chunk with the highest
	// completion at the top of the heap, which would be wrong.
	chunk := &unfinishedUploadChunk{
		id: uploadChunkID{
			fileUID: "stuck",
			index:   1,
		},
		stuck:           true,
		piecesCompleted: 1,
		piecesNeeded:    1,
	}
	if !rt.renter.uploadHeap.managedPush(chunk) {
		t.Fatal("unable to push chunk", chunk)
	}
	chunk = &unfinishedUploadChunk{
		id: uploadChunkID{
			fileUID: "stuck",
			index:   2,
		},
		stuck:           true,
		piecesCompleted: 2,
		piecesNeeded:    1,
	}
	if !rt.renter.uploadHeap.managedPush(chunk) {
		t.Fatal("unable to push chunk", chunk)
	}
	chunk = &unfinishedUploadChunk{
		id: uploadChunkID{
			fileUID: "unstuck",
			index:   1,
		},
		stuck:           false,
		piecesCompleted: 1,
		piecesNeeded:    1,
	}
	if !rt.renter.uploadHeap.managedPush(chunk) {
		t.Fatal("unable to push chunk", chunk)
	}
	chunk = &unfinishedUploadChunk{
		id: uploadChunkID{
			fileUID: "unstuck",
			index:   2,
		},
		stuck:           false,
		piecesCompleted: 2,
		piecesNeeded:    1,
	}
	if !rt.renter.uploadHeap.managedPush(chunk) {
		t.Fatal("unable to push chunk", chunk)
	}

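	// Pop the top chunk. Given the ordering described above, it should be a
	// stuck chunk with only 1 piece completed.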
	chunk = rt.renter.uploadHeap.managedPop()
	if !chunk.stuck {
		t.Fatal("top chunk should be stuck")
	}
	if chunk.piecesCompleted != 1 {
		t.Fatal("top chunk should have the fewest completed pieces")
	}
}

// TestAddChunksToHeap probes the managedAddChunksToHeap method to ensure it is
// functioning as intended
func TestAddChunksToHeap(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()

	// Create Renter
	rt, err := newRenterTesterWithDependency(t.Name(), &dependencies.DependencyDisableRepairAndHealthLoops{})
	if err != nil {
		t.Fatal(err)
	}
	defer rt.Close()

	// Create File params
	_, rsc := testingFileParams()
	source, err := rt.createZeroByteFileOnDisk()
	if err != nil {
		t.Fatal(err)
	}
	up := modules.FileUploadParams{
		Source:      source,
		ErasureCode: rsc,
	}
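	// up.SiaPath is intentionally left unset here; it is filled in for each
	// file in the loop below.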

	// Create files in multiple directories
	var numChunks uint64
	var dirSiaPaths []modules.SiaPath
	names := []string{"rootFile", "subdir/File", "subdir2/file"}
	for _, name := range names {
		siaPath, err := modules.NewSiaPath(name)
		if err != nil {
			t.Fatal(err)
		}
		up.SiaPath = siaPath
		f, err := rt.renter.staticFileSet.NewSiaFile(up, crypto.GenerateSiaKey(crypto.RandomCipherType()), modules.SectorSize, 0777)
		if err != nil {
			t.Fatal(err)
		}
		// Track number of chunks
		numChunks += f.NumChunks()
		dirSiaPath, err := siaPath.Dir()
		if err != nil {
			t.Fatal(err)
		}
		// Make sure directories are created
		err = rt.renter.CreateDir(dirSiaPath)
		if err != nil && err != siadir.ErrPathOverload {
			t.Fatal(err)
		}
		dirSiaPaths = append(dirSiaPaths, dirSiaPath)
	}

	// Call bubble to ensure directory metadata is updated
	for _, siaPath := range dirSiaPaths {
		err := rt.renter.managedBubbleMetadata(siaPath)
		if err != nil {
			t.Fatal(err)
		}
	}

	// Manually add workers to worker pool and create host map
	hosts := make(map[string]struct{})
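	// One worker is added for each data piece of the erasure code.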
	for i := 0; i < rsc.MinPieces(); i++ {
		rt.renter.staticWorkerPool.workers[fmt.Sprint(i)] = &worker{
			killChan: make(chan struct{}),
		}
	}

	// Make sure the directory heap is ready
	err = rt.renter.managedPushUnexploredDirectory(modules.RootSiaPath())
	if err != nil {
		t.Fatal(err)
	}

	// Call managedAddChunksToHeap
	siaPaths, err := rt.renter.managedAddChunksToHeap(hosts)
	if err != nil {
		t.Fatal(err)
	}

	// Confirm that all chunks from all the directories were added, since there
	// are not enough chunks in any single directory to fill the heap
	if len(siaPaths) != 3 {
		t.Fatal("Expected 3 siaPaths to be returned, got", siaPaths)
	}
	if rt.renter.uploadHeap.managedLen() != int(numChunks) {
		t.Fatalf("Expected uploadHeap to have %v chunks but it has %v chunks", numChunks, rt.renter.uploadHeap.managedLen())
	}
}

// TestAddDirectoryBackToHeap ensures that when not all of the chunks in a
// directory are added to the uploadHeap, the directory is added back to the
// directoryHeap with an updated health
func TestAddDirectoryBackToHeap(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()

	// Create Renter with a dependency that disables the repair and health loops
	rt, err := newRenterTesterWithDependency(t.Name(), &dependencies.DependencyDisableRepairAndHealthLoops{})
	if err != nil {
		t.Fatal(err)
	}
	defer rt.Close()

	// Create file
	rsc, _ := siafile.NewRSCode(1, 1)
	siaPath, err := modules.NewSiaPath("test")
	if err != nil {
		t.Fatal(err)
	}
	source, err := rt.createZeroByteFileOnDisk()
	if err != nil {
		t.Fatal(err)
	}
	up := modules.FileUploadParams{
		Source:      source,
		SiaPath:     siaPath,
		ErasureCode: rsc,
	}
	f, err := rt.renter.staticFileSet.NewSiaFile(up, crypto.GenerateSiaKey(crypto.RandomCipherType()), modules.SectorSize, 0777)
	if err != nil {
		t.Fatal(err)
	}

	// Create maps for method inputs
	hosts := make(map[string]struct{})
	offline := make(map[string]bool)
	goodForRenew := make(map[string]bool)

	// Manually add workers to worker pool
	for i := 0; i < int(f.NumChunks()); i++ {
		rt.renter.staticWorkerPool.workers[fmt.Sprint(i)] = &worker{
			killChan: make(chan struct{}),
		}
	}

	// Confirm we are starting with an empty upload heap
	if rt.renter.uploadHeap.managedLen() != 0 {
		t.Fatal("Expected upload heap to be empty but has length of", rt.renter.uploadHeap.managedLen())
	}
	// The directory heap is initialized with the root directory, so it should
	// contain exactly one directory.
	if rt.renter.directoryHeap.managedLen() != 1 {
		t.Fatal("Expected directory heap to have 1 directory but has length of", rt.renter.directoryHeap.managedLen())
	}
	// Reset the directory heap to clear out the root directory; the rest of
	// the test expects an empty heap.
	rt.renter.directoryHeap.managedReset()

	// Add chunks from file to uploadHeap
	rt.renter.managedBuildAndPushChunks([]*siafile.SiaFileSetEntry{f}, hosts, targetUnstuckChunks, offline, goodForRenew)

	// Upload heap should now have NumChunks chunks and directory heap should still be empty
	if rt.renter.uploadHeap.managedLen() != int(f.NumChunks()) {
		t.Fatalf("Expected upload heap to be of size %v but was %v", f.NumChunks(), rt.renter.uploadHeap.managedLen())
	}
	if rt.renter.directoryHeap.managedLen() != 0 {
		t.Fatal("Expected directory heap to be empty but has length of", rt.renter.directoryHeap.managedLen())
	}

	// Empty uploadHeap
	rt.renter.uploadHeap.managedReset()

	// Fill the upload heap with chunks that have worse health than the chunks
	// in the file
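	// (a piecesCompleted of -1 gives these filler chunks the worst possible
	// completion ratio)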
	var i uint64
	for rt.renter.uploadHeap.managedLen() < maxUploadHeapChunks {
		chunk := &unfinishedUploadChunk{
			id: uploadChunkID{
				fileUID: "chunk",
				index:   i,
			},
			stuck:           false,
			piecesCompleted: -1,
			piecesNeeded:    1,
		}
		if !rt.renter.uploadHeap.managedPush(chunk) {
			t.Fatal("Chunk should have been added to heap")
		}
		i++
	}

	// Record length of upload heap
	uploadHeapLen := rt.renter.uploadHeap.managedLen()

	// Try to add chunks to the upload heap again
	rt.renter.managedBuildAndPushChunks([]*siafile.SiaFileSetEntry{f}, hosts, targetUnstuckChunks, offline, goodForRenew)

	// No chunks should have been added to the upload heap
	if rt.renter.uploadHeap.managedLen() != uploadHeapLen {
		t.Fatalf("Expected upload heap to be of size %v but was %v", uploadHeapLen, rt.renter.uploadHeap.managedLen())
	}
	// There should be one directory in the directory heap now
	if rt.renter.directoryHeap.managedLen() != 1 {
		t.Fatal("Expected directory heap to have 1 element but has length of", rt.renter.directoryHeap.managedLen())
	}
	// The directory should be marked as explored
	d := rt.renter.directoryHeap.managedPop()
	if !d.explored {
		t.Fatal("Directory should be explored")
	}
	// The directory should be the root directory, as that is where we created
	// the test file
	if !d.siaPath.Equals(modules.RootSiaPath()) {
		t.Fatal("Expected Directory siapath to be the root siaPath but was", d.siaPath.String())
	}
	// The directory health should be that of the file since none of the chunks
	// were added
	health, _, _, _, _ := f.Health(offline, goodForRenew)
	if d.health != health {
		t.Fatalf("Expected directory health to be %v but was %v", health, d.health)
	}
}

// TestUploadHeapMaps tests that the uploadHeap's maps are properly updated
// through pushing, popping, and resetting the heap
func TestUploadHeapMaps(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()

	// Create renter
	rt, err := newRenterTesterWithDependency(t.Name(), &dependencies.DependencyDisableRepairAndHealthLoops{})
	if err != nil {
		t.Fatal(err)
	}
	defer rt.Close()

	// Add stuck and unstuck chunks to heap to fill up the heap maps
	numHeapChunks := uint64(10)
	sf, err := rt.renter.newRenterTestFile()
	if err != nil {
		t.Fatal(err)
	}
	for i := uint64(0); i < numHeapChunks; i++ {
		// Create copy of siafile entry to be closed by reset
		copy, err := sf.CopyEntry()
		if err != nil {
			t.Fatal(err)
		}
		// Create minimal chunk
		stuck := i%2 == 0
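		// Alternating the stuck flag ensures both the stuck and unstuck heap
		// maps receive entries.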
		chunk := &unfinishedUploadChunk{
			id: uploadChunkID{
				fileUID: siafile.SiafileUID(fmt.Sprintf("chunk - %v", i)),
				index:   i,
			},
			fileEntry:       copy,
			stuck:           stuck,
			piecesCompleted: 1,
			piecesNeeded:    1,
		}
		// Push chunk to heap
		if !rt.renter.uploadHeap.managedPush(chunk) {
			t.Fatal("unable to push chunk", chunk)
		}
		// Confirm chunk is in the correct map
		if stuck {
			_, ok := rt.renter.uploadHeap.stuckHeapChunks[chunk.id]
			if !ok {
				t.Fatal("stuck chunk not in stuck chunk heap map")
			}
		} else {
			_, ok := rt.renter.uploadHeap.unstuckHeapChunks[chunk.id]
			if !ok {
				t.Fatal("unstuck chunk not in unstuck chunk heap map")
			}
		}
	}

	// Close original siafile entry
	if err := sf.Close(); err != nil {
		t.Fatal(err)
	}

	// Confirm length of maps
	if len(rt.renter.uploadHeap.unstuckHeapChunks) != int(numHeapChunks/2) {
		t.Fatalf("Expected %v unstuck chunks in map but found %v", numHeapChunks/2, len(rt.renter.uploadHeap.unstuckHeapChunks))
	}
	if len(rt.renter.uploadHeap.stuckHeapChunks) != int(numHeapChunks/2) {
		t.Fatalf("Expected %v stuck chunks in map but found %v", numHeapChunks/2, len(rt.renter.uploadHeap.stuckHeapChunks))
	}
	if len(rt.renter.uploadHeap.repairingChunks) != 0 {
		t.Fatalf("Expected %v repairing chunks in map but found %v", 0, len(rt.renter.uploadHeap.repairingChunks))
	}

	// Pop off some chunks
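	// Popping should move each chunk from its stuck/unstuck map into the
	// repairing map, which is verified below.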
	poppedChunks := 3
	for i := 0; i < poppedChunks; i++ {
		// Pop chunk
		chunk := rt.renter.uploadHeap.managedPop()
		// Confirm it is in the repairing map
		_, ok := rt.renter.uploadHeap.repairingChunks[chunk.id]
		if !ok {
			t.Fatal("popped chunk not found in repairing map")
		}
		// Confirm the chunk cannot be pushed back onto the heap
		if rt.renter.uploadHeap.managedPush(chunk) {
			t.Fatal("should not have been able to push chunk back onto heap")
		}
	}

	// Confirm length of maps
	if len(rt.renter.uploadHeap.repairingChunks) != poppedChunks {
		t.Fatalf("Expected %v repairing chunks in map but found %v", poppedChunks, len(rt.renter.uploadHeap.repairingChunks))
	}
	remainingChunks := len(rt.renter.uploadHeap.unstuckHeapChunks) + len(rt.renter.uploadHeap.stuckHeapChunks)
	if remainingChunks != int(numHeapChunks)-poppedChunks {
		t.Fatalf("Expected %v chunks to still be in the heap maps but found %v", int(numHeapChunks)-poppedChunks, remainingChunks)
	}

	// Reset the heap
	if err := rt.renter.uploadHeap.managedReset(); err != nil {
		t.Fatal(err)
	}

	// Confirm length of maps
	if len(rt.renter.uploadHeap.repairingChunks) != poppedChunks {
		t.Fatalf("Expected %v repairing chunks in map but found %v", poppedChunks, len(rt.renter.uploadHeap.repairingChunks))
	}
	remainingChunks = len(rt.renter.uploadHeap.unstuckHeapChunks) + len(rt.renter.uploadHeap.stuckHeapChunks)
	if remainingChunks != 0 {
		t.Fatalf("Expected %v chunks to still be in the heap maps but found %v", 0, remainingChunks)
	}
}