github.com/fozzysec/SiaPrime@v0.0.0-20190612043147-66c8e8d11fe3/siatest/renter/renter_test.go (about)

     1  package renter
     2  
     3  import (
     4  	"fmt"
     5  	"io"
     6  	"math"
     7  	"math/big"
     8  	"os"
     9  	"path/filepath"
    10  	"reflect"
    11  	"sort"
    12  	"strconv"
    13  	"sync"
    14  	"testing"
    15  	"time"
    16  
    17  	"SiaPrime/build"
    18  	"SiaPrime/crypto"
    19  	"SiaPrime/modules"
    20  	"SiaPrime/modules/renter"
    21  	"SiaPrime/node"
    22  	"SiaPrime/node/api"
    23  	"SiaPrime/node/api/client"
    24  	"SiaPrime/siatest"
    25  	"SiaPrime/types"
    26  
    27  	"gitlab.com/NebulousLabs/errors"
    28  	"gitlab.com/NebulousLabs/fastrand"
    29  )
    30  
    31  // Test Limitations
    32  //
    33  // Timeouts - when possible test should be run in parallel to improve runtime
    34  //
    35  // panic: too many open files - There is a limit to how many tests can be run in
    36  // parallel (~10).  When too many test groups are trying to be created at the
    37  // same time the test package panics because to many files have been created and
    38  // it can't support any more tests
    39  
    40  // test is a helper struct for running subtests when tests can use the same test
    41  // group
    42  type test struct {
    43  	name string
    44  	test func(*testing.T, *siatest.TestGroup)
    45  }
    46  
    47  // runRenterTests is a helper function to run the subtests when tests can use
    48  // the same test group
    49  func runRenterTests(t *testing.T, gp siatest.GroupParams, tests []test) error {
    50  	tg, err := siatest.NewGroupFromTemplate(siatest.TestDir(t.Name()), gp)
    51  	if err != nil {
    52  		return errors.AddContext(err, "failed to create group")
    53  	}
    54  	defer func() {
    55  		if err := tg.Close(); err != nil {
    56  			t.Fatal(err)
    57  		}
    58  	}()
    59  	// Run subtests
    60  	for _, test := range tests {
    61  		t.Run(test.name, func(t *testing.T) {
    62  			test.test(t, tg)
    63  		})
    64  	}
    65  	return nil
    66  }
    67  
    68  // TestRenter executes a number of subtests using the same TestGroup to
    69  // save time on initialization
    70  func TestRenter(t *testing.T) {
    71  	if testing.Short() {
    72  		t.SkipNow()
    73  	}
    74  	t.Parallel()
    75  
    76  	// Create a group for the subtests
    77  	groupParams := siatest.GroupParams{
    78  		Hosts:   5,
    79  		Renters: 1,
    80  		Miners:  1,
    81  	}
    82  
    83  	// Specify subtests to run
    84  	subTests := []test{
    85  		{"TestClearDownloadHistory", testClearDownloadHistory},
    86  		{"TestSetFileTrackingPath", testSetFileTrackingPath},
    87  		{"TestDownloadAfterRenew", testDownloadAfterRenew},
    88  		{"TestDownloadMultipleLargeSectors", testDownloadMultipleLargeSectors},
    89  		{"TestLocalRepair", testLocalRepair},
    90  	}
    91  
    92  	// Run tests
    93  	if err := runRenterTests(t, groupParams, subTests); err != nil {
    94  		t.Fatal(err)
    95  	}
    96  }
    97  
    98  // TestRenterTwo executes a number of subtests using the same TestGroup to
    99  // save time on initialization
   100  func TestRenterTwo(t *testing.T) {
   101  	if testing.Short() {
   102  		t.SkipNow()
   103  	}
   104  	t.Parallel()
   105  
   106  	// Create a group for the subtests
   107  	groupParams := siatest.GroupParams{
   108  		Hosts:   5,
   109  		Renters: 1,
   110  		Miners:  1,
   111  	}
   112  
   113  	// Specify subtests to run
   114  	subTests := []test{
   115  		{"TestReceivedFieldEqualsFileSize", testReceivedFieldEqualsFileSize},
   116  		{"TestRemoteRepair", testRemoteRepair},
   117  		{"TestSingleFileGet", testSingleFileGet},
   118  		{"TestStreamingCache", testStreamingCache},
   119  		{"TestUploadDownload", testUploadDownload},
   120  	}
   121  
   122  	// Run tests
   123  	if err := runRenterTests(t, groupParams, subTests); err != nil {
   124  		t.Fatal(err)
   125  	}
   126  }
   127  
   128  // testReceivedFieldEqualsFileSize tests that the bug that caused finished
   129  // downloads to stall in the UI and siac is gone.
   130  func testReceivedFieldEqualsFileSize(t *testing.T, tg *siatest.TestGroup) {
   131  	// Make sure the test has enough hosts.
   132  	if len(tg.Hosts()) < 4 {
   133  		t.Fatal("testReceivedFieldEqualsFileSize requires at least 4 hosts")
   134  	}
   135  	// Grab the first of the group's renters
   136  	r := tg.Renters()[0]
   137  
   138  	// Clear the download history to make sure it's empty before we start the test.
   139  	err := r.RenterClearAllDownloadsPost()
   140  	if err != nil {
   141  		t.Fatal(err)
   142  	}
   143  
   144  	// Upload a file.
   145  	dataPieces := uint64(3)
   146  	parityPieces := uint64(1)
   147  	fileSize := int(modules.SectorSize)
   148  	lf, rf, err := r.UploadNewFileBlocking(fileSize, dataPieces, parityPieces)
   149  	if err != nil {
   150  		t.Fatal("Failed to upload a file for testing: ", err)
   151  	}
   152  
   153  	// This code sums up the 'received' variable in a similar way the renter
   154  	// does it. We use it to find a fetchLen for which received != fetchLen due
   155  	// to the implicit rounding of the unsigned integers.
   156  	var fetchLen uint64
   157  	for fetchLen = uint64(100); ; fetchLen++ {
   158  		received := uint64(0)
   159  		for piecesCompleted := uint64(1); piecesCompleted <= dataPieces; piecesCompleted++ {
   160  			received += fetchLen / dataPieces
   161  		}
   162  		if received != fetchLen {
   163  			break
   164  		}
   165  	}
   166  
   167  	// Download fetchLen bytes of the file.
   168  	_, err = r.DownloadToDiskPartial(rf, lf, false, 0, fetchLen)
   169  	if err != nil {
   170  		t.Fatal(err)
   171  	}
   172  
   173  	// Get the download.
   174  	rdg, err := r.RenterDownloadsGet()
   175  	if err != nil {
   176  		t.Fatal(err)
   177  	}
   178  	d := rdg.Downloads[0]
   179  
   180  	// Make sure that 'Received' matches the amount of data we fetched.
   181  	if !d.Completed {
   182  		t.Error("Download should be completed but wasn't")
   183  	}
   184  	if d.Received != fetchLen {
   185  		t.Errorf("Received was %v but should be %v", d.Received, fetchLen)
   186  	}
   187  }
   188  
   189  // testClearDownloadHistory makes sure that the download history is
   190  // properly cleared when called through the API
   191  func testClearDownloadHistory(t *testing.T, tg *siatest.TestGroup) {
   192  	// Grab the first of the group's renters
   193  	r := tg.Renters()[0]
   194  
   195  	rdg, err := r.RenterDownloadsGet()
   196  	if err != nil {
   197  		t.Fatal("Could not get download history:", err)
   198  	}
   199  	numDownloads := 10
   200  	if len(rdg.Downloads) < numDownloads {
   201  		remainingDownloads := numDownloads - len(rdg.Downloads)
   202  		rf, err := r.RenterFilesGet()
   203  		if err != nil {
   204  			t.Fatal(err)
   205  		}
   206  		// Check if the renter has any files
   207  		// Upload a file if none
   208  		if len(rf.Files) == 0 {
   209  			dataPieces := uint64(1)
   210  			parityPieces := uint64(1)
   211  			fileSize := 100 + siatest.Fuzz()
   212  			_, _, err := r.UploadNewFileBlocking(fileSize, dataPieces, parityPieces)
   213  			if err != nil {
   214  				t.Fatal("Failed to upload a file for testing: ", err)
   215  			}
   216  			rf, err = r.RenterFilesGet()
   217  			if err != nil {
   218  				t.Fatal(err)
   219  			}
   220  		}
   221  		// Download files to build download history
   222  		dest := filepath.Join(siatest.SiaTestingDir, strconv.Itoa(fastrand.Intn(math.MaxInt32)))
   223  		for i := 0; i < remainingDownloads; i++ {
   224  			err = r.RenterDownloadGet(rf.Files[0].SiaPath, dest, 0, rf.Files[0].Filesize, false)
   225  			if err != nil {
   226  				t.Fatal("Could not Download file:", err)
   227  			}
   228  		}
   229  		rdg, err = r.RenterDownloadsGet()
   230  		if err != nil {
   231  			t.Fatal("Could not get download history:", err)
   232  		}
   233  		// Confirm download history is not empty
   234  		if len(rdg.Downloads) != numDownloads {
   235  			t.Fatalf("Not all downloads added to download history: only %v downloads added, expected %v", len(rdg.Downloads), numDownloads)
   236  		}
   237  	}
   238  	numDownloads = len(rdg.Downloads)
   239  
   240  	// Check removing one download from history
   241  	// Remove First Download
   242  	timestamp := rdg.Downloads[0].StartTime
   243  	err = r.RenterClearDownloadsRangePost(timestamp, timestamp)
   244  	if err != nil {
   245  		t.Fatal("Error in API endpoint to remove download from history:", err)
   246  	}
   247  	numDownloads--
   248  	rdg, err = r.RenterDownloadsGet()
   249  	if err != nil {
   250  		t.Fatal("Could not get download history:", err)
   251  	}
   252  	if len(rdg.Downloads) != numDownloads {
   253  		t.Fatalf("Download history not reduced: history has %v downloads, expected %v", len(rdg.Downloads), numDownloads)
   254  	}
   255  	i := sort.Search(len(rdg.Downloads), func(i int) bool { return rdg.Downloads[i].StartTime.Equal(timestamp) })
   256  	if i < len(rdg.Downloads) {
   257  		t.Fatal("Specified download not removed from history")
   258  	}
   259  	// Remove Last Download
   260  	timestamp = rdg.Downloads[len(rdg.Downloads)-1].StartTime
   261  	err = r.RenterClearDownloadsRangePost(timestamp, timestamp)
   262  	if err != nil {
   263  		t.Fatal("Error in API endpoint to remove download from history:", err)
   264  	}
   265  	numDownloads--
   266  	rdg, err = r.RenterDownloadsGet()
   267  	if err != nil {
   268  		t.Fatal("Could not get download history:", err)
   269  	}
   270  	if len(rdg.Downloads) != numDownloads {
   271  		t.Fatalf("Download history not reduced: history has %v downloads, expected %v", len(rdg.Downloads), numDownloads)
   272  	}
   273  	i = sort.Search(len(rdg.Downloads), func(i int) bool { return rdg.Downloads[i].StartTime.Equal(timestamp) })
   274  	if i < len(rdg.Downloads) {
   275  		t.Fatal("Specified download not removed from history")
   276  	}
   277  
   278  	// Check Clear Before
   279  	timestamp = rdg.Downloads[len(rdg.Downloads)-2].StartTime
   280  	err = r.RenterClearDownloadsBeforePost(timestamp)
   281  	if err != nil {
   282  		t.Fatal("Error in API endpoint to clear download history before timestamp:", err)
   283  	}
   284  	rdg, err = r.RenterDownloadsGet()
   285  	if err != nil {
   286  		t.Fatal("Could not get download history:", err)
   287  	}
   288  	i = sort.Search(len(rdg.Downloads), func(i int) bool { return rdg.Downloads[i].StartTime.Before(timestamp) })
   289  	if i < len(rdg.Downloads) {
   290  		t.Fatal("Download found that was before given time")
   291  	}
   292  
   293  	// Check Clear After
   294  	timestamp = rdg.Downloads[1].StartTime
   295  	err = r.RenterClearDownloadsAfterPost(timestamp)
   296  	if err != nil {
   297  		t.Fatal("Error in API endpoint to clear download history after timestamp:", err)
   298  	}
   299  	rdg, err = r.RenterDownloadsGet()
   300  	if err != nil {
   301  		t.Fatal("Could not get download history:", err)
   302  	}
   303  	i = sort.Search(len(rdg.Downloads), func(i int) bool { return rdg.Downloads[i].StartTime.After(timestamp) })
   304  	if i < len(rdg.Downloads) {
   305  		t.Fatal("Download found that was after given time")
   306  	}
   307  
   308  	// Check clear range
   309  	before := rdg.Downloads[1].StartTime
   310  	after := rdg.Downloads[len(rdg.Downloads)-1].StartTime
   311  	err = r.RenterClearDownloadsRangePost(after, before)
   312  	if err != nil {
   313  		t.Fatal("Error in API endpoint to remove range of downloads from history:", err)
   314  	}
   315  	rdg, err = r.RenterDownloadsGet()
   316  	if err != nil {
   317  		t.Fatal("Could not get download history:", err)
   318  	}
   319  	i = sort.Search(len(rdg.Downloads), func(i int) bool {
   320  		return rdg.Downloads[i].StartTime.Before(before) && rdg.Downloads[i].StartTime.After(after)
   321  	})
   322  	if i < len(rdg.Downloads) {
   323  		t.Fatal("Not all downloads from range removed from history")
   324  	}
   325  
   326  	// Check clearing download history
   327  	err = r.RenterClearAllDownloadsPost()
   328  	if err != nil {
   329  		t.Fatal("Error in API endpoint to clear download history:", err)
   330  	}
   331  	rdg, err = r.RenterDownloadsGet()
   332  	if err != nil {
   333  		t.Fatal("Could not get download history:", err)
   334  	}
   335  	if len(rdg.Downloads) != 0 {
   336  		t.Fatalf("Download history not cleared: history has %v downloads, expected 0", len(rdg.Downloads))
   337  	}
   338  }
   339  
   340  // testDownloadAfterRenew makes sure that we can still download a file
   341  // after the contract period has ended.
   342  func testDownloadAfterRenew(t *testing.T, tg *siatest.TestGroup) {
   343  	// Grab the first of the group's renters
   344  	renter := tg.Renters()[0]
   345  	// Upload file, creating a piece for each host in the group
   346  	dataPieces := uint64(1)
   347  	parityPieces := uint64(len(tg.Hosts())) - dataPieces
   348  	fileSize := 100 + siatest.Fuzz()
   349  	_, remoteFile, err := renter.UploadNewFileBlocking(fileSize, dataPieces, parityPieces)
   350  	if err != nil {
   351  		t.Fatal("Failed to upload a file for testing: ", err)
   352  	}
   353  	// Mine enough blocks for the next period to start. This means the
   354  	// contracts should be renewed and the data should still be available for
   355  	// download.
   356  	miner := tg.Miners()[0]
   357  	for i := types.BlockHeight(0); i < siatest.DefaultAllowance.Period; i++ {
   358  		if err := miner.MineBlock(); err != nil {
   359  			t.Fatal(err)
   360  		}
   361  	}
   362  	// Download the file synchronously directly into memory.
   363  	_, err = renter.DownloadByStream(remoteFile)
   364  	if err != nil {
   365  		t.Fatal(err)
   366  	}
   367  }
   368  
   369  // testDownloadMultipleLargeSectors downloads multiple large files (>5 Sectors)
   370  // in parallel and makes sure that the downloads are blocking each other.
   371  func testDownloadMultipleLargeSectors(t *testing.T, tg *siatest.TestGroup) {
   372  	// parallelDownloads is the number of downloads that are run in parallel.
   373  	parallelDownloads := 10
   374  	// fileSize is the size of the downloaded file.
   375  	fileSize := int(10*modules.SectorSize) + siatest.Fuzz()
   376  	// set download limits and reset them after test.
   377  	// uniqueRemoteFiles is the number of files that will be uploaded to the
   378  	// network. Downloads will choose the remote file to download randomly.
   379  	uniqueRemoteFiles := 5
   380  	// Grab the first of the group's renters
   381  	renter := tg.Renters()[0]
   382  	// set download limits and reset them after test.
   383  	if err := renter.RenterPostRateLimit(int64(fileSize)*2, 0); err != nil {
   384  		t.Fatal("failed to set renter bandwidth limit", err)
   385  	}
   386  	defer func() {
   387  		if err := renter.RenterPostRateLimit(0, 0); err != nil {
   388  			t.Error("failed to reset renter bandwidth limit", err)
   389  		}
   390  	}()
   391  
   392  	// Upload files
   393  	dataPieces := uint64(len(tg.Hosts())) - 1
   394  	parityPieces := uint64(1)
   395  	remoteFiles := make([]*siatest.RemoteFile, 0, uniqueRemoteFiles)
   396  	for i := 0; i < uniqueRemoteFiles; i++ {
   397  		_, remoteFile, err := renter.UploadNewFileBlocking(fileSize, dataPieces, parityPieces)
   398  		if err != nil {
   399  			t.Fatal("Failed to upload a file for testing: ", err)
   400  		}
   401  		remoteFiles = append(remoteFiles, remoteFile)
   402  	}
   403  
   404  	// Randomly download using download to file and download to stream methods.
   405  	wg := new(sync.WaitGroup)
   406  	for i := 0; i < parallelDownloads; i++ {
   407  		wg.Add(1)
   408  		go func() {
   409  			var err error
   410  			var rf = remoteFiles[fastrand.Intn(len(remoteFiles))]
   411  			if fastrand.Intn(2) == 0 {
   412  				_, err = renter.DownloadByStream(rf)
   413  			} else {
   414  				_, err = renter.DownloadToDisk(rf, false)
   415  			}
   416  			if err != nil {
   417  				t.Error("Download failed:", err)
   418  			}
   419  			wg.Done()
   420  		}()
   421  	}
   422  	wg.Wait()
   423  }
   424  
   425  // testLocalRepair tests if a renter correctly repairs a file from disk
   426  // after a host goes offline.
   427  func testLocalRepair(t *testing.T, tg *siatest.TestGroup) {
   428  	// Grab the first of the group's renters
   429  	renter := tg.Renters()[0]
   430  
   431  	// Check that we have enough hosts for this test.
   432  	if len(tg.Hosts()) < 2 {
   433  		t.Fatal("This test requires at least 2 hosts")
   434  	}
   435  
   436  	// Set fileSize and redundancy for upload
   437  	fileSize := int(modules.SectorSize)
   438  	dataPieces := uint64(1)
   439  	parityPieces := uint64(len(tg.Hosts())) - dataPieces
   440  
   441  	// Upload file
   442  	_, remoteFile, err := renter.UploadNewFileBlocking(fileSize, dataPieces, parityPieces)
   443  	if err != nil {
   444  		t.Fatal(err)
   445  	}
   446  	// Get the file info of the fully uploaded file. Tha way we can compare the
   447  	// redundancies later.
   448  	fi, err := renter.FileInfo(remoteFile)
   449  	if err != nil {
   450  		t.Fatal("failed to get file info", err)
   451  	}
   452  
   453  	// Take down one of the hosts and check if redundancy decreases.
   454  	if err := tg.RemoveNode(tg.Hosts()[0]); err != nil {
   455  		t.Fatal("Failed to shutdown host", err)
   456  	}
   457  	expectedRedundancy := float64(dataPieces+parityPieces-1) / float64(dataPieces)
   458  	if err := renter.WaitForDecreasingRedundancy(remoteFile, expectedRedundancy); err != nil {
   459  		t.Fatal("Redundancy isn't decreasing", err)
   460  	}
   461  	// We should still be able to download
   462  	if _, err := renter.DownloadByStream(remoteFile); err != nil {
   463  		t.Fatal("Failed to download file", err)
   464  	}
   465  	// Bring up a new host and check if redundancy increments again.
   466  	_, err = tg.AddNodes(node.HostTemplate)
   467  	if err != nil {
   468  		t.Fatal("Failed to create a new host", err)
   469  	}
   470  	if err := renter.WaitForUploadRedundancy(remoteFile, fi.Redundancy); err != nil {
   471  		t.Fatal("File wasn't repaired", err)
   472  	}
   473  	// We should be able to download
   474  	if _, err := renter.DownloadByStream(remoteFile); err != nil {
   475  		t.Fatal("Failed to download file", err)
   476  	}
   477  }
   478  
   479  // testRemoteRepair tests if a renter correctly repairs a file by
   480  // downloading it after a host goes offline.
   481  func testRemoteRepair(t *testing.T, tg *siatest.TestGroup) {
   482  	// Grab the first of the group's renters
   483  	r := tg.Renters()[0]
   484  
   485  	// Check that we have enough hosts for this test.
   486  	if len(tg.Hosts()) < 2 {
   487  		t.Fatal("This test requires at least 2 hosts")
   488  	}
   489  
   490  	// Set fileSize and redundancy for upload
   491  	fileSize := int(modules.SectorSize)
   492  	dataPieces := uint64(1)
   493  	parityPieces := uint64(len(tg.Hosts())) - dataPieces
   494  
   495  	// Upload file
   496  	localFile, remoteFile, err := r.UploadNewFileBlocking(fileSize, dataPieces, parityPieces)
   497  	if err != nil {
   498  		t.Fatal(err)
   499  	}
   500  	// Get the file info of the fully uploaded file. Tha way we can compare the
   501  	// redundancieslater.
   502  	fi, err := r.FileInfo(remoteFile)
   503  	if err != nil {
   504  		t.Fatal("failed to get file info", err)
   505  	}
   506  
   507  	// Delete the file locally.
   508  	if err := localFile.Delete(); err != nil {
   509  		t.Fatal("failed to delete local file", err)
   510  	}
   511  
   512  	// Take down all of the parity hosts and check if redundancy decreases.
   513  	for i := uint64(0); i < parityPieces; i++ {
   514  		if err := tg.RemoveNode(tg.Hosts()[0]); err != nil {
   515  			t.Fatal("Failed to shutdown host", err)
   516  		}
   517  	}
   518  	expectedRedundancy := float64(dataPieces+parityPieces-1) / float64(dataPieces)
   519  	if err := r.WaitForDecreasingRedundancy(remoteFile, expectedRedundancy); err != nil {
   520  		t.Fatal("Redundancy isn't decreasing", err)
   521  	}
   522  	// We should still be able to download
   523  	if _, err := r.DownloadByStream(remoteFile); err != nil {
   524  		t.Fatal("Failed to download file", err)
   525  	}
   526  	// Bring up new parity hosts and check if redundancy increments again.
   527  	_, err = tg.AddNodeN(node.HostTemplate, int(parityPieces))
   528  	if err != nil {
   529  		t.Fatal("Failed to create a new host", err)
   530  	}
   531  	// When doing remote repair the redundancy might not reach 100%.
   532  	expectedRedundancy = (1.0 - renter.RemoteRepairDownloadThreshold) * fi.Redundancy
   533  	if err := r.WaitForUploadRedundancy(remoteFile, expectedRedundancy); err != nil {
   534  		t.Fatal("File wasn't repaired", err)
   535  	}
   536  	// We should be able to download
   537  	if _, err := r.DownloadByStream(remoteFile); err != nil {
   538  		t.Fatal("Failed to download file", err)
   539  	}
   540  }
   541  
   542  // testSingleFileGet is a subtest that uses an existing TestGroup to test if
   543  // using the single file API endpoint works
   544  func testSingleFileGet(t *testing.T, tg *siatest.TestGroup) {
   545  	// Grab the first of the group's renters
   546  	renter := tg.Renters()[0]
   547  	// Upload file, creating a piece for each host in the group
   548  	dataPieces := uint64(1)
   549  	parityPieces := uint64(len(tg.Hosts())) - dataPieces
   550  	fileSize := 100 + siatest.Fuzz()
   551  	_, _, err := renter.UploadNewFileBlocking(fileSize, dataPieces, parityPieces)
   552  	if err != nil {
   553  		t.Fatal("Failed to upload a file for testing: ", err)
   554  	}
   555  
   556  	files, err := renter.Files()
   557  	if err != nil {
   558  		t.Fatal("Failed to get renter files: ", err)
   559  	}
   560  
   561  	var file modules.FileInfo
   562  	checks := 0
   563  	for _, f := range files {
   564  		// Only request files if file was fully uploaded for first API request
   565  		if f.UploadProgress < 100 {
   566  			continue
   567  		}
   568  		checks++
   569  		file, err = renter.File(f.SiaPath)
   570  		if err != nil {
   571  			t.Fatal("Failed to request single file", err)
   572  		}
   573  
   574  		// Can't use reflect.DeepEqual because certain fields are too dynamic,
   575  		// however those fields are also not indicative of whether or not the
   576  		// files are the same.  Not checking Redundancy, Available, Renewing
   577  		// ,UploadProgress, UploadedBytes, or Renewing
   578  		if f.Expiration != file.Expiration {
   579  			t.Log("File from Files() Expiration:", f.Expiration)
   580  			t.Log("File from File() Expiration:", file.Expiration)
   581  			t.Fatal("Single file queries does not match file previously requested.")
   582  		}
   583  		if f.Filesize != file.Filesize {
   584  			t.Log("File from Files() Filesize:", f.Filesize)
   585  			t.Log("File from File() Filesize:", file.Filesize)
   586  			t.Fatal("Single file queries does not match file previously requested.")
   587  		}
   588  		if f.LocalPath != file.LocalPath {
   589  			t.Log("File from Files() LocalPath:", f.LocalPath)
   590  			t.Log("File from File() LocalPath:", file.LocalPath)
   591  			t.Fatal("Single file queries does not match file previously requested.")
   592  		}
   593  		if f.SiaPath != file.SiaPath {
   594  			t.Log("File from Files() SiaPath:", f.SiaPath)
   595  			t.Log("File from File() SiaPath:", file.SiaPath)
   596  			t.Fatal("Single file queries does not match file previously requested.")
   597  		}
   598  	}
   599  	if checks == 0 {
   600  		t.Fatal("No files checks through single file endpoint.")
   601  	}
   602  }
   603  
   604  // testStreamingCache checks if the chunk cache works correctly.
   605  func testStreamingCache(t *testing.T, tg *siatest.TestGroup) {
   606  	// Grab the first of the group's renters
   607  	r := tg.Renters()[0]
   608  
   609  	// Testing setting StreamCacheSize for streaming
   610  	// Test setting it to larger than the defaultCacheSize
   611  	if err := r.RenterSetStreamCacheSizePost(4); err != nil {
   612  		t.Fatal(err, "Could not set StreamCacheSize to 4")
   613  	}
   614  	rg, err := r.RenterGet()
   615  	if err != nil {
   616  		t.Fatal(err)
   617  	}
   618  	if rg.Settings.StreamCacheSize != 4 {
   619  		t.Fatal("StreamCacheSize not set to 4, set to", rg.Settings.StreamCacheSize)
   620  	}
   621  
   622  	// Test resetting to the value of defaultStreamCacheSize (2)
   623  	if err := r.RenterSetStreamCacheSizePost(2); err != nil {
   624  		t.Fatal(err, "Could not set StreamCacheSize to 2")
   625  	}
   626  	rg, err = r.RenterGet()
   627  	if err != nil {
   628  		t.Fatal(err)
   629  	}
   630  	if rg.Settings.StreamCacheSize != 2 {
   631  		t.Fatal("StreamCacheSize not set to 2, set to", rg.Settings.StreamCacheSize)
   632  	}
   633  
   634  	prev := rg.Settings.StreamCacheSize
   635  
   636  	// Test setting to 0
   637  	if err := r.RenterSetStreamCacheSizePost(0); err == nil {
   638  		t.Fatal(err, "expected setting stream cache size to zero to fail with an error")
   639  	}
   640  	rg, err = r.RenterGet()
   641  	if err != nil {
   642  		t.Fatal(err)
   643  	}
   644  	if rg.Settings.StreamCacheSize == 0 {
   645  		t.Fatal("StreamCacheSize set to 0, should have stayed as previous value or", prev)
   646  	}
   647  
   648  	// Set fileSize and redundancy for upload
   649  	dataPieces := uint64(1)
   650  	parityPieces := uint64(len(tg.Hosts())) - dataPieces
   651  
   652  	// Set the bandwidth limit to 1 chunk per second.
   653  	pieceSize := modules.SectorSize - crypto.TwofishOverhead
   654  	chunkSize := int64(pieceSize * dataPieces)
   655  	if err := r.RenterPostRateLimit(chunkSize, chunkSize); err != nil {
   656  		t.Fatal(err)
   657  	}
   658  
   659  	rg, err = r.RenterGet()
   660  	if err != nil {
   661  		t.Fatal(err)
   662  	}
   663  	if rg.Settings.MaxDownloadSpeed != chunkSize {
   664  		t.Fatal(errors.New("MaxDownloadSpeed doesn't match value set through RenterPostRateLimit"))
   665  	}
   666  	if rg.Settings.MaxUploadSpeed != chunkSize {
   667  		t.Fatal(errors.New("MaxUploadSpeed doesn't match value set through RenterPostRateLimit"))
   668  	}
   669  
   670  	// Upload a file that is a single chunk big.
   671  	_, remoteFile, err := r.UploadNewFileBlocking(int(chunkSize), dataPieces, parityPieces)
   672  	if err != nil {
   673  		t.Fatal(err)
   674  	}
   675  
   676  	// Download the same chunk 250 times. This should take at least 250 seconds
   677  	// without caching but not more than 30 with caching.
   678  	start := time.Now()
   679  	for i := 0; i < 250; i++ {
   680  		if _, err := r.Stream(remoteFile); err != nil {
   681  			t.Fatal(err)
   682  		}
   683  		if time.Since(start) > time.Second*30 {
   684  			t.Fatal("download took longer than 30 seconds")
   685  		}
   686  	}
   687  }
   688  
   689  // testUploadDownload is a subtest that uses an existing TestGroup to test if
   690  // uploading and downloading a file works
   691  func testUploadDownload(t *testing.T, tg *siatest.TestGroup) {
   692  	// Grab the first of the group's renters
   693  	renter := tg.Renters()[0]
   694  	// Upload file, creating a piece for each host in the group
   695  	dataPieces := uint64(1)
   696  	parityPieces := uint64(len(tg.Hosts())) - dataPieces
   697  	fileSize := 100 + siatest.Fuzz()
   698  	localFile, remoteFile, err := renter.UploadNewFileBlocking(fileSize, dataPieces, parityPieces)
   699  	if err != nil {
   700  		t.Fatal("Failed to upload a file for testing: ", err)
   701  	}
   702  	// Download the file synchronously directly into memory
   703  	_, err = renter.DownloadByStream(remoteFile)
   704  	if err != nil {
   705  		t.Fatal(err)
   706  	}
   707  	// Download the file synchronously to a file on disk
   708  	_, err = renter.DownloadToDisk(remoteFile, false)
   709  	if err != nil {
   710  		t.Fatal(err)
   711  	}
   712  	// Download the file asynchronously and wait for the download to finish.
   713  	localFile, err = renter.DownloadToDisk(remoteFile, true)
   714  	if err != nil {
   715  		t.Error(err)
   716  	}
   717  	if err := renter.WaitForDownload(localFile, remoteFile); err != nil {
   718  		t.Error(err)
   719  	}
   720  	// Stream the file.
   721  	_, err = renter.Stream(remoteFile)
   722  	if err != nil {
   723  		t.Fatal(err)
   724  	}
   725  	// Stream the file partially a few times. At least 1 byte is streamed.
   726  	for i := 0; i < 5; i++ {
   727  		from := fastrand.Intn(fileSize - 1)             // [0..fileSize-2]
   728  		to := from + 1 + fastrand.Intn(fileSize-from-1) // [from+1..fileSize-1]
   729  		_, err = renter.StreamPartial(remoteFile, localFile, uint64(from), uint64(to))
   730  		if err != nil {
   731  			t.Fatal(err)
   732  		}
   733  	}
   734  }
   735  
   736  // TestRenterInterrupt executes a number of subtests using the same TestGroup to
   737  // save time on initialization
   738  func TestRenterInterrupt(t *testing.T) {
   739  	if testing.Short() {
   740  		t.SkipNow()
   741  	}
   742  	t.Parallel()
   743  
   744  	// Create a group for the subtests
   745  	groupParams := siatest.GroupParams{
   746  		Hosts:  5,
   747  		Miners: 1,
   748  	}
   749  
   750  	// Specify sub tests
   751  	subTests := []test{
   752  		{"TestContractInterruptedSaveToDiskAfterDeletion", testContractInterruptedSaveToDiskAfterDeletion},
   753  		{"TestDownloadInterruptedAfterSendingRevision", testDownloadInterruptedAfterSendingRevision},
   754  		{"TestDownloadInterruptedBeforeSendingRevision", testDownloadInterruptedBeforeSendingRevision},
   755  		{"TestUploadInterruptedAfterSendingRevision", testUploadInterruptedAfterSendingRevision},
   756  		{"TestUploadInterruptedBeforeSendingRevision", testUploadInterruptedBeforeSendingRevision},
   757  	}
   758  
   759  	// Run tests
   760  	if err := runRenterTests(t, groupParams, subTests); err != nil {
   761  		t.Fatal(err)
   762  	}
   763  }
   764  
   765  // testContractInterruptedSaveToDiskAfterDeletion runs testDownloadInterrupted with
   766  // a dependency that interrupts the download after sending the signed revision
   767  // to the host.
   768  func testContractInterruptedSaveToDiskAfterDeletion(t *testing.T, tg *siatest.TestGroup) {
   769  	testContractInterrupted(t, tg, newDependencyInterruptContractSaveToDiskAfterDeletion())
   770  }
   771  
   772  // testDownloadInterruptedAfterSendingRevision runs testDownloadInterrupted with
   773  // a dependency that interrupts the download after sending the signed revision
   774  // to the host.
   775  func testDownloadInterruptedAfterSendingRevision(t *testing.T, tg *siatest.TestGroup) {
   776  	testDownloadInterrupted(t, tg, newDependencyInterruptDownloadAfterSendingRevision())
   777  }
   778  
   779  // testDownloadInterruptedBeforeSendingRevision runs testDownloadInterrupted
   780  // with a dependency that interrupts the download before sending the signed
   781  // revision to the host.
   782  func testDownloadInterruptedBeforeSendingRevision(t *testing.T, tg *siatest.TestGroup) {
   783  	testDownloadInterrupted(t, tg, newDependencyInterruptDownloadBeforeSendingRevision())
   784  }
   785  
   786  // testUploadInterruptedAfterSendingRevision runs testUploadInterrupted with a
   787  // dependency that interrupts the upload after sending the signed revision to
   788  // the host.
   789  func testUploadInterruptedAfterSendingRevision(t *testing.T, tg *siatest.TestGroup) {
   790  	testUploadInterrupted(t, tg, newDependencyInterruptUploadAfterSendingRevision())
   791  }
   792  
   793  // testUploadInterruptedBeforeSendingRevision runs testUploadInterrupted with a
   794  // dependency that interrupts the upload before sending the signed revision to
   795  // the host.
   796  func testUploadInterruptedBeforeSendingRevision(t *testing.T, tg *siatest.TestGroup) {
   797  	testUploadInterrupted(t, tg, newDependencyInterruptUploadBeforeSendingRevision())
   798  }
   799  
   800  // testContractInterrupted interrupts a download using the provided dependencies.
   801  func testContractInterrupted(t *testing.T, tg *siatest.TestGroup, deps *siatest.DependencyInterruptOnceOnKeyword) {
   802  	// Add Renter
   803  	testDir := renterTestDir(t.Name())
   804  	renterTemplate := node.Renter(testDir + "/renter")
   805  	renterTemplate.ContractorDeps = deps
   806  	renterTemplate.Allowance = siatest.DefaultAllowance
   807  	renterTemplate.Allowance.Period = 100
   808  	renterTemplate.Allowance.RenewWindow = 75
   809  	nodes, err := tg.AddNodes(renterTemplate)
   810  	if err != nil {
   811  		t.Fatal(err)
   812  	}
   813  	renter := nodes[0]
   814  
   815  	// Call fail on the dependency every 10 ms.
   816  	cancel := make(chan struct{})
   817  	wg := new(sync.WaitGroup)
   818  	wg.Add(1)
   819  	go func() {
   820  		for {
   821  			// Cause the next download to fail.
   822  			deps.Fail()
   823  			select {
   824  			case <-cancel:
   825  				wg.Done()
   826  				return
   827  			case <-time.After(10 * time.Millisecond):
   828  			}
   829  		}
   830  	}()
   831  
   832  	// Renew contracts.
   833  	if err = renewContractsByRenewWindow(renter, tg); err != nil {
   834  		t.Fatal(err)
   835  	}
   836  
   837  	// Disrupt statement should prevent inactive contracts from being created
   838  	err = build.Retry(50, 100*time.Millisecond, func() error {
   839  		rc, err := renter.RenterInactiveContractsGet()
   840  		if err != nil {
   841  			return err
   842  		}
   843  		if len(rc.InactiveContracts) != 0 {
   844  			return fmt.Errorf("Incorrect number of inactive contracts: have %v expected 0", len(rc.InactiveContracts))
   845  		}
   846  		if len(rc.ActiveContracts) != len(tg.Hosts())*2 {
   847  			return fmt.Errorf("Incorrect number of active contracts: have %v expected %v", len(rc.ActiveContracts), len(tg.Hosts())*2)
   848  		}
   849  		return nil
   850  	})
   851  	if err != nil {
   852  		t.Fatal(err)
   853  	}
   854  
   855  	// By mining blocks to trigger threadContractMaintenance,
   856  	// managedCheckForDuplicates should move renewed contracts to inactive even
   857  	// though disrupt statement is still interrtupting renew code
   858  	m := tg.Miners()[0]
   859  	if err = m.MineBlock(); err != nil {
   860  		t.Fatal(err)
   861  	}
   862  	if err = tg.Sync(); err != nil {
   863  		t.Fatal(err)
   864  	}
   865  	err = build.Retry(70, 100*time.Millisecond, func() error {
   866  		rc, err := renter.RenterInactiveContractsGet()
   867  		if err != nil {
   868  			return err
   869  		}
   870  		if len(rc.InactiveContracts) != len(tg.Hosts()) {
   871  			return fmt.Errorf("Incorrect number of inactive contracts: have %v expected %v", len(rc.InactiveContracts), len(tg.Hosts()))
   872  		}
   873  		if len(rc.ActiveContracts) != len(tg.Hosts()) {
   874  			return fmt.Errorf("Incorrect number of active contracts: have %v expected %v", len(rc.ActiveContracts), len(tg.Hosts()))
   875  		}
   876  		if err = m.MineBlock(); err != nil {
   877  			return err
   878  		}
   879  		return nil
   880  	})
   881  	if err != nil {
   882  		t.Fatal(err)
   883  	}
   884  
   885  	// Stop calling fail on the dependency.
   886  	close(cancel)
   887  	wg.Wait()
   888  	deps.Disable()
   889  }
   890  
   891  // testDownloadInterrupted interrupts a download using the provided dependencies.
   892  func testDownloadInterrupted(t *testing.T, tg *siatest.TestGroup, deps *siatest.DependencyInterruptOnceOnKeyword) {
   893  	// Add Renter
   894  	testDir := renterTestDir(t.Name())
   895  	renterTemplate := node.Renter(testDir + "/renter")
   896  	renterTemplate.ContractSetDeps = deps
   897  	nodes, err := tg.AddNodes(renterTemplate)
   898  	if err != nil {
   899  		t.Fatal(err)
   900  	}
   901  
   902  	// Set the bandwidth limit to 1 chunk per second.
   903  	renter := nodes[0]
   904  	dataPieces := uint64(len(tg.Hosts())) - 1
   905  	parityPieces := uint64(1)
   906  	chunkSize := siatest.ChunkSize(uint64(dataPieces))
   907  	_, remoteFile, err := renter.UploadNewFileBlocking(int(chunkSize), dataPieces, parityPieces)
   908  	if err != nil {
   909  		t.Fatal(err)
   910  	}
   911  
   912  	// Set the bandwidth limit to 1 chunk per second.
   913  	if err := renter.RenterPostRateLimit(int64(chunkSize), int64(chunkSize)); err != nil {
   914  		t.Fatal(err)
   915  	}
   916  
   917  	// Call fail on the dependency every 100 ms.
   918  	cancel := make(chan struct{})
   919  	wg := new(sync.WaitGroup)
   920  	wg.Add(1)
   921  	go func() {
   922  		for {
   923  			// Cause the next download to fail.
   924  			deps.Fail()
   925  			select {
   926  			case <-cancel:
   927  				wg.Done()
   928  				return
   929  			case <-time.After(10 * time.Millisecond):
   930  			}
   931  		}
   932  	}()
   933  	// Try downloading the file 5 times.
   934  	for i := 0; i < 5; i++ {
   935  		if _, err := renter.DownloadByStream(remoteFile); err == nil {
   936  			t.Fatal("Download shouldn't succeed since it was interrupted")
   937  		}
   938  	}
   939  	// Stop calling fail on the dependency.
   940  	close(cancel)
   941  	wg.Wait()
   942  	deps.Disable()
   943  	// Download the file once more successfully
   944  	if _, err := renter.DownloadByStream(remoteFile); err != nil {
   945  		t.Fatal("Failed to download the file", err)
   946  	}
   947  }
   948  
   949  // testUploadInterrupted let's the upload fail using the provided dependencies
   950  // and makes sure that this doesn't corrupt the contract.
   951  func testUploadInterrupted(t *testing.T, tg *siatest.TestGroup, deps *siatest.DependencyInterruptOnceOnKeyword) {
   952  	// Add Renter
   953  	testDir := renterTestDir(t.Name())
   954  	renterTemplate := node.Renter(testDir + "/renter")
   955  	renterTemplate.ContractSetDeps = deps
   956  	nodes, err := tg.AddNodes(renterTemplate)
   957  	if err != nil {
   958  		t.Fatal(err)
   959  	}
   960  
   961  	// Set the bandwidth limit to 1 chunk per second.
   962  	renter := nodes[0]
   963  	dataPieces := uint64(len(tg.Hosts())) - 1
   964  	parityPieces := uint64(1)
   965  	chunkSize := siatest.ChunkSize(uint64(dataPieces))
   966  	if err := renter.RenterPostRateLimit(int64(chunkSize), int64(chunkSize)); err != nil {
   967  		t.Fatal(err)
   968  	}
   969  
   970  	// Call fail on the dependency every two seconds to allow some uploads to
   971  	// finish.
   972  	cancel := make(chan struct{})
   973  	done := make(chan struct{})
   974  	wg := new(sync.WaitGroup)
   975  	wg.Add(1)
   976  	go func() {
   977  		defer close(done)
   978  		// Loop until cancel was closed or we reach 5 iterations. Otherwise we
   979  		// might end up blocking the upload for too long.
   980  		for i := 0; i < 10; i++ {
   981  			// Cause the next upload to fail.
   982  			deps.Fail()
   983  			select {
   984  			case <-cancel:
   985  				wg.Done()
   986  				return
   987  			case <-time.After(100 * time.Millisecond):
   988  			}
   989  		}
   990  		wg.Done()
   991  	}()
   992  
   993  	// Upload a file that's 1 chunk large.
   994  	_, remoteFile, err := renter.UploadNewFileBlocking(int(chunkSize), dataPieces, parityPieces)
   995  	if err != nil {
   996  		t.Fatal(err)
   997  	}
   998  	// Make sure that the upload does not finish before the interrupting go
   999  	// routine is finished
  1000  	select {
  1001  	case <-done:
  1002  	default:
  1003  		t.Fatal("Upload finished before interrupt signal is done")
  1004  	}
  1005  	// Stop calling fail on the dependency.
  1006  	close(cancel)
  1007  	wg.Wait()
  1008  	deps.Disable()
  1009  	// Download the file.
  1010  	if _, err := renter.DownloadByStream(remoteFile); err != nil {
  1011  		t.Fatal("Failed to download the file", err)
  1012  	}
  1013  }
  1014  
  1015  // TestRenterAddNodes runs a subset of tests that require adding their own renter
  1016  func TestRenterAddNodes(t *testing.T) {
  1017  	if testing.Short() {
  1018  		t.SkipNow()
  1019  	}
  1020  	t.Parallel()
  1021  
  1022  	// Create a group for testing
  1023  	groupParams := siatest.GroupParams{
  1024  		Hosts:   5,
  1025  		Renters: 1,
  1026  		Miners:  1,
  1027  	}
  1028  
  1029  	// Specify subtests to run
  1030  	subTests := []test{
  1031  		{"TestRedundancyReporting", testRedundancyReporting},
  1032  		{"TestRenterCancelAllowance", testRenterCancelAllowance},
  1033  		{"TestRenewFailing", testRenewFailing}, // Put last because it impacts a host
  1034  	}
  1035  
  1036  	// Run tests
  1037  	if err := runRenterTests(t, groupParams, subTests); err != nil {
  1038  		t.Fatal(err)
  1039  	}
  1040  }
  1041  
  1042  // testRedundancyReporting verifies that redundancy reporting is accurate if
  1043  // contracts become offline.
  1044  func testRedundancyReporting(t *testing.T, tg *siatest.TestGroup) {
  1045  	// Upload a file.
  1046  	dataPieces := uint64(1)
  1047  	parityPieces := uint64(len(tg.Hosts()) - 1)
  1048  
  1049  	renter := tg.Renters()[0]
  1050  	_, rf, err := renter.UploadNewFileBlocking(100, dataPieces, parityPieces)
  1051  	if err != nil {
  1052  		t.Fatal(err)
  1053  	}
  1054  
  1055  	// Stop a host.
  1056  	host := tg.Hosts()[0]
  1057  	if err := tg.StopNode(host); err != nil {
  1058  		t.Fatal(err)
  1059  	}
  1060  
  1061  	// Mine a block to trigger contract maintenance.
  1062  	miner := tg.Miners()[0]
  1063  	if err := miner.MineBlock(); err != nil {
  1064  		t.Fatal(err)
  1065  	}
  1066  
  1067  	// Redundancy should decrease.
  1068  	expectedRedundancy := float64(dataPieces+parityPieces-1) / float64(dataPieces)
  1069  	if err := renter.WaitForDecreasingRedundancy(rf, expectedRedundancy); err != nil {
  1070  		t.Fatal("Redundancy isn't decreasing", err)
  1071  	}
  1072  
  1073  	// Restart the host.
  1074  	if err := tg.StartNode(host); err != nil {
  1075  		t.Fatal(err)
  1076  	}
  1077  
  1078  	// Wait until the host shows up as active again.
  1079  	pk, err := host.HostPublicKey()
  1080  	if err != nil {
  1081  		t.Fatal(err)
  1082  	}
  1083  	err = build.Retry(60, time.Second, func() error {
  1084  		hdag, err := renter.HostDbActiveGet()
  1085  		if err != nil {
  1086  			return err
  1087  		}
  1088  		for _, h := range hdag.Hosts {
  1089  			if reflect.DeepEqual(h.PublicKey, pk) {
  1090  				return nil
  1091  			}
  1092  		}
  1093  		// If host is not active, announce it again and mine a block.
  1094  		if err := host.HostAnnouncePost(); err != nil {
  1095  			return (err)
  1096  		}
  1097  		miner := tg.Miners()[0]
  1098  		if err := miner.MineBlock(); err != nil {
  1099  			return (err)
  1100  		}
  1101  		if err := tg.Sync(); err != nil {
  1102  			return (err)
  1103  		}
  1104  		hg, err := host.HostGet()
  1105  		if err != nil {
  1106  			return err
  1107  		}
  1108  		return fmt.Errorf("host with address %v not active", hg.InternalSettings.NetAddress)
  1109  	})
  1110  	if err != nil {
  1111  		t.Fatal(err)
  1112  	}
  1113  
  1114  	if err := miner.MineBlock(); err != nil {
  1115  		t.Fatal(err)
  1116  	}
  1117  
  1118  	// Redundancy should go back to normal.
  1119  	expectedRedundancy = float64(dataPieces+parityPieces) / float64(dataPieces)
  1120  	if err := renter.WaitForUploadRedundancy(rf, expectedRedundancy); err != nil {
  1121  		t.Fatal("Redundancy is not increasing")
  1122  	}
  1123  }
  1124  
  1125  // testRenewFailing checks if a contract gets marked as !goodForRenew after
  1126  // failing multiple times in a row.
  1127  func testRenewFailing(t *testing.T, tg *siatest.TestGroup) {
  1128  	// Add a renter with a custom allowance to give it plenty of time to renew
  1129  	// the contract later.
  1130  	renterParams := node.Renter(filepath.Join(siatest.TestDir(t.Name()), "renter"))
  1131  	renterParams.Allowance = siatest.DefaultAllowance
  1132  	renterParams.Allowance.Hosts = uint64(len(tg.Hosts()) - 1)
  1133  	renterParams.Allowance.Period = 100
  1134  	renterParams.Allowance.RenewWindow = 50
  1135  	nodes, err := tg.AddNodes(renterParams)
  1136  	if err != nil {
  1137  		t.Fatal(err)
  1138  	}
  1139  	renter := nodes[0]
  1140  
  1141  	// All the contracts of the renter should be goodForRenew. So there should
  1142  	// be no inactive contracts, only active contracts
  1143  	rcg, err := renter.RenterInactiveContractsGet()
  1144  	if err != nil {
  1145  		t.Fatal(err)
  1146  	}
  1147  	if uint64(len(rcg.ActiveContracts)) != renterParams.Allowance.Hosts {
  1148  		for i, c := range rcg.ActiveContracts {
  1149  			fmt.Println(i, c.HostPublicKey)
  1150  		}
  1151  		t.Fatalf("renter had %v contracts but should have %v",
  1152  			len(rcg.ActiveContracts), renterParams.Allowance.Hosts)
  1153  	}
  1154  	if len(rcg.InactiveContracts) != 0 {
  1155  		t.Fatal("Renter should have 0 inactive contracts but has", len(rcg.InactiveContracts))
  1156  	}
  1157  
  1158  	// Create a map of the hosts in the group.
  1159  	hostMap := make(map[string]*siatest.TestNode)
  1160  	for _, host := range tg.Hosts() {
  1161  		pk, err := host.HostPublicKey()
  1162  		if err != nil {
  1163  			t.Fatal(err)
  1164  		}
  1165  		hostMap[pk.String()] = host
  1166  	}
  1167  	// Lock the wallet of one of the used hosts to make the renew fail.
  1168  	for _, c := range rcg.ActiveContracts {
  1169  		if host, used := hostMap[c.HostPublicKey.String()]; used {
  1170  			if err := host.WalletLockPost(); err != nil {
  1171  				t.Fatal(err)
  1172  			}
  1173  			break
  1174  		}
  1175  	}
  1176  	// Wait until the contract is supposed to be renewed.
  1177  	cg, err := renter.ConsensusGet()
  1178  	if err != nil {
  1179  		t.Fatal(err)
  1180  	}
  1181  	rg, err := renter.RenterGet()
  1182  	if err != nil {
  1183  		t.Fatal(err)
  1184  	}
  1185  	miner := tg.Miners()[0]
  1186  	blockHeight := cg.Height
  1187  	for blockHeight+rg.Settings.Allowance.RenewWindow < rcg.ActiveContracts[0].EndHeight {
  1188  		if err := miner.MineBlock(); err != nil {
  1189  			t.Fatal(err)
  1190  		}
  1191  		blockHeight++
  1192  	}
  1193  
  1194  	// there should be no inactive contracts, only active contracts.
  1195  	rcg, err = renter.RenterInactiveContractsGet()
  1196  	if err != nil {
  1197  		t.Fatal(err)
  1198  	}
  1199  	if uint64(len(rcg.ActiveContracts)) != renterParams.Allowance.Hosts {
  1200  		for i, c := range rcg.ActiveContracts {
  1201  			fmt.Println(i, c.HostPublicKey)
  1202  		}
  1203  		t.Fatalf("renter had %v contracts but should have %v",
  1204  			len(rcg.ActiveContracts), renterParams.Allowance.Hosts)
  1205  	}
  1206  	if len(rcg.InactiveContracts) != 0 {
  1207  		t.Fatal("Renter should have 0 inactive contracts but has", len(rcg.InactiveContracts))
  1208  	}
  1209  
  1210  	// mine enough blocks to reach the second half of the renew window.
  1211  	for ; blockHeight+rg.Settings.Allowance.RenewWindow/2 < rcg.ActiveContracts[0].EndHeight; blockHeight++ {
  1212  		if err := miner.MineBlock(); err != nil {
  1213  			t.Fatal(err)
  1214  		}
  1215  	}
  1216  
  1217  	// We should be within the second half of the renew window now. We keep
  1218  	// mining blocks until the host with the locked wallet has been replaced.
  1219  	// This should happen before we reach the endHeight of the contracts. This
  1220  	// means we should have number of hosts - 1 active contracts and number of
  1221  	// hosts - 1 inactive contracts.  One of the inactive contracts will be
  1222  	// !goodForRenew due to the host
  1223  	err = build.Retry(int(rcg.ActiveContracts[0].EndHeight-blockHeight), 5*time.Second, func() error {
  1224  		// contract should be !goodForRenew now.
  1225  		rc, err := renter.RenterInactiveContractsGet()
  1226  		if err != nil {
  1227  			return err
  1228  		}
  1229  		if len(rc.ActiveContracts) != len(tg.Hosts())-1 {
  1230  			return fmt.Errorf("Expected %v active contracts, got %v", len(tg.Hosts())-1, len(rc.ActiveContracts))
  1231  		}
  1232  		if len(rc.InactiveContracts) != len(tg.Hosts())-1 {
  1233  			return fmt.Errorf("Expected %v inactive contracts, got %v", len(tg.Hosts())-1, len(rc.InactiveContracts))
  1234  		}
  1235  
  1236  		notGoodForRenew := 0
  1237  		for _, c := range rc.InactiveContracts {
  1238  			if !c.GoodForRenew {
  1239  				notGoodForRenew++
  1240  			}
  1241  		}
  1242  		if err := miner.MineBlock(); err != nil {
  1243  			return err
  1244  		}
  1245  		if notGoodForRenew != 1 {
  1246  			return fmt.Errorf("there should be exactly 1 inactive contract that is !goodForRenew but was %v",
  1247  				notGoodForRenew)
  1248  		}
  1249  		return nil
  1250  	})
  1251  	if err != nil {
  1252  		t.Fatal(err)
  1253  	}
  1254  }
  1255  
  1256  // testRenterCancelAllowance tests that setting an empty allowance causes
  1257  // uploads, downloads, and renewals to cease as well as tests that resetting the
  1258  // allowance after the allowance was cancelled will trigger the correct contract
  1259  // formation.
  1260  func testRenterCancelAllowance(t *testing.T, tg *siatest.TestGroup) {
  1261  	renterParams := node.Renter(filepath.Join(siatest.TestDir(t.Name()), "renter"))
  1262  	nodes, err := tg.AddNodes(renterParams)
  1263  	if err != nil {
  1264  		t.Fatal(err)
  1265  	}
  1266  	renter := nodes[0]
  1267  
  1268  	// Test Resetting allowance
  1269  	// Cancel the allowance
  1270  	if err := renter.RenterCancelAllowance(); err != nil {
  1271  		t.Fatal(err)
  1272  	}
  1273  
  1274  	// Give it some time to mark the contracts as !goodForUpload and
  1275  	// !goodForRenew.
  1276  	err = build.Retry(200, 100*time.Millisecond, func() error {
  1277  		rc, err := renter.RenterInactiveContractsGet()
  1278  		if err != nil {
  1279  			return err
  1280  		}
  1281  		// Should now only have inactive contracts.
  1282  		if len(rc.ActiveContracts) != 0 {
  1283  			return fmt.Errorf("expected 0 active contracts, got %v", len(rc.ActiveContracts))
  1284  		}
  1285  		if len(rc.InactiveContracts) != len(tg.Hosts()) {
  1286  			return fmt.Errorf("expected %v inactive contracts, got %v", len(tg.Hosts()), len(rc.InactiveContracts))
  1287  		}
  1288  		for _, c := range rc.InactiveContracts {
  1289  			if c.GoodForUpload {
  1290  				return errors.New("contract shouldn't be goodForUpload")
  1291  			}
  1292  			if c.GoodForRenew {
  1293  				return errors.New("contract shouldn't be goodForRenew")
  1294  			}
  1295  		}
  1296  		return nil
  1297  	})
  1298  	if err != nil {
  1299  		t.Fatal(err)
  1300  	}
  1301  
  1302  	// Set the allowance again.
  1303  	if err := renter.RenterPostAllowance(siatest.DefaultAllowance); err != nil {
  1304  		t.Fatal(err)
  1305  	}
  1306  
  1307  	// Mine a block to start the threadedContractMaintenance.
  1308  	if err := tg.Miners()[0].MineBlock(); err != nil {
  1309  		t.Fatal(err)
  1310  	}
  1311  
  1312  	// Give it some time to mark the contracts as goodForUpload and
  1313  	// goodForRenew again.
  1314  	err = build.Retry(200, 100*time.Millisecond, func() error {
  1315  		rc, err := renter.RenterInactiveContractsGet()
  1316  		if err != nil {
  1317  			return err
  1318  		}
  1319  		// Should now only have active contracts.
  1320  		if len(rc.ActiveContracts) != len(tg.Hosts()) {
  1321  			return fmt.Errorf("expected %v active contracts, got %v", len(tg.Hosts()), len(rc.ActiveContracts))
  1322  		}
  1323  		if len(rc.InactiveContracts) != 0 {
  1324  			return fmt.Errorf("expected 0 inactive contracts, got %v", len(rc.InactiveContracts))
  1325  		}
  1326  		for _, c := range rc.ActiveContracts {
  1327  			if !c.GoodForUpload {
  1328  				return errors.New("contract should be goodForUpload")
  1329  			}
  1330  			if !c.GoodForRenew {
  1331  				return errors.New("contract should be goodForRenew")
  1332  			}
  1333  		}
  1334  		return nil
  1335  	})
  1336  	if err != nil {
  1337  		t.Fatal(err)
  1338  	}
  1339  
  1340  	// Test Canceling allowance
  1341  	// Upload a file.
  1342  	dataPieces := uint64(1)
  1343  	parityPieces := uint64(len(tg.Hosts()) - 1)
  1344  	_, rf, err := renter.UploadNewFileBlocking(100, dataPieces, parityPieces)
  1345  	if err != nil {
  1346  		t.Fatal(err)
  1347  	}
  1348  
  1349  	// Cancel the allowance
  1350  	if err := renter.RenterCancelAllowance(); err != nil {
  1351  		t.Fatal(err)
  1352  	}
  1353  
  1354  	// Give it some time to mark the contracts as !goodForUpload and
  1355  	// !goodForRenew.
  1356  	err = build.Retry(200, 100*time.Millisecond, func() error {
  1357  		rc, err := renter.RenterInactiveContractsGet()
  1358  		if err != nil {
  1359  			return err
  1360  		}
  1361  		// Should now have 2 inactive contracts.
  1362  		if len(rc.ActiveContracts) != 0 {
  1363  			return fmt.Errorf("expected 0 active contracts, got %v", len(rc.ActiveContracts))
  1364  		}
  1365  		if len(rc.InactiveContracts) != len(tg.Hosts()) {
  1366  			return fmt.Errorf("expected %v inactive contracts, got %v", len(tg.Hosts()), len(rc.InactiveContracts))
  1367  		}
  1368  		for _, c := range rc.InactiveContracts {
  1369  			if c.GoodForUpload {
  1370  				return errors.New("contract shouldn't be goodForUpload")
  1371  			}
  1372  			if c.GoodForRenew {
  1373  				return errors.New("contract shouldn't be goodForRenew")
  1374  			}
  1375  		}
  1376  		return nil
  1377  	})
  1378  	if err != nil {
  1379  		t.Fatal(err)
  1380  	}
  1381  
  1382  	// Try downloading the file; should succeed.
  1383  	if _, err := renter.DownloadByStream(rf); err != nil {
  1384  		t.Fatal("downloading file failed", err)
  1385  	}
  1386  
  1387  	// Wait for a few seconds to make sure that the upload heap is rebuilt.
  1388  	// The rebuilt interval is 3 seconds. Sleep for 5 to be safe.
  1389  	time.Sleep(5 * time.Second)
  1390  
  1391  	// Try to upload a file after the allowance was cancelled. Should succeed.
  1392  	_, rf2, err := renter.UploadNewFile(100, dataPieces, parityPieces)
  1393  	if err != nil {
  1394  		t.Fatal(err)
  1395  	}
  1396  
  1397  	// Give it some time to upload.
  1398  	time.Sleep(time.Second)
  1399  
  1400  	// Redundancy should still be 0.
  1401  	renterFiles, err := renter.RenterFilesGet()
  1402  	if err != nil {
  1403  		t.Fatal("Failed to get files")
  1404  	}
  1405  	if len(renterFiles.Files) != 2 {
  1406  		t.Fatal("There should be exactly 2 tracked files")
  1407  	}
  1408  	fileInfo, err := renter.File(rf2.SiaPath())
  1409  	if err != nil {
  1410  		t.Fatal(err)
  1411  	}
  1412  	if fileInfo.UploadProgress > 0 || fileInfo.UploadedBytes > 0 || fileInfo.Redundancy > 0 {
  1413  		t.Fatal("Uploading a file after canceling the allowance should fail")
  1414  	}
  1415  
  1416  	// Mine enough blocks for the period to pass and the contracts to expire.
  1417  	miner := tg.Miners()[0]
  1418  	for i := types.BlockHeight(0); i < siatest.DefaultAllowance.Period; i++ {
  1419  		if err := miner.MineBlock(); err != nil {
  1420  			t.Fatal(err)
  1421  		}
  1422  	}
  1423  
  1424  	// All contracts should be archived.
  1425  	err = build.Retry(200, 100*time.Millisecond, func() error {
  1426  		rc, err := renter.RenterInactiveContractsGet()
  1427  		if err != nil {
  1428  			return err
  1429  		}
  1430  		rcExpired, err := renter.RenterExpiredContractsGet()
  1431  		if err != nil {
  1432  			return err
  1433  		}
  1434  		// Should now have num of hosts expired contracts.
  1435  		if len(rc.ActiveContracts) != 0 {
  1436  			return fmt.Errorf("expected 0 active contracts, got %v", len(rc.ActiveContracts))
  1437  		}
  1438  		if len(rc.InactiveContracts) != 0 {
  1439  			return fmt.Errorf("expected 0 inactive contracts, got %v", len(rc.InactiveContracts))
  1440  		}
  1441  		if len(rcExpired.ExpiredContracts) != len(tg.Hosts()) {
  1442  			return fmt.Errorf("expected %v expired contracts, got %v", len(tg.Hosts()), len(rc.InactiveContracts))
  1443  		}
  1444  		return nil
  1445  	})
  1446  	if err != nil {
  1447  		t.Fatal(err)
  1448  	}
  1449  
  1450  	// Try downloading the file; should fail.
  1451  	if _, err := renter.DownloadByStream(rf2); err == nil {
  1452  		t.Fatal("downloading file succeeded even though it shouldnt", err)
  1453  	}
  1454  
  1455  	// The uploaded files should have 0x redundancy now.
  1456  	err = build.Retry(200, 100*time.Millisecond, func() error {
  1457  		rf, err := renter.RenterFilesGet()
  1458  		if err != nil {
  1459  			return errors.New("Failed to get files")
  1460  		}
  1461  		if len(rf.Files) != 2 || rf.Files[0].Redundancy != 0 || rf.Files[1].Redundancy != 0 {
  1462  			return errors.New("file redundancy should be 0 now")
  1463  		}
  1464  		return nil
  1465  	})
  1466  	if err != nil {
  1467  		t.Fatal(err)
  1468  	}
  1469  }
  1470  
  1471  // TestRenterContracts tests the formation of the contracts, the contracts
  1472  // endpoint, and canceling a contract
  1473  func TestRenterContracts(t *testing.T) {
  1474  	if testing.Short() {
  1475  		t.SkipNow()
  1476  	}
  1477  	t.Parallel()
  1478  
  1479  	// Create a group for testing
  1480  	groupParams := siatest.GroupParams{
  1481  		Hosts:   2,
  1482  		Renters: 1,
  1483  		Miners:  1,
  1484  	}
  1485  	testDir := siatest.TestDir(t.Name())
  1486  	tg, err := siatest.NewGroupFromTemplate(testDir, groupParams)
  1487  	if err != nil {
  1488  		t.Fatal("Failed to create group:", err)
  1489  	}
  1490  	defer func() {
  1491  		if err := tg.Close(); err != nil {
  1492  			t.Fatal(err)
  1493  		}
  1494  	}()
  1495  
  1496  	// Get Renter
  1497  	r := tg.Renters()[0]
  1498  	rg, err := r.RenterGet()
  1499  	if err != nil {
  1500  		t.Fatal(err)
  1501  	}
  1502  
  1503  	// Record the start period at the beginning of test
  1504  	currentPeriodStart := rg.CurrentPeriod
  1505  	period := rg.Settings.Allowance.Period
  1506  	renewWindow := rg.Settings.Allowance.RenewWindow
  1507  	numRenewals := 0
  1508  
  1509  	// Check if the current period was set in the past
  1510  	cg, err := r.ConsensusGet()
  1511  	if err != nil {
  1512  		t.Fatal(err)
  1513  	}
  1514  	if currentPeriodStart > cg.Height-renewWindow {
  1515  		t.Fatalf(`Current period not set in the past as expected.
  1516  		CP: %v
  1517  		BH: %v
  1518  		RW: %v
  1519  		`, currentPeriodStart, cg.Height, renewWindow)
  1520  	}
  1521  
  1522  	// Confirm Contracts were created as expected.  There should only be active
  1523  	// contracts and no inactive or expired contracts
  1524  	err = build.Retry(200, 100*time.Millisecond, func() error {
  1525  		rc, err := r.RenterInactiveContractsGet()
  1526  		if err != nil {
  1527  			return err
  1528  		}
  1529  		if len(rc.ActiveContracts) != len(tg.Hosts()) {
  1530  			return fmt.Errorf("Expected %v active contracts, got %v", len(tg.Hosts()), len(rc.ActiveContracts))
  1531  		}
  1532  		if len(rc.InactiveContracts) != 0 {
  1533  			return fmt.Errorf("Expected 0 inactive contracts, got %v", len(rc.InactiveContracts))
  1534  		}
  1535  		rcExpired, err := r.RenterExpiredContractsGet()
  1536  		if err != nil {
  1537  			return err
  1538  		}
  1539  		if len(rcExpired.ExpiredContracts) != 0 {
  1540  			return fmt.Errorf("Expected 0 expired contracts, got %v", len(rcExpired.ExpiredContracts))
  1541  		}
  1542  		return nil
  1543  	})
  1544  	if err != nil {
  1545  		t.Fatal(err)
  1546  	}
  1547  
  1548  	rc, err := r.RenterContractsGet()
  1549  	if err != nil {
  1550  		t.Fatal(err)
  1551  	}
  1552  
  1553  	// Confirm contract end heights were set properly
  1554  	for _, c := range rc.ActiveContracts {
  1555  		if c.EndHeight != currentPeriodStart+period+renewWindow {
  1556  			t.Log("Endheight:", c.EndHeight)
  1557  			t.Log("Allowance Period:", period)
  1558  			t.Log("Renew Window:", renewWindow)
  1559  			t.Log("Current Period:", currentPeriodStart)
  1560  			t.Fatal("Contract endheight not set to Current period + Allowance Period + Renew Window")
  1561  		}
  1562  	}
  1563  
  1564  	// Record original Contracts and create Maps for comparison
  1565  	originalContracts := rc.ActiveContracts
  1566  	originalContractIDMap := make(map[types.FileContractID]struct{})
  1567  	for _, c := range originalContracts {
  1568  		originalContractIDMap[c.ID] = struct{}{}
  1569  	}
  1570  
  1571  	// Mine blocks to force contract renewal
  1572  	if err = renewContractsByRenewWindow(r, tg); err != nil {
  1573  		t.Fatal(err)
  1574  	}
  1575  	numRenewals++
  1576  
  1577  	// Confirm Contracts were renewed as expected, all original contracts should
  1578  	// have been renewed if GoodForRenew = true.  There should be the same
  1579  	// number of active and inactive contracts, and 0 expired contracts since we
  1580  	// are still within the endheight of the original contracts, and the
  1581  	// inactive contracts should be the same contracts as the original active
  1582  	// contracts.
  1583  	err = build.Retry(200, 100*time.Millisecond, func() error {
  1584  		rc, err := r.RenterInactiveContractsGet()
  1585  		if err != nil {
  1586  			return err
  1587  		}
  1588  		if len(originalContracts) != len(rc.InactiveContracts) {
  1589  			return fmt.Errorf("Didn't get expected number of inactive contracts, expected %v got %v", len(originalContracts), len(rc.InactiveContracts))
  1590  		}
  1591  		for _, c := range rc.InactiveContracts {
  1592  			if _, ok := originalContractIDMap[c.ID]; !ok {
  1593  				return errors.New("ID from rc not found in originalContracts")
  1594  			}
  1595  		}
  1596  		rcExpired, err := r.RenterExpiredContractsGet()
  1597  		if err != nil {
  1598  			return err
  1599  		}
  1600  		if len(rcExpired.ExpiredContracts) != 0 {
  1601  			return fmt.Errorf("Expected 0 expired contracts, got %v", len(rcExpired.ExpiredContracts))
  1602  		}
  1603  		// checkContracts will confirm correct number of inactive and active contracts
  1604  		if err = checkContracts(len(tg.Hosts()), numRenewals, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
  1605  			return err
  1606  		}
  1607  		if err = checkRenewedContracts(rc.ActiveContracts); err != nil {
  1608  			return err
  1609  		}
  1610  		return nil
  1611  	})
  1612  	if err != nil {
  1613  		t.Fatal(err)
  1614  	}
  1615  
  1616  	// Confirm contract end heights were set properly End height should be the
  1617  	// end of the next period as the contracts are renewed due to reaching the
  1618  	// renew window
  1619  	rc, err = r.RenterInactiveContractsGet()
  1620  	if err != nil {
  1621  		t.Fatal(err)
  1622  	}
  1623  	for _, c := range rc.ActiveContracts {
  1624  		if c.EndHeight != currentPeriodStart+(2*period)+renewWindow && c.GoodForRenew {
  1625  			t.Log("Endheight:", c.EndHeight)
  1626  			t.Log("Allowance Period:", period)
  1627  			t.Log("Renew Window:", renewWindow)
  1628  			t.Log("Current Period:", currentPeriodStart)
  1629  			t.Fatal("Contract endheight not set to Current period + 2 * Allowance Period + Renew Window")
  1630  		}
  1631  	}
  1632  
  1633  	// Record inactive contracts
  1634  	inactiveContracts := rc.InactiveContracts
  1635  	inactiveContractIDMap := make(map[types.FileContractID]struct{})
  1636  	for _, c := range inactiveContracts {
  1637  		inactiveContractIDMap[c.ID] = struct{}{}
  1638  	}
  1639  
  1640  	// Mine to force inactive contracts to be expired contracts
  1641  	m := tg.Miners()[0]
  1642  	cg, err = r.ConsensusGet()
  1643  	if err != nil {
  1644  		t.Fatal(err)
  1645  	}
  1646  	for i := 0; i < int(inactiveContracts[0].EndHeight-cg.Height+types.MaturityDelay); i++ {
  1647  		if err = m.MineBlock(); err != nil {
  1648  			t.Fatal(err)
  1649  		}
  1650  	}
  1651  
  1652  	// Waiting for nodes to sync
  1653  	if err = tg.Sync(); err != nil {
  1654  		t.Fatal(err)
  1655  	}
  1656  
  1657  	// Confirm contracts, the expired contracts should now be the same contracts
  1658  	// as the previous inactive contracts.
  1659  	err = build.Retry(200, 100*time.Millisecond, func() error {
  1660  		rc, err = r.RenterExpiredContractsGet()
  1661  		if err != nil {
  1662  			return err
  1663  		}
  1664  		if len(rc.ActiveContracts) != len(tg.Hosts()) {
  1665  			return errors.New("Waiting for active contracts to form")
  1666  		}
  1667  		if len(rc.ExpiredContracts) != len(inactiveContracts) {
  1668  			return fmt.Errorf("Expected the same number of expired and inactive contracts; got %v expired and %v inactive", len(rc.ExpiredContracts), len(inactiveContracts))
  1669  		}
  1670  		for _, c := range inactiveContracts {
  1671  			if _, ok := inactiveContractIDMap[c.ID]; !ok {
  1672  				return errors.New("ID from rc not found in inactiveContracts")
  1673  			}
  1674  		}
  1675  		return nil
  1676  	})
  1677  	if err != nil {
  1678  		t.Fatal(err)
  1679  	}
  1680  
  1681  	// Record current active and expired contracts
  1682  	err = build.Retry(200, 100*time.Millisecond, func() error {
  1683  		rc, err = r.RenterContractsGet()
  1684  		if err != nil {
  1685  			return err
  1686  		}
  1687  		if len(rc.ActiveContracts) != len(tg.Hosts()) {
  1688  			return fmt.Errorf("waiting for active contracts to form")
  1689  		}
  1690  		return nil
  1691  	})
  1692  	if err != nil {
  1693  		t.Fatal(err)
  1694  	}
  1695  	rc, err = r.RenterExpiredContractsGet()
  1696  	if err != nil {
  1697  		t.Fatal(err)
  1698  	}
  1699  	activeContracts := rc.ActiveContracts
  1700  	expiredContracts := rc.ExpiredContracts
  1701  	if err != nil {
  1702  		t.Fatal(err)
  1703  	}
  1704  	expiredContractIDMap := make(map[types.FileContractID]struct{})
  1705  	for _, c := range expiredContracts {
  1706  		expiredContractIDMap[c.ID] = struct{}{}
  1707  	}
  1708  
  1709  	// Capturing end height to compare against renewed contracts
  1710  	endHeight := rc.ActiveContracts[0].EndHeight
  1711  
  1712  	// Renew contracts by running out of funds
  1713  	startingUploadSpend, err := renewContractsBySpending(r, tg)
  1714  	if err != nil {
  1715  		r.PrintDebugInfo(t, true, true, true)
  1716  		t.Fatal(err)
  1717  	}
  1718  	numRenewals++
  1719  
  1720  	// Confirm contracts were renewed as expected.  Active contracts prior to
  1721  	// renewal should now be in the inactive contracts
  1722  	err = build.Retry(200, 100*time.Millisecond, func() error {
  1723  		rc, err = r.RenterInactiveContractsGet()
  1724  		if err != nil {
  1725  			return err
  1726  		}
  1727  		if len(rc.ActiveContracts) != len(tg.Hosts()) {
  1728  			return errors.New("Waiting for active contracts to form")
  1729  		}
  1730  		rcExpired, err := r.RenterExpiredContractsGet()
  1731  		if err != nil {
  1732  			return err
  1733  		}
  1734  
  1735  		// Confirm active and inactive contracts
  1736  		inactiveContractIDMap := make(map[types.FileContractID]struct{})
  1737  		for _, c := range rc.InactiveContracts {
  1738  			inactiveContractIDMap[c.ID] = struct{}{}
  1739  		}
  1740  		for _, c := range activeContracts {
  1741  			if _, ok := inactiveContractIDMap[c.ID]; !ok && c.UploadSpending.Cmp(startingUploadSpend) <= 0 {
  1742  				return errors.New("ID from activeContacts not found in rc")
  1743  			}
  1744  		}
  1745  
  1746  		// Confirm expired contracts
  1747  		if len(expiredContracts) != len(rcExpired.ExpiredContracts) {
  1748  			return fmt.Errorf("Didn't get expected number of expired contracts, expected %v got %v", len(expiredContracts), len(rcExpired.ExpiredContracts))
  1749  		}
  1750  		for _, c := range rcExpired.ExpiredContracts {
  1751  			if _, ok := expiredContractIDMap[c.ID]; !ok {
  1752  				return errors.New("ID from rcExpired not found in expiredContracts")
  1753  			}
  1754  		}
  1755  
  1756  		return nil
  1757  	})
  1758  	if err != nil {
  1759  		t.Fatal(err)
  1760  	}
  1761  
  1762  	// Confirm contract end heights were set properly
  1763  	// End height should not have changed since the renewal
  1764  	// was due to running out of funds
  1765  	rc, err = r.RenterContractsGet()
  1766  	if err != nil {
  1767  		t.Fatal(err)
  1768  	}
  1769  	for _, c := range rc.ActiveContracts {
  1770  		if c.EndHeight != endHeight && c.GoodForRenew && c.UploadSpending.Cmp(startingUploadSpend) <= 0 {
  1771  			t.Log("Allowance Period:", period)
  1772  			t.Log("Current Period:", currentPeriodStart)
  1773  			t.Fatalf("Contract endheight Changed, EH was %v, expected %v\n", c.EndHeight, endHeight)
  1774  		}
  1775  	}
  1776  
  1777  	// Mine blocks to force contract renewal to start with fresh set of contracts
  1778  	if err = renewContractsByRenewWindow(r, tg); err != nil {
  1779  		t.Fatal(err)
  1780  	}
  1781  	numRenewals++
  1782  
  1783  	// Confirm Contracts were renewed as expected
  1784  	err = build.Retry(200, 100*time.Millisecond, func() error {
  1785  		rc, err := r.RenterInactiveContractsGet()
  1786  		if err != nil {
  1787  			return err
  1788  		}
  1789  		rcExpired, err := r.RenterExpiredContractsGet()
  1790  		if err != nil {
  1791  			return err
  1792  		}
  1793  		// checkContracts will confirm correct number of inactive and active contracts
  1794  		if err = checkContracts(len(tg.Hosts()), numRenewals, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
  1795  			return err
  1796  		}
  1797  		return nil
  1798  	})
  1799  	if err != nil {
  1800  		t.Fatal(err)
  1801  	}
  1802  
  1803  	// Test canceling contract
  1804  	// Grab contract to cancel
  1805  	rc, err = r.RenterContractsGet()
  1806  	if err != nil {
  1807  		t.Fatal(err)
  1808  	}
  1809  	contract := rc.ActiveContracts[0]
  1810  	// Cancel Contract
  1811  	if err := r.RenterContractCancelPost(contract.ID); err != nil {
  1812  		t.Fatal(err)
  1813  	}
  1814  
  1815  	// Add a new host so new contract can be formed
  1816  	hostParams := node.Host(testDir + "/host")
  1817  	_, err = tg.AddNodes(hostParams)
  1818  	if err != nil {
  1819  		t.Fatal(err)
  1820  	}
  1821  
  1822  	err = build.Retry(200, 100*time.Millisecond, func() error {
  1823  		// Check that Contract is now in inactive contracts and no longer in Active contracts
  1824  		rc, err = r.RenterInactiveContractsGet()
  1825  		if err != nil {
  1826  			return err
  1827  		}
  1828  		// Confirm Renter has the expected number of contracts, meaning canceled contract should have been replaced.
  1829  		if len(rc.ActiveContracts) < len(tg.Hosts())-1 {
  1830  			return fmt.Errorf("Canceled contract was not replaced, only %v active contracts, expected at least %v", len(rc.ActiveContracts), len(tg.Hosts())-1)
  1831  		}
  1832  		for _, c := range rc.ActiveContracts {
  1833  			if c.ID == contract.ID {
  1834  				return errors.New("Contract not cancelled, contract found in Active Contracts")
  1835  			}
  1836  		}
  1837  		i := 1
  1838  		for _, c := range rc.InactiveContracts {
  1839  			if c.ID == contract.ID {
  1840  				break
  1841  			}
  1842  			if i == len(rc.InactiveContracts) {
  1843  				return errors.New("Contract not found in Inactive Contracts")
  1844  			}
  1845  			i++
  1846  		}
  1847  		return nil
  1848  	})
  1849  	if err != nil {
  1850  		t.Fatal(err)
  1851  	}
  1852  }
  1853  
  1854  // TestRenterPersistData checks if the RenterSettings are persisted
  1855  func TestRenterPersistData(t *testing.T) {
  1856  	if testing.Short() {
  1857  		t.SkipNow()
  1858  	}
  1859  	t.Parallel()
  1860  
  1861  	// Get test directory
  1862  	testDir := renterTestDir(t.Name())
  1863  
  1864  	// Copying legacy file to test directory
  1865  	renterDir := filepath.Join(testDir, "renter")
  1866  	destination := filepath.Join(renterDir, "renter.json")
  1867  	err := os.MkdirAll(renterDir, 0700)
  1868  	if err != nil {
  1869  		t.Fatal(err)
  1870  	}
  1871  	from, err := os.Open("../../compatibility/renter_v04.json")
  1872  	if err != nil {
  1873  		t.Fatal(err)
  1874  	}
  1875  	to, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE, 0700)
  1876  	if err != nil {
  1877  		t.Fatal(err)
  1878  	}
  1879  	_, err = io.Copy(to, from)
  1880  	if err != nil {
  1881  		t.Fatal(err)
  1882  	}
  1883  	if err = from.Close(); err != nil {
  1884  		t.Fatal(err)
  1885  	}
  1886  	if err = to.Close(); err != nil {
  1887  		t.Fatal(err)
  1888  	}
  1889  
  1890  	// Create new node from legacy renter.json persistence file
  1891  	r, err := siatest.NewNode(node.AllModules(testDir))
  1892  	if err != nil {
  1893  		t.Fatal(err)
  1894  	}
  1895  	defer func() {
  1896  		if err = r.Close(); err != nil {
  1897  			t.Fatal(err)
  1898  		}
  1899  	}()
  1900  
  1901  	// Set renter allowance to finish renter set up
  1902  	// Currently /renter POST endpoint errors if the allowance
  1903  	// is not previously set or passed in as an argument
  1904  	err = r.RenterPostAllowance(siatest.DefaultAllowance)
  1905  	if err != nil {
  1906  		t.Fatal(err)
  1907  	}
  1908  
  1909  	// Check Settings, should be defaults
  1910  	rg, err := r.RenterGet()
  1911  	if err != nil {
  1912  		t.Fatal(err)
  1913  	}
  1914  	if rg.Settings.StreamCacheSize != renter.DefaultStreamCacheSize {
  1915  		t.Fatalf("StreamCacheSize not set to default of %v, set to %v",
  1916  			renter.DefaultStreamCacheSize, rg.Settings.StreamCacheSize)
  1917  	}
  1918  	if rg.Settings.MaxDownloadSpeed != renter.DefaultMaxDownloadSpeed {
  1919  		t.Fatalf("MaxDownloadSpeed not set to default of %v, set to %v",
  1920  			renter.DefaultMaxDownloadSpeed, rg.Settings.MaxDownloadSpeed)
  1921  	}
  1922  	if rg.Settings.MaxUploadSpeed != renter.DefaultMaxUploadSpeed {
  1923  		t.Fatalf("MaxUploadSpeed not set to default of %v, set to %v",
  1924  			renter.DefaultMaxUploadSpeed, rg.Settings.MaxUploadSpeed)
  1925  	}
  1926  
  1927  	// Set StreamCacheSize, MaxDownloadSpeed, and MaxUploadSpeed to new values
  1928  	cacheSize := uint64(4)
  1929  	ds := int64(20)
  1930  	us := int64(10)
  1931  	if err := r.RenterSetStreamCacheSizePost(cacheSize); err != nil {
  1932  		t.Fatalf("%v: Could not set StreamCacheSize to %v", err, cacheSize)
  1933  	}
  1934  	if err := r.RenterPostRateLimit(ds, us); err != nil {
  1935  		t.Fatalf("%v: Could not set RateLimits to %v and %v", err, ds, us)
  1936  	}
  1937  
  1938  	// Confirm Settings were updated
  1939  	rg, err = r.RenterGet()
  1940  	if err != nil {
  1941  		t.Fatal(err)
  1942  	}
  1943  	if rg.Settings.StreamCacheSize != cacheSize {
  1944  		t.Fatalf("StreamCacheSize not set to %v, set to %v", cacheSize, rg.Settings.StreamCacheSize)
  1945  	}
  1946  	if rg.Settings.MaxDownloadSpeed != ds {
  1947  		t.Fatalf("MaxDownloadSpeed not set to %v, set to %v", ds, rg.Settings.MaxDownloadSpeed)
  1948  	}
  1949  	if rg.Settings.MaxUploadSpeed != us {
  1950  		t.Fatalf("MaxUploadSpeed not set to %v, set to %v", us, rg.Settings.MaxUploadSpeed)
  1951  	}
  1952  
  1953  	// Restart node
  1954  	err = r.RestartNode()
  1955  	if err != nil {
  1956  		t.Fatal("Failed to restart node:", err)
  1957  	}
  1958  
  1959  	// check Settings, settings should be values set through API endpoints
  1960  	rg, err = r.RenterGet()
  1961  	if err != nil {
  1962  		t.Fatal(err)
  1963  	}
  1964  	if rg.Settings.StreamCacheSize != cacheSize {
  1965  		t.Fatalf("StreamCacheSize not persisted as %v, set to %v", cacheSize, rg.Settings.StreamCacheSize)
  1966  	}
  1967  	if rg.Settings.MaxDownloadSpeed != ds {
  1968  		t.Fatalf("MaxDownloadSpeed not persisted as %v, set to %v", ds, rg.Settings.MaxDownloadSpeed)
  1969  	}
  1970  	if rg.Settings.MaxUploadSpeed != us {
  1971  		t.Fatalf("MaxUploadSpeed not persisted as %v, set to %v", us, rg.Settings.MaxUploadSpeed)
  1972  	}
  1973  }
  1974  
  1975  // TestRenterSpendingReporting checks the accuracy for the reported
  1976  // spending
  1977  func TestRenterSpendingReporting(t *testing.T) {
  1978  	if testing.Short() {
  1979  		t.SkipNow()
  1980  	}
  1981  	t.Parallel()
  1982  
  1983  	// Create a testgroup, creating without renter so the renter's
  1984  	// initial balance can be obtained
  1985  	groupParams := siatest.GroupParams{
  1986  		Hosts:  2,
  1987  		Miners: 1,
  1988  	}
  1989  	testDir := renterTestDir(t.Name())
  1990  	tg, err := siatest.NewGroupFromTemplate(testDir, groupParams)
  1991  	if err != nil {
  1992  		t.Fatal("Failed to create group: ", err)
  1993  	}
  1994  	defer func() {
  1995  		if err := tg.Close(); err != nil {
  1996  			t.Fatal(err)
  1997  		}
  1998  	}()
  1999  
  2000  	// Add a Renter node
  2001  	renterParams := node.Renter(filepath.Join(testDir, "renter"))
  2002  	renterParams.SkipSetAllowance = true
  2003  	nodes, err := tg.AddNodes(renterParams)
  2004  	if err != nil {
  2005  		t.Fatal(err)
  2006  	}
  2007  	r := nodes[0]
  2008  
  2009  	// Get largest WindowSize from Hosts
  2010  	var windowSize types.BlockHeight
  2011  	for _, h := range tg.Hosts() {
  2012  		hg, err := h.HostGet()
  2013  		if err != nil {
  2014  			t.Fatal(err)
  2015  		}
  2016  		if hg.ExternalSettings.WindowSize >= windowSize {
  2017  			windowSize = hg.ExternalSettings.WindowSize
  2018  		}
  2019  	}
  2020  
  2021  	// Get renter's initial siacoin balance
  2022  	wg, err := r.WalletGet()
  2023  	if err != nil {
  2024  		t.Fatal("Failed to get wallet:", err)
  2025  	}
  2026  	initialBalance := wg.ConfirmedSiacoinBalance
  2027  
  2028  	// Set allowance
  2029  	if err = tg.SetRenterAllowance(r, siatest.DefaultAllowance); err != nil {
  2030  		t.Fatal("Failed to set renter allowance:", err)
  2031  	}
  2032  	numRenewals := 0
  2033  
  2034  	// Confirm Contracts were created as expected, check that the funds
  2035  	// allocated when setting the allowance are reflected correctly in the
  2036  	// wallet balance
  2037  	err = build.Retry(200, 100*time.Millisecond, func() error {
  2038  		rc, err := r.RenterInactiveContractsGet()
  2039  		if err != nil {
  2040  			return err
  2041  		}
  2042  		rcExpired, err := r.RenterExpiredContractsGet()
  2043  		if err != nil {
  2044  			return err
  2045  		}
  2046  		if err = checkContracts(len(tg.Hosts()), numRenewals, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
  2047  			return err
  2048  		}
  2049  		err = checkBalanceVsSpending(r, initialBalance)
  2050  		if err != nil {
  2051  			return err
  2052  		}
  2053  		return nil
  2054  	})
  2055  	if err != nil {
  2056  		t.Fatal(err)
  2057  	}
  2058  
  2059  	// Upload and download files to show spending
  2060  	var remoteFiles []*siatest.RemoteFile
  2061  	for i := 0; i < 10; i++ {
  2062  		dataPieces := uint64(1)
  2063  		parityPieces := uint64(1)
  2064  		fileSize := 100 + siatest.Fuzz()
  2065  		_, rf, err := r.UploadNewFileBlocking(fileSize, dataPieces, parityPieces)
  2066  		if err != nil {
  2067  			t.Fatal("Failed to upload a file for testing: ", err)
  2068  		}
  2069  		remoteFiles = append(remoteFiles, rf)
  2070  	}
  2071  	for _, rf := range remoteFiles {
  2072  		_, err = r.DownloadToDisk(rf, false)
  2073  		if err != nil {
  2074  			t.Fatal("Could not DownloadToDisk:", err)
  2075  		}
  2076  	}
  2077  
  2078  	// Check to confirm upload and download spending was captured correctly
  2079  	// and reflected in the wallet balance
  2080  	err = build.Retry(200, 100*time.Millisecond, func() error {
  2081  		err = checkBalanceVsSpending(r, initialBalance)
  2082  		if err != nil {
  2083  			return err
  2084  		}
  2085  		return nil
  2086  	})
  2087  	if err != nil {
  2088  		t.Fatal(err)
  2089  	}
  2090  
  2091  	// Mine blocks to force contract renewal
  2092  	if err = renewContractsByRenewWindow(r, tg); err != nil {
  2093  		t.Fatal(err)
  2094  	}
  2095  	numRenewals++
  2096  
  2097  	// Confirm Contracts were renewed as expected
  2098  	err = build.Retry(200, 100*time.Millisecond, func() error {
  2099  		rc, err := r.RenterInactiveContractsGet()
  2100  		if err != nil {
  2101  			return err
  2102  		}
  2103  		rcExpired, err := r.RenterExpiredContractsGet()
  2104  		if err != nil {
  2105  			return err
  2106  		}
  2107  		if err = checkContracts(len(tg.Hosts()), numRenewals, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
  2108  			return err
  2109  		}
  2110  		if err = checkRenewedContracts(rc.ActiveContracts); err != nil {
  2111  			return err
  2112  		}
  2113  		return nil
  2114  	})
  2115  	if err != nil {
  2116  		t.Fatal(err)
  2117  	}
  2118  
  2119  	// Mine Block to confirm contracts and spending into blockchain
  2120  	m := tg.Miners()[0]
  2121  	if err = m.MineBlock(); err != nil {
  2122  		t.Fatal(err)
  2123  	}
  2124  
  2125  	// Waiting for nodes to sync
  2126  	if err = tg.Sync(); err != nil {
  2127  		t.Fatal(err)
  2128  	}
  2129  
  2130  	// Check contract spending against reported spending
  2131  	rc, err := r.RenterInactiveContractsGet()
  2132  	if err != nil {
  2133  		t.Fatal(err)
  2134  	}
  2135  	rcExpired, err := r.RenterExpiredContractsGet()
  2136  	if err != nil {
  2137  		t.Fatal(err)
  2138  	}
  2139  	if err = checkContractVsReportedSpending(r, windowSize, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
  2140  		t.Fatal(err)
  2141  	}
  2142  
  2143  	// Check to confirm reported spending is still accurate with the renewed contracts
  2144  	// and reflected in the wallet balance
  2145  	err = build.Retry(200, 100*time.Millisecond, func() error {
  2146  		err = checkBalanceVsSpending(r, initialBalance)
  2147  		if err != nil {
  2148  			return err
  2149  		}
  2150  		return nil
  2151  	})
  2152  	if err != nil {
  2153  		t.Fatal(err)
  2154  	}
  2155  
  2156  	// Record current Wallet Balance
  2157  	wg, err = r.WalletGet()
  2158  	if err != nil {
  2159  		t.Fatal("Failed to get wallet:", err)
  2160  	}
  2161  	initialPeriodEndBalance := wg.ConfirmedSiacoinBalance
  2162  
  2163  	// Mine blocks to force contract renewal and new period
  2164  	cg, err := r.ConsensusGet()
  2165  	if err != nil {
  2166  		t.Fatal("Failed to get consensus:", err)
  2167  	}
  2168  	blockHeight := cg.Height
  2169  	endHeight := rc.ActiveContracts[0].EndHeight
  2170  	rg, err := r.RenterGet()
  2171  	if err != nil {
  2172  		t.Fatal("Failed to get renter:", err)
  2173  	}
  2174  	rw := rg.Settings.Allowance.RenewWindow
  2175  	for i := 0; i < int(endHeight-rw-blockHeight+types.MaturityDelay); i++ {
  2176  		if err = m.MineBlock(); err != nil {
  2177  			t.Fatal(err)
  2178  		}
  2179  	}
  2180  	numRenewals++
  2181  
  2182  	// Waiting for nodes to sync
  2183  	if err = tg.Sync(); err != nil {
  2184  		t.Fatal(err)
  2185  	}
  2186  
  2187  	// Check if Unspent unallocated funds were released after allowance period
  2188  	// was exceeded
  2189  	wg, err = r.WalletGet()
  2190  	if err != nil {
  2191  		t.Fatal("Failed to get wallet:", err)
  2192  	}
  2193  	if initialPeriodEndBalance.Cmp(wg.ConfirmedSiacoinBalance) > 0 {
  2194  		t.Fatal("Unspent Unallocated funds not released after contract renewal and maturity delay")
  2195  	}
  2196  
  2197  	// Confirm Contracts were renewed as expected
  2198  	err = build.Retry(200, 100*time.Millisecond, func() error {
  2199  		rc, err := r.RenterInactiveContractsGet()
  2200  		if err != nil {
  2201  			return err
  2202  		}
  2203  		rcExpired, err := r.RenterExpiredContractsGet()
  2204  		if err != nil {
  2205  			return err
  2206  		}
  2207  		if err = checkContracts(len(tg.Hosts()), numRenewals, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
  2208  			return err
  2209  		}
  2210  		if err = checkRenewedContracts(rc.ActiveContracts); err != nil {
  2211  			return err
  2212  		}
  2213  		return nil
  2214  	})
  2215  	if err != nil {
  2216  		t.Fatal(err)
  2217  	}
  2218  
  2219  	// Mine Block to confirm contracts and spending on blockchain
  2220  	if err = m.MineBlock(); err != nil {
  2221  		t.Fatal(err)
  2222  	}
  2223  
  2224  	// Waiting for nodes to sync
  2225  	if err = tg.Sync(); err != nil {
  2226  		t.Fatal(err)
  2227  	}
  2228  
  2229  	// Check contract spending against reported spending
  2230  	rc, err = r.RenterInactiveContractsGet()
  2231  	if err != nil {
  2232  		t.Fatal(err)
  2233  	}
  2234  	rcExpired, err = r.RenterExpiredContractsGet()
  2235  	if err != nil {
  2236  		t.Fatal(err)
  2237  	}
  2238  	if err = checkContractVsReportedSpending(r, windowSize, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
  2239  		t.Fatal(err)
  2240  	}
  2241  
  2242  	// Check to confirm reported spending is still accurate with the renewed contracts
  2243  	// and a new period and reflected in the wallet balance
  2244  	err = build.Retry(200, 100*time.Millisecond, func() error {
  2245  		err = checkBalanceVsSpending(r, initialBalance)
  2246  		if err != nil {
  2247  			return err
  2248  		}
  2249  		return nil
  2250  	})
  2251  	if err != nil {
  2252  		t.Fatal(err)
  2253  	}
  2254  
  2255  	// Renew contracts by running out of funds
  2256  	_, err = renewContractsBySpending(r, tg)
  2257  	if err != nil {
  2258  		r.PrintDebugInfo(t, true, true, true)
  2259  		t.Fatal(err)
  2260  	}
  2261  	numRenewals++
  2262  
  2263  	// Confirm Contracts were renewed as expected
  2264  	err = build.Retry(200, 100*time.Millisecond, func() error {
  2265  		rc, err := r.RenterInactiveContractsGet()
  2266  		if err != nil {
  2267  			return err
  2268  		}
  2269  		rcExpired, err := r.RenterExpiredContractsGet()
  2270  		if err != nil {
  2271  			return err
  2272  		}
  2273  		if err = checkContracts(len(tg.Hosts()), numRenewals, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
  2274  			return err
  2275  		}
  2276  		if err = checkRenewedContracts(rc.ActiveContracts); err != nil {
  2277  			return err
  2278  		}
  2279  		return nil
  2280  	})
  2281  	if err != nil {
  2282  		t.Fatal(err)
  2283  	}
  2284  
  2285  	// Mine Block to confirm contracts and spending on blockchain
  2286  	if err = m.MineBlock(); err != nil {
  2287  		t.Fatal(err)
  2288  	}
  2289  
  2290  	// Waiting for nodes to sync
  2291  	if err = tg.Sync(); err != nil {
  2292  		t.Fatal(err)
  2293  	}
  2294  
  2295  	// Check contract spending against reported spending
  2296  	rc, err = r.RenterInactiveContractsGet()
  2297  	if err != nil {
  2298  		t.Fatal(err)
  2299  	}
  2300  	rcExpired, err = r.RenterExpiredContractsGet()
  2301  	if err != nil {
  2302  		t.Fatal(err)
  2303  	}
  2304  	if err = checkContractVsReportedSpending(r, windowSize, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
  2305  		t.Fatal(err)
  2306  	}
  2307  
  2308  	// Check to confirm reported spending is still accurate with the renewed contracts
  2309  	// and a new period and reflected in the wallet balance
  2310  	err = build.Retry(200, 100*time.Millisecond, func() error {
  2311  		err = checkBalanceVsSpending(r, initialBalance)
  2312  		if err != nil {
  2313  			return err
  2314  		}
  2315  		return nil
  2316  	})
  2317  	if err != nil {
  2318  		t.Fatal(err)
  2319  	}
  2320  
  2321  	// Mine blocks to force contract renewal
  2322  	if err = renewContractsByRenewWindow(r, tg); err != nil {
  2323  		t.Fatal(err)
  2324  	}
  2325  	numRenewals++
  2326  
  2327  	// Confirm Contracts were renewed as expected
  2328  	err = build.Retry(200, 100*time.Millisecond, func() error {
  2329  		rc, err := r.RenterInactiveContractsGet()
  2330  		if err != nil {
  2331  			return err
  2332  		}
  2333  		rcExpired, err := r.RenterExpiredContractsGet()
  2334  		if err != nil {
  2335  			return err
  2336  		}
  2337  		if err = checkContracts(len(tg.Hosts()), numRenewals, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
  2338  			return err
  2339  		}
  2340  		if err = checkRenewedContracts(rc.ActiveContracts); err != nil {
  2341  			return err
  2342  		}
  2343  		return nil
  2344  	})
  2345  	if err != nil {
  2346  		t.Fatal(err)
  2347  	}
  2348  
  2349  	// Mine Block to confirm contracts and spending into blockchain
  2350  	if err = m.MineBlock(); err != nil {
  2351  		t.Fatal(err)
  2352  	}
  2353  
  2354  	// Waiting for nodes to sync
  2355  	if err = tg.Sync(); err != nil {
  2356  		t.Fatal(err)
  2357  	}
  2358  
  2359  	// Check contract spending against reported spending
  2360  	rc, err = r.RenterInactiveContractsGet()
  2361  	if err != nil {
  2362  		t.Fatal(err)
  2363  	}
  2364  	rcExpired, err = r.RenterExpiredContractsGet()
  2365  	if err != nil {
  2366  		t.Fatal(err)
  2367  	}
  2368  	if err = checkContractVsReportedSpending(r, windowSize, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
  2369  		t.Fatal(err)
  2370  	}
  2371  
  2372  	// Check to confirm reported spending is still accurate with the renewed contracts
  2373  	// and reflected in the wallet balance
  2374  	err = build.Retry(200, 100*time.Millisecond, func() error {
  2375  		err = checkBalanceVsSpending(r, initialBalance)
  2376  		if err != nil {
  2377  			return err
  2378  		}
  2379  		return nil
  2380  	})
  2381  	if err != nil {
  2382  		t.Fatal(err)
  2383  	}
  2384  }
  2385  
  2386  // TestZeroByteFile tests uploading and downloading a 0 and 1 byte file
  2387  func TestZeroByteFile(t *testing.T) {
  2388  	if testing.Short() {
  2389  		t.SkipNow()
  2390  	}
  2391  	t.Parallel()
  2392  
  2393  	// Create a testgroup, creating without renter so the renter's
  2394  	// initial balance can be obtained
  2395  	groupParams := siatest.GroupParams{
  2396  		Hosts:   2,
  2397  		Miners:  1,
  2398  		Renters: 1,
  2399  	}
  2400  	testDir := renterTestDir(t.Name())
  2401  	tg, err := siatest.NewGroupFromTemplate(testDir, groupParams)
  2402  	if err != nil {
  2403  		t.Fatal("Failed to create group: ", err)
  2404  	}
  2405  	defer func() {
  2406  		if err := tg.Close(); err != nil {
  2407  			t.Fatal(err)
  2408  		}
  2409  	}()
  2410  
  2411  	// Grab renter
  2412  	r := tg.Renters()[0]
  2413  
  2414  	// Create 0 and 1 byte file
  2415  	zeroByteFile := 0
  2416  	oneByteFile := 1
  2417  
  2418  	// Test uploading 0 byte file
  2419  	dataPieces := uint64(1)
  2420  	parityPieces := uint64(len(tg.Hosts())) - dataPieces
  2421  	redundancy := float64((dataPieces + parityPieces) / dataPieces)
  2422  	_, zeroRF, err := r.UploadNewFile(zeroByteFile, dataPieces, parityPieces)
  2423  	if err != nil {
  2424  		t.Fatal(err)
  2425  	}
  2426  	// Get renter files, should only be 0 byte file
  2427  	rf, err := r.RenterFilesGet()
  2428  	if err != nil {
  2429  		t.Fatal(err)
  2430  	}
  2431  	if len(rf.Files) != 1 {
  2432  		t.Fatalf("Expected 1 file, got %v", len(rf.Files))
  2433  	}
  2434  	// Check redundancy and upload progress
  2435  	if rf.Files[0].Redundancy != redundancy {
  2436  		t.Fatalf("Expected redundancy to be %v, got %v", redundancy, rf.Files[0].Redundancy)
  2437  	}
  2438  	if rf.Files[0].UploadProgress != 100 {
  2439  		t.Fatalf("Expected upload progress to be 100, got %v", rf.Files[0].UploadProgress)
  2440  	}
  2441  
  2442  	// Test uploading 1 byte file
  2443  	_, oneRF, err := r.UploadNewFileBlocking(oneByteFile, dataPieces, parityPieces)
  2444  	if err != nil {
  2445  		t.Fatal(err)
  2446  	}
  2447  
  2448  	// Test downloading 0 byte file
  2449  	_, err = r.DownloadToDisk(zeroRF, false)
  2450  	if err != nil {
  2451  		t.Fatal(err)
  2452  	}
  2453  
  2454  	// Test downloading 1 byte file
  2455  	_, err = r.DownloadToDisk(oneRF, false)
  2456  	if err != nil {
  2457  		t.Fatal(err)
  2458  	}
  2459  }
  2460  
  2461  // The following are helper functions for the renter tests
  2462  
  2463  // checkBalanceVsSpending checks the renters confirmed siacoin balance in their
  2464  // wallet against their reported spending
  2465  func checkBalanceVsSpending(r *siatest.TestNode, initialBalance types.Currency) error {
  2466  	// Getting initial financial metrics
  2467  	// Setting variables to easier reference
  2468  	rg, err := r.RenterGet()
  2469  	if err != nil {
  2470  		return err
  2471  	}
  2472  	fm := rg.FinancialMetrics
  2473  
  2474  	// Check balance after allowance is set
  2475  	wg, err := r.WalletGet()
  2476  	if err != nil {
  2477  		return err
  2478  	}
  2479  	expectedBalance := initialBalance.Sub(fm.TotalAllocated).Sub(fm.WithheldFunds).Sub(fm.PreviousSpending)
  2480  	if expectedBalance.Cmp(wg.ConfirmedSiacoinBalance) != 0 {
  2481  		details := fmt.Sprintf(`Initial balance minus Renter Reported Spending does not equal wallet Confirmed Siacoin Balance
  2482  		Expected Balance:   %v
  2483  		Wallet Balance:     %v
  2484  		Actual difference:  %v
  2485  		ExpectedBalance:    %v
  2486  		walletBalance:      %v
  2487  		`, expectedBalance.HumanString(), wg.ConfirmedSiacoinBalance.HumanString(), initialBalance.Sub(wg.ConfirmedSiacoinBalance).HumanString(),
  2488  			expectedBalance.HumanString(), wg.ConfirmedSiacoinBalance.HumanString())
  2489  		var diff string
  2490  		if expectedBalance.Cmp(wg.ConfirmedSiacoinBalance) > 0 {
  2491  			diff = fmt.Sprintf("Under reported by:  %v\n", expectedBalance.Sub(wg.ConfirmedSiacoinBalance).HumanString())
  2492  		} else {
  2493  			diff = fmt.Sprintf("Over reported by:   %v\n", wg.ConfirmedSiacoinBalance.Sub(expectedBalance).HumanString())
  2494  		}
  2495  		err := details + diff
  2496  		return errors.New(err)
  2497  	}
  2498  	return nil
  2499  }
  2500  
  2501  // checkContracts confirms that contracts are renewed as expected, renewed
  2502  // contracts should be the renter's active contracts and oldContracts should be
  2503  // the renter's inactive and expired contracts
  2504  func checkContracts(numHosts, numRenewals int, oldContracts, renewedContracts []api.RenterContract) error {
  2505  	if len(renewedContracts) != numHosts {
  2506  		return fmt.Errorf("Incorrect number of Active contracts: have %v expected %v", len(renewedContracts), numHosts)
  2507  	}
  2508  	if len(oldContracts) == 0 && numRenewals == 0 {
  2509  		return nil
  2510  	}
  2511  	// Confirm contracts were renewed, this will also mean there are old contracts
  2512  	// Verify there are not more renewedContracts than there are oldContracts
  2513  	// This would mean contracts are not getting archived
  2514  	if len(oldContracts) < len(renewedContracts) {
  2515  		return errors.New("Too many renewed contracts")
  2516  	}
  2517  	if len(oldContracts) != numHosts*numRenewals {
  2518  		return fmt.Errorf("Incorrect number of Old contracts: have %v expected %v", len(oldContracts), numHosts*numRenewals)
  2519  	}
  2520  
  2521  	// Create Maps for comparison
  2522  	initialContractIDMap := make(map[types.FileContractID]struct{})
  2523  	initialContractKeyMap := make(map[crypto.Hash]struct{})
  2524  	for _, c := range oldContracts {
  2525  		initialContractIDMap[c.ID] = struct{}{}
  2526  		initialContractKeyMap[crypto.HashBytes(c.HostPublicKey.Key)] = struct{}{}
  2527  	}
  2528  
  2529  	for _, c := range renewedContracts {
  2530  		// Verify that all the contracts marked as GoodForRenew
  2531  		// were renewed
  2532  		if _, ok := initialContractIDMap[c.ID]; ok {
  2533  			return errors.New("ID from renewedContracts found in oldContracts")
  2534  		}
  2535  		// Verifying that Renewed Contracts have the same HostPublicKey
  2536  		// as an initial contract
  2537  		if _, ok := initialContractKeyMap[crypto.HashBytes(c.HostPublicKey.Key)]; !ok {
  2538  			return errors.New("Host Public Key from renewedContracts not found in oldContracts")
  2539  		}
  2540  	}
  2541  	return nil
  2542  }
  2543  
  2544  // checkContractVsReportedSpending confirms that the spending recorded in the
  2545  // renter's contracts matches the reported spending for the renter. Renewed
  2546  // contracts should be the renter's active contracts and oldContracts should be
  2547  // the renter's inactive and expired contracts
  2548  func checkContractVsReportedSpending(r *siatest.TestNode, WindowSize types.BlockHeight, oldContracts, renewedContracts []api.RenterContract) error {
  2549  	// Get Current BlockHeight
  2550  	cg, err := r.ConsensusGet()
  2551  	if err != nil {
  2552  		return err
  2553  	}
  2554  
  2555  	// Getting financial metrics after uploads, downloads, and
  2556  	// contract renewal
  2557  	rg, err := r.RenterGet()
  2558  	if err != nil {
  2559  		return err
  2560  	}
  2561  
  2562  	fm := rg.FinancialMetrics
  2563  	totalSpent := fm.ContractFees.Add(fm.UploadSpending).
  2564  		Add(fm.DownloadSpending).Add(fm.StorageSpending)
  2565  	total := totalSpent.Add(fm.Unspent)
  2566  	allowance := rg.Settings.Allowance
  2567  
  2568  	// Check that renter financial metrics add up to allowance
  2569  	if total.Cmp(allowance.Funds) != 0 {
  2570  		return fmt.Errorf(`Combined Total of reported spending and unspent funds not equal to allowance:
  2571  			total:     %v
  2572  			allowance: %v
  2573  			`, total.HumanString(), allowance.Funds.HumanString())
  2574  	}
  2575  
  2576  	// Check renter financial metrics against contract spending
  2577  	var spending modules.ContractorSpending
  2578  	for _, contract := range oldContracts {
  2579  		if contract.StartHeight >= rg.CurrentPeriod {
  2580  			// Calculate ContractFees
  2581  			spending.ContractFees = spending.ContractFees.Add(contract.Fees)
  2582  			// Calculate TotalAllocated
  2583  			spending.TotalAllocated = spending.TotalAllocated.Add(contract.TotalCost)
  2584  			// Calculate Spending
  2585  			spending.DownloadSpending = spending.DownloadSpending.Add(contract.DownloadSpending)
  2586  			spending.UploadSpending = spending.UploadSpending.Add(contract.UploadSpending)
  2587  			spending.StorageSpending = spending.StorageSpending.Add(contract.StorageSpending)
  2588  		} else if contract.EndHeight+WindowSize+types.MaturityDelay > cg.Height {
  2589  			// Calculated funds that are being withheld in contracts
  2590  			spending.WithheldFunds = spending.WithheldFunds.Add(contract.RenterFunds)
  2591  			// Record the largest window size for worst case when reporting the spending
  2592  			if contract.EndHeight+WindowSize+types.MaturityDelay >= spending.ReleaseBlock {
  2593  				spending.ReleaseBlock = contract.EndHeight + WindowSize + types.MaturityDelay
  2594  			}
  2595  			// Calculate Previous spending
  2596  			spending.PreviousSpending = spending.PreviousSpending.Add(contract.Fees).
  2597  				Add(contract.DownloadSpending).Add(contract.UploadSpending).Add(contract.StorageSpending)
  2598  		} else {
  2599  			// Calculate Previous spending
  2600  			spending.PreviousSpending = spending.PreviousSpending.Add(contract.Fees).
  2601  				Add(contract.DownloadSpending).Add(contract.UploadSpending).Add(contract.StorageSpending)
  2602  		}
  2603  	}
  2604  	for _, contract := range renewedContracts {
  2605  		if contract.GoodForRenew {
  2606  			// Calculate ContractFees
  2607  			spending.ContractFees = spending.ContractFees.Add(contract.Fees)
  2608  			// Calculate TotalAllocated
  2609  			spending.TotalAllocated = spending.TotalAllocated.Add(contract.TotalCost)
  2610  			// Calculate Spending
  2611  			spending.DownloadSpending = spending.DownloadSpending.Add(contract.DownloadSpending)
  2612  			spending.UploadSpending = spending.UploadSpending.Add(contract.UploadSpending)
  2613  			spending.StorageSpending = spending.StorageSpending.Add(contract.StorageSpending)
  2614  		}
  2615  	}
  2616  
  2617  	// Compare contract fees
  2618  	if fm.ContractFees.Cmp(spending.ContractFees) != 0 {
  2619  		return fmt.Errorf(`Fees not equal:
  2620  			Financial Metrics Fees: %v
  2621  			Contract Fees:          %v
  2622  			`, fm.ContractFees.HumanString(), spending.ContractFees.HumanString())
  2623  	}
  2624  	// Compare Total Allocated
  2625  	if fm.TotalAllocated.Cmp(spending.TotalAllocated) != 0 {
  2626  		return fmt.Errorf(`Total Allocated not equal:
  2627  			Financial Metrics TA: %v
  2628  			Contract TA:          %v
  2629  			`, fm.TotalAllocated.HumanString(), spending.TotalAllocated.HumanString())
  2630  	}
  2631  	// Compare Upload Spending
  2632  	if fm.UploadSpending.Cmp(spending.UploadSpending) != 0 {
  2633  		return fmt.Errorf(`Upload spending not equal:
  2634  			Financial Metrics US: %v
  2635  			Contract US:          %v
  2636  			`, fm.UploadSpending.HumanString(), spending.UploadSpending.HumanString())
  2637  	}
  2638  	// Compare Download Spending
  2639  	if fm.DownloadSpending.Cmp(spending.DownloadSpending) != 0 {
  2640  		return fmt.Errorf(`Download spending not equal:
  2641  			Financial Metrics DS: %v
  2642  			Contract DS:          %v
  2643  			`, fm.DownloadSpending.HumanString(), spending.DownloadSpending.HumanString())
  2644  	}
  2645  	// Compare Storage Spending
  2646  	if fm.StorageSpending.Cmp(spending.StorageSpending) != 0 {
  2647  		return fmt.Errorf(`Storage spending not equal:
  2648  			Financial Metrics SS: %v
  2649  			Contract SS:          %v
  2650  			`, fm.StorageSpending.HumanString(), spending.StorageSpending.HumanString())
  2651  	}
  2652  	// Compare Withheld Funds
  2653  	if fm.WithheldFunds.Cmp(spending.WithheldFunds) != 0 {
  2654  		return fmt.Errorf(`Withheld Funds not equal:
  2655  			Financial Metrics WF: %v
  2656  			Contract WF:          %v
  2657  			`, fm.WithheldFunds.HumanString(), spending.WithheldFunds.HumanString())
  2658  	}
  2659  	// Compare Release Block
  2660  	if fm.ReleaseBlock != spending.ReleaseBlock {
  2661  		return fmt.Errorf(`Release Block not equal:
  2662  			Financial Metrics RB: %v
  2663  			Contract RB:          %v
  2664  			`, fm.ReleaseBlock, spending.ReleaseBlock)
  2665  	}
  2666  	// Compare Previous Spending
  2667  	if fm.PreviousSpending.Cmp(spending.PreviousSpending) != 0 {
  2668  		return fmt.Errorf(`Previous spending not equal:
  2669  			Financial Metrics PS: %v
  2670  			Contract PS:          %v
  2671  			`, fm.PreviousSpending.HumanString(), spending.PreviousSpending.HumanString())
  2672  	}
  2673  
  2674  	return nil
  2675  }
  2676  
  2677  // checkRenewedContracts confirms that renewed contracts have zero upload and
  2678  // download spending. Renewed contracts should be the renter's active contracts
  2679  func checkRenewedContracts(renewedContracts []api.RenterContract) error {
  2680  	for _, c := range renewedContracts {
  2681  		if c.UploadSpending.Cmp(types.ZeroCurrency) != 0 && c.GoodForUpload {
  2682  			return fmt.Errorf("Upload spending on renewed contract equal to %v, expected zero", c.UploadSpending.HumanString())
  2683  		}
  2684  		if c.DownloadSpending.Cmp(types.ZeroCurrency) != 0 {
  2685  			return fmt.Errorf("Download spending on renewed contract equal to %v, expected zero", c.DownloadSpending.HumanString())
  2686  		}
  2687  	}
  2688  	return nil
  2689  }
  2690  
  2691  // renewContractByRenewWindow mines blocks to force contract renewal
  2692  func renewContractsByRenewWindow(renter *siatest.TestNode, tg *siatest.TestGroup) error {
  2693  	rg, err := renter.RenterGet()
  2694  	if err != nil {
  2695  		return err
  2696  	}
  2697  	cg, err := renter.ConsensusGet()
  2698  	if err != nil {
  2699  		return err
  2700  	}
  2701  	rc, err := renter.RenterContractsGet()
  2702  	if err != nil {
  2703  		return err
  2704  	}
  2705  	blocksToMine := rc.ActiveContracts[0].EndHeight - rg.Settings.Allowance.RenewWindow - cg.Height
  2706  	m := tg.Miners()[0]
  2707  	for i := 0; i < int(blocksToMine); i++ {
  2708  		if err = m.MineBlock(); err != nil {
  2709  			return err
  2710  		}
  2711  	}
  2712  
  2713  	// Waiting for nodes to sync
  2714  	if err = tg.Sync(); err != nil {
  2715  		return err
  2716  	}
  2717  	return nil
  2718  }
  2719  
  2720  // renewContractsBySpending uploads files until the contracts renew due to
  2721  // running out of funds
  2722  func renewContractsBySpending(renter *siatest.TestNode, tg *siatest.TestGroup) (startingUploadSpend types.Currency, err error) {
  2723  	// Renew contracts by running out of funds
  2724  	// Set upload price to max price
  2725  	maxStoragePrice := types.SiacoinPrecision.Mul64(3e6).Div(modules.BlockBytesPerMonthTerabyte)
  2726  	maxUploadPrice := maxStoragePrice.Mul64(30 * 4320)
  2727  	hosts := tg.Hosts()
  2728  	for _, h := range hosts {
  2729  		err := h.HostModifySettingPost(client.HostParamMinUploadBandwidthPrice, maxUploadPrice)
  2730  		if err != nil {
  2731  			return types.ZeroCurrency, errors.AddContext(err, "could not set Host Upload Price")
  2732  		}
  2733  	}
  2734  
  2735  	// Waiting for nodes to sync
  2736  	m := tg.Miners()[0]
  2737  	if err := m.MineBlock(); err != nil {
  2738  		return types.ZeroCurrency, errors.AddContext(err, "error mining block")
  2739  	}
  2740  	if err := tg.Sync(); err != nil {
  2741  		return types.ZeroCurrency, err
  2742  	}
  2743  
  2744  	// Set upload parameters
  2745  	dataPieces := uint64(1)
  2746  	parityPieces := uint64(1)
  2747  	chunkSize := siatest.ChunkSize(1)
  2748  
  2749  	// Upload once to show upload spending
  2750  	_, _, err = renter.UploadNewFileBlocking(int(chunkSize), dataPieces, parityPieces)
  2751  	if err != nil {
  2752  		return types.ZeroCurrency, errors.AddContext(err, "failed to upload first file in renewContractsBySpending")
  2753  	}
  2754  
  2755  	// Get current upload spend, previously contracts had zero upload spend
  2756  	rc, err := renter.RenterContractsGet()
  2757  	if err != nil {
  2758  		return types.ZeroCurrency, errors.AddContext(err, "could not get renter active contracts")
  2759  	}
  2760  	startingUploadSpend = rc.ActiveContracts[0].UploadSpending
  2761  
  2762  	// Upload files to force contract renewal due to running out of funds
  2763  LOOP:
  2764  	for {
  2765  		// To protect against contracts not renewing during uploads
  2766  		for _, c := range rc.ActiveContracts {
  2767  			percentRemaining, _ := big.NewRat(0, 1).SetFrac(c.RenterFunds.Big(), c.TotalCost.Big()).Float64()
  2768  			if percentRemaining < float64(0.03) {
  2769  				break LOOP
  2770  			}
  2771  		}
  2772  		_, _, err = renter.UploadNewFileBlocking(int(chunkSize), dataPieces, parityPieces)
  2773  		if err != nil {
  2774  			pr, _ := big.NewRat(0, 1).SetFrac(rc.ActiveContracts[0].RenterFunds.Big(), rc.ActiveContracts[0].TotalCost.Big()).Float64()
  2775  			s := fmt.Sprintf("failed to upload file in renewContractsBySpending loop, percentRemaining: %v", pr)
  2776  			return types.ZeroCurrency, errors.AddContext(err, s)
  2777  		}
  2778  
  2779  		rc, err = renter.RenterContractsGet()
  2780  		if err != nil {
  2781  			return types.ZeroCurrency, errors.AddContext(err, "could not get renter active contracts")
  2782  		}
  2783  	}
  2784  	if err = m.MineBlock(); err != nil {
  2785  		return startingUploadSpend, err
  2786  	}
  2787  	if err := tg.Sync(); err != nil {
  2788  		return types.ZeroCurrency, err
  2789  	}
  2790  	return startingUploadSpend, nil
  2791  }
  2792  
  2793  // testSetFileTrackingPath tests if changing the repairPath of a file works.
  2794  func testSetFileTrackingPath(t *testing.T, tg *siatest.TestGroup) {
  2795  	// Grab the first of the group's renters
  2796  	renter := tg.Renters()[0]
  2797  	// Check that we have enough hosts for this test.
  2798  	if len(tg.Hosts()) < 2 {
  2799  		t.Fatal("This test requires at least 2 hosts")
  2800  	}
  2801  	// Set fileSize and redundancy for upload
  2802  	fileSize := int(modules.SectorSize)
  2803  	dataPieces := uint64(1)
  2804  	parityPieces := uint64(len(tg.Hosts())) - dataPieces
  2805  
  2806  	// Upload file
  2807  	localFile, remoteFile, err := renter.UploadNewFileBlocking(fileSize, dataPieces, parityPieces)
  2808  	if err != nil {
  2809  		t.Fatal(err)
  2810  	}
  2811  	// Move the file to a new location.
  2812  	if err := localFile.Move(); err != nil {
  2813  		t.Fatal(err)
  2814  	}
  2815  	// Take down all the hosts.
  2816  	numHosts := len(tg.Hosts())
  2817  	for _, host := range tg.Hosts() {
  2818  		if err := tg.RemoveNode(host); err != nil {
  2819  			t.Fatal("Failed to shutdown host", err)
  2820  		}
  2821  	}
  2822  	// File should have 0 redunancy now.
  2823  	if err := renter.WaitForDecreasingRedundancy(remoteFile, 0); err != nil {
  2824  		t.Fatal("Redundancy isn't decreasing", err)
  2825  	}
  2826  	// Rename the repairPath to match the new location.
  2827  	if err := renter.SetFileRepairPath(remoteFile, localFile); err != nil {
  2828  		t.Fatal("Failed to change the repair path", err)
  2829  	}
  2830  	// Create new hosts.
  2831  	_, err = tg.AddNodeN(node.HostTemplate, numHosts)
  2832  	if err != nil {
  2833  		t.Fatal("Failed to create a new host", err)
  2834  	}
  2835  	// We should reach full redundancy again.
  2836  	expectedRedundancy := float64((dataPieces + parityPieces)) / float64(dataPieces)
  2837  	if err := renter.WaitForUploadRedundancy(remoteFile, expectedRedundancy); err != nil {
  2838  		t.Logf("numHosts: %v", len(tg.Hosts()))
  2839  		t.Fatal("File wasn't repaired", err)
  2840  	}
  2841  	// We should be able to download
  2842  	if _, err := renter.DownloadByStream(remoteFile); err != nil {
  2843  		t.Fatal("Failed to download file", err)
  2844  	}
  2845  	// Create a new file that is smaller then the first one.
  2846  	smallFile, err := renter.NewFile(fileSize / 2)
  2847  	if err != nil {
  2848  		t.Fatal(err)
  2849  	}
  2850  	// Try to change the repairPath of the remote file again. This shouldn't
  2851  	// work.
  2852  	if err := renter.SetFileRepairPath(remoteFile, smallFile); err == nil {
  2853  		t.Fatal("Changing repair path to file of different size shouldn't work")
  2854  	}
  2855  	// Delete the small file and try again. This also shouldn't work.
  2856  	if err := smallFile.Delete(); err != nil {
  2857  		t.Fatal(err)
  2858  	}
  2859  	if err := renter.SetFileRepairPath(remoteFile, smallFile); err == nil {
  2860  		t.Fatal("Changing repair path to a nonexistent file shouldn't work")
  2861  	}
  2862  }