gitlab.com/SiaPrime/SiaPrime@v1.4.1/modules/renter/siafile/siafile_test.go

     1  package siafile
     2  
     3  import (
     4  	"encoding/hex"
     5  	"fmt"
     6  	"os"
     7  	"path/filepath"
     8  	"reflect"
     9  	"sync"
    10  	"sync/atomic"
    11  	"testing"
    12  	"time"
    13  
    14  	"gitlab.com/NebulousLabs/errors"
    15  	"gitlab.com/NebulousLabs/fastrand"
    16  
    17  	"gitlab.com/SiaPrime/SiaPrime/crypto"
    18  	"gitlab.com/SiaPrime/SiaPrime/modules"
    19  	"gitlab.com/SiaPrime/SiaPrime/types"
    20  )
    21  
    22  // dummyEntry wraps a SiaFile into a siaFileSetEntry.
    23  func dummyEntry(s *SiaFile) *siaFileSetEntry {
    24  	return &siaFileSetEntry{
    25  		SiaFile: s,
    26  		staticSiaFileSet: &SiaFileSet{
    27  			staticSiaFileDir: filepath.Dir(s.SiaFilePath()),
    28  			siaFileMap:       make(map[SiafileUID]*siaFileSetEntry),
    29  			siapathToUID:     make(map[modules.SiaPath]SiafileUID),
    30  			wal:              nil,
    31  		},
    32  		threadMap: make(map[uint64]threadInfo),
    33  	}
    34  }
    35  
    36  // randomChunk is a helper function for testing that creates a random chunk.
    37  func randomChunk() chunk {
    38  	numPieces := 30
    39  	chunk := chunk{}
    40  	chunk.Pieces = make([][]piece, numPieces)
    41  	fastrand.Read(chunk.ExtensionInfo[:])
    42  
    43  	// Add 0-3 pieces for each pieceIndex within the chunk.
    44  	for pieceIndex := range chunk.Pieces {
    45  		n := fastrand.Intn(4) // [0,3]
    46  		// Create and add n pieces at this pieceIndex.
    47  		for i := 0; i < n; i++ {
    48  			var piece piece
    49  			piece.HostTableOffset = uint32(fastrand.Intn(100))
    50  			fastrand.Read(piece.MerkleRoot[:])
    51  			chunk.Pieces[pieceIndex] = append(chunk.Pieces[pieceIndex], piece)
    52  		}
    53  	}
    54  	return chunk
    55  }
    56  
    57  // randomPiece is a helper function for testing that creates a random piece.
    58  func randomPiece() piece {
    59  	var piece piece
    60  	piece.HostTableOffset = uint32(fastrand.Intn(100))
    61  	fastrand.Read(piece.MerkleRoot[:])
    62  	return piece
    63  }
    64  
    65  // setCombinedChunkOfTestFile adds one or two combined chunks to a SiaFile so
    66  // that tests can use a SiaFile that already has its partial chunk contained
    67  // within a combined chunk. If the SiaFile doesn't have a partial chunk, this
    68  // is a no-op. The number of combined chunks is chosen at random; use
    69  // setCustomCombinedChunkOfTestFile to pick it explicitly.
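        //
        // For example (illustrative numbers only): a file whose Size() is 2.5 times
        // its ChunkSize() has two full chunks plus a partial chunk of
        // 0.5*ChunkSize() bytes; these helpers wrap that partial chunk in one or
        // two combined chunks so tests can treat it like any other chunk.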
    70  func setCombinedChunkOfTestFile(sf *SiaFile) error {
    71  	return setCustomCombinedChunkOfTestFile(sf, fastrand.Intn(2)+1)
    72  }
    73  
    74  // setCustomCombinedChunkOfTestFile sets either 1 or 2 combined chunks of a
    75  // SiaFile for testing and changes its status to completed.
    76  func setCustomCombinedChunkOfTestFile(sf *SiaFile, numCombinedChunks int) error {
    77  	if numCombinedChunks != 1 && numCombinedChunks != 2 {
    78  		return errors.New("numCombinedChunks should be 1 or 2")
    79  	}
    80  	partialChunkSize := sf.Size() % sf.ChunkSize()
    81  	if partialChunkSize == 0 {
    82  		// no partial chunk
    83  		return nil
    84  	}
    85  	var partialChunks []modules.PartialChunk
    86  	for i := 0; i < numCombinedChunks; i++ {
    87  		partialChunks = append(partialChunks, modules.PartialChunk{
    88  			ChunkID:        modules.CombinedChunkID(hex.EncodeToString(fastrand.Bytes(16))),
    89  			InPartialsFile: false,
    90  		})
    91  	}
    92  	var err error
    93  	if numCombinedChunks == 1 {
    94  		partialChunks[0].Offset = 0
    95  		partialChunks[0].Length = partialChunkSize
    96  		err = sf.SetPartialChunks(partialChunks, nil)
    97  	} else if numCombinedChunks == 2 {
    98  		partialChunks[0].Offset = sf.ChunkSize() - 1
    99  		partialChunks[0].Length = 1
   100  		partialChunks[1].Offset = 0
   101  		partialChunks[1].Length = partialChunkSize - 1
   102  		err = sf.SetPartialChunks(partialChunks, nil)
   103  	}
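        	// Illustration of the two-chunk split above, using assumed example
        	// numbers: for a 100-byte partial chunk, one byte is placed at offset
        	// ChunkSize()-1 of the first combined chunk and the remaining 99 bytes
        	// start at offset 0 of the second combined chunk.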
   104  	if err != nil {
   105  		return err
   106  	}
   107  	// Force the status to completed.
   108  	for i := 0; i < numCombinedChunks; i++ {
   109  		err = errors.Compose(err, sf.SetChunkStatusCompleted(uint64(i)))
   110  	}
   111  	return err
   112  }
   113  
   114  // TestFileNumChunks checks the numChunks method of the file type.
   115  func TestFileNumChunks(t *testing.T) {
   116  	fileSize := func(numSectors uint64) uint64 {
   117  		return numSectors*modules.SectorSize + uint64(fastrand.Intn(int(modules.SectorSize)))
   118  	}
   119  	// Since the pieceSize is 'random' (it depends on the encryption overhead), we test a variety of random inputs.
   120  	tests := []struct {
   121  		fileSize   uint64
   122  		dataPieces int
   123  	}{
   124  		{fileSize(10), 10},
   125  		{fileSize(50), 10},
   126  		{fileSize(100), 10},
   127  
   128  		{fileSize(11), 10},
   129  		{fileSize(51), 10},
   130  		{fileSize(101), 10},
   131  
   132  		{fileSize(10), 100},
   133  		{fileSize(50), 100},
   134  		{fileSize(100), 100},
   135  
   136  		{fileSize(11), 100},
   137  		{fileSize(51), 100},
   138  		{fileSize(101), 100},
   139  
   140  		{0, 10}, // 0-length
   141  	}
   142  
   143  	for _, test := range tests {
   144  		// Create erasure-coder
   145  		rsc, _ := NewRSCode(test.dataPieces, 1) // can't use 0
   146  		// Create the file
   147  		siaFilePath, _, source, _, sk, _, _, fileMode := newTestFileParams(1, true)
   148  		f, _, _ := customTestFileAndWAL(siaFilePath, source, rsc, sk, test.fileSize, -1, fileMode)
   149  		// Make sure the file reports the correct pieceSize.
   150  		if f.PieceSize() != modules.SectorSize-f.MasterKey().Type().Overhead() {
   151  			t.Fatal("file has wrong pieceSize for its encryption type")
   152  		}
   153  		// Check that the number of chunks matches the expected number.
   154  		expectedNumChunks := test.fileSize / (f.PieceSize() * uint64(test.dataPieces))
   155  		if expectedNumChunks == 0 && test.fileSize > 0 {
   156  			// There is at least 1 chunk for non 0-byte files.
   157  			expectedNumChunks = 1
   158  		} else if test.fileSize%(f.PieceSize()*uint64(test.dataPieces)) != 0 {
   159  			// If it doesn't divide evenly there will be 1 chunk padding.
   160  			expectedNumChunks++
   161  		}
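        		// Worked example with illustrative numbers: if the chunk size
        		// (pieceSize*dataPieces) were 40 bytes, a 100-byte file would need
        		// ceil(100/40) = 3 chunks, while an 80-byte file would need exactly 2.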
   162  		if f.NumChunks() != expectedNumChunks {
   163  			t.Errorf("Test %v: expected %v, got %v", test, expectedNumChunks, f.NumChunks())
   164  		}
   165  	}
   166  }
   167  
   168  // TestFileRedundancy tests that redundancy is correctly calculated for files
   169  // with varying numbers of file contracts and erasure code settings.
   170  func TestFileRedundancy(t *testing.T) {
   171  	if testing.Short() {
   172  		t.SkipNow()
   173  	}
   174  	nDatas := []int{1, 2, 10}
   175  	neverOffline := make(map[string]bool)
   176  	goodForRenew := make(map[string]bool)
   177  	for i := 0; i < 6; i++ {
   178  		pk := types.SiaPublicKey{Key: []byte{byte(i)}}
   179  		neverOffline[pk.String()] = false
   180  		goodForRenew[pk.String()] = true
   181  	}
   182  	// Create a testDir.
   183  	dir := filepath.Join(os.TempDir(), t.Name())
   184  	if err := os.RemoveAll(dir); err != nil {
   185  		t.Fatal(err)
   186  	}
   187  	if err := os.MkdirAll(dir, 0700); err != nil {
   188  		t.Fatal(err)
   189  	}
   190  
   191  	for _, nData := range nDatas {
   192  		rsc, _ := NewRSCode(nData, 10)
   193  		siaFilePath, _, source, _, sk, fileSize, numChunks, fileMode := newTestFileParamsWithRC(2, false, rsc)
   194  		f, _, _ := customTestFileAndWAL(siaFilePath, source, rsc, sk, fileSize, numChunks, fileMode)
   195  		// If the file has a partial chunk, fake a combined chunk to make sure we can
   196  		// add a piece to it.
   197  		if err := setCombinedChunkOfTestFile(f); err != nil {
   198  			t.Fatal(err)
   199  		}
   200  		// Test that a file with no added pieces has 0 redundancy.
   201  		r, ur, err := f.Redundancy(neverOffline, goodForRenew)
   202  		if err != nil {
   203  			t.Fatal(err)
   204  		}
   205  		if r != 0 || ur != 0 {
   206  			t.Error("expected 0 and 0 redundancy, got", r, ur)
   207  		}
   208  		// Test that a file with 1 host that has a piece for every chunk but
   209  		// one chunk still has a redundancy of 0.
   210  		for i := uint64(0); i < f.NumChunks()-1; i++ {
   211  			err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(0)}}, i, 0, crypto.Hash{})
   212  			if err != nil {
   213  				t.Fatal(err)
   214  			}
   215  		}
   216  		r, ur, err = f.Redundancy(neverOffline, goodForRenew)
   217  		if err != nil {
   218  			t.Fatal(err)
   219  		}
   220  		if r != 0 || ur != 0 {
   221  			t.Error("expected 0 and 0 redundancy, got", r, ur)
   222  		}
   223  		// Test that adding another host with a piece for every chunk but one
   224  		// chunk still results in a file with redundancy 0.
   225  		for i := uint64(0); i < f.NumChunks()-1; i++ {
   226  			err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(1)}}, i, 1, crypto.Hash{})
   227  			if err != nil {
   228  				t.Fatal(err)
   229  			}
   230  		}
   231  		r, ur, err = f.Redundancy(neverOffline, goodForRenew)
   232  		if err != nil {
   233  			t.Fatal(err)
   234  		}
   235  		if r != 0 || ur != 0 {
   236  			t.Error("expected 0 and 0 redundancy, got", r, ur)
   237  		}
   238  		// Test that adding a file contract with a piece for the missing chunk
   239  		// results in a file with redundancy > 0 && <= 1.
   240  		err = f.AddPiece(types.SiaPublicKey{Key: []byte{byte(2)}}, f.NumChunks()-1, 0, crypto.Hash{})
   241  		if err != nil {
   242  			t.Fatal(err)
   243  		}
   244  		// 1.0 / MinPieces because the chunk with the least number of pieces has 1 piece.
   245  		expectedR := 1.0 / float64(f.ErasureCode().MinPieces())
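        		// For example, with nData = 10 this is 1/10 = 0.1: the chunk holding
        		// only a single piece limits the whole file, and redundancy only
        		// reaches 1.0 once every chunk has at least MinPieces() pieces.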
   246  		r, ur, err = f.Redundancy(neverOffline, goodForRenew)
   247  		if err != nil {
   248  			t.Fatal(err)
   249  		}
   250  		if r != expectedR || ur != expectedR {
   251  			t.Errorf("expected %f redundancy, got %f %f", expectedR, r, ur)
   252  		}
   253  		// Test that adding a file contract that has erasureCode.MinPieces() pieces
   254  		// per chunk for all chunks results in a file with redundancy > 1.
   255  		for iChunk := uint64(0); iChunk < f.NumChunks(); iChunk++ {
   256  			for iPiece := uint64(1); iPiece < uint64(f.ErasureCode().MinPieces()); iPiece++ {
   257  				err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(3)}}, iChunk, iPiece, crypto.Hash{})
   258  				if err != nil {
   259  					t.Fatal(err)
   260  				}
   261  			}
   262  			err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(4)}}, iChunk, uint64(f.ErasureCode().MinPieces()), crypto.Hash{})
   263  			if err != nil {
   264  				t.Fatal(err)
   265  			}
   266  		}
   267  		// (1+MinPieces) / MinPieces because the chunk with the fewest pieces has 1+MinPieces pieces.
   268  		expectedR = float64(1+f.ErasureCode().MinPieces()) / float64(f.ErasureCode().MinPieces())
   269  		r, ur, err = f.Redundancy(neverOffline, goodForRenew)
   270  		if err != nil {
   271  			t.Fatal(err)
   272  		}
   273  		if r != expectedR || ur != expectedR {
   274  			t.Errorf("expected %f redundancy, got %f %f", expectedR, r, ur)
   275  		}
   276  
   277  		// verify offline file contracts are not counted in the redundancy
   278  		for iChunk := uint64(0); iChunk < f.NumChunks(); iChunk++ {
   279  			for iPiece := uint64(0); iPiece < uint64(f.ErasureCode().MinPieces()); iPiece++ {
   280  				err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(5)}}, iChunk, iPiece, crypto.Hash{})
   281  				if err != nil {
   282  					t.Fatal(err)
   283  				}
   284  			}
   285  		}
   286  		specificOffline := make(map[string]bool)
   287  		for pk := range goodForRenew {
   288  			specificOffline[pk] = false
   289  		}
   290  		specificOffline[types.SiaPublicKey{Key: []byte{byte(5)}}.String()] = true // mark host 5 as offline
   291  		r, ur, err = f.Redundancy(specificOffline, goodForRenew)
   292  		if err != nil {
   293  			t.Fatal(err)
   294  		}
   295  		if r != expectedR || ur != expectedR {
   296  			t.Errorf("expected redundancy to ignore offline file contracts, wanted %f got %f %f", expectedR, r, ur)
   297  		}
   298  	}
   299  }
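
        // exampleRedundancy is an illustrative helper (not used by the test above)
        // that captures the notion of redundancy exercised in TestFileRedundancy:
        // the number of good pieces in the file's worst chunk divided by the
        // erasure code's MinPieces, assuming every good piece sits in a distinct
        // piece set.
        func exampleRedundancy(worstChunkGoodPieces, minPieces int) float64 {
        	return float64(worstChunkGoodPieces) / float64(minPieces)
        }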
   300  
   301  // TestFileHealth tests that the health of the file is correctly calculated.
   302  //
   303  // Health is equal to (targetParityPieces - actualParityPieces)/targetParityPieces
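        //
        // Worked example with the 10-of-30 code used below (10 data pieces, 20
        // parity pieces): a chunk with no good pieces has health
        // 1 - ((0 - 10) / 20) = 1.5, one distinct good piece set gives
        // 1 - ((1 - 10) / 20) = 1.45, and a chunk with all 30 piece sets populated
        // gives 1 - ((30 - 10) / 20) = 0.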
   304  func TestFileHealth(t *testing.T) {
   305  	if testing.Short() {
   306  		t.SkipNow()
   307  	}
   308  	t.Parallel()
   309  
   310  	// Create a zero-byte file
   311  	rsc, _ := NewRSCode(10, 20)
   312  	siaFilePath, _, source, _, sk, _, _, fileMode := newTestFileParams(1, true)
   313  	zeroFile, _, _ := customTestFileAndWAL(siaFilePath, source, rsc, sk, 0, 0, fileMode)
   314  
   315  	// Create offline map
   316  	offlineMap := make(map[string]bool)
   317  	goodForRenewMap := make(map[string]bool)
   318  
   319  	// Confirm the health is correct
   320  	health, stuckHealth, userHealth, userStuckHealth, numStuckChunks := zeroFile.Health(offlineMap, goodForRenewMap)
   321  	if health != 0 {
   322  		t.Fatal("Expected health to be 0 but was", health)
   323  	}
   324  	if stuckHealth != 0 {
   325  		t.Fatal("Expected stuck health to be 0 but was", stuckHealth)
   326  	}
   327  	if userHealth != 0 {
   328  		t.Fatal("Expected userHealth to be 0 but was", userHealth)
   329  	}
   330  	if userStuckHealth != 0 {
   331  		t.Fatal("Expected user stuck health to be 0 but was", userStuckHealth)
   332  	}
   333  	if numStuckChunks != 0 {
   334  		t.Fatal("Expected no stuck chunks but found", numStuckChunks)
   335  	}
   336  
   337  	// Create File with 1 chunk
   338  	siaFilePath, _, source, _, sk, _, _, fileMode = newTestFileParams(1, true)
   339  	f, _, _ := customTestFileAndWAL(siaFilePath, source, rsc, sk, 100, 1, fileMode)
   340  
   341  	// Check file health. Since there are no pieces in the chunk yet, no good
   342  	// pieces will be found, resulting in a health of 1.5 with the erasure code
   343  	// settings of 10/30. Since there are no stuck chunks, the stuckHealth of
   344  	// the file should be 0.
   345  	//
   346  	// 1 - ((0 - 10) / 20)
   347  	health, stuckHealth, _, _, _ = f.Health(offlineMap, goodForRenewMap)
   348  	if health != 1.5 {
   349  		t.Fatalf("Health of file not as expected, got %v expected 1.5", health)
   350  	}
   351  	if stuckHealth != float64(0) {
   352  		t.Fatalf("Stuck Health of file not as expected, got %v expected 0", stuckHealth)
   353  	}
   354  
   355  	// Add good pieces to first Piece Set
   356  	if err := setCustomCombinedChunkOfTestFile(f, 1); err != nil {
   357  		t.Fatal(err)
   358  	}
   359  	if f.PartialChunks()[0].Status != CombinedChunkStatusCompleted {
   360  		t.Fatal("File has wrong combined chunk status")
   361  	}
   362  	for i := 0; i < 2; i++ {
   363  		host := fmt.Sprintln("host", i)
   364  		spk := types.SiaPublicKey{}
   365  		spk.LoadString(host)
   366  		offlineMap[spk.String()] = false
   367  		goodForRenewMap[spk.String()] = true
   368  		if err := f.AddPiece(spk, 0, 0, crypto.Hash{}); err != nil {
   369  			t.Fatal(err)
   370  		}
   371  	}
   372  
   373  	// Check health, even though two pieces were added the health should be 1.45
   374  	// since the two good pieces were added to the same pieceSet
   375  	//
   376  	// 1 - ((1 - 10) / 20)
   377  	health, _, _, _, _ = f.Health(offlineMap, goodForRenewMap)
   378  	if health != 1.45 {
   379  		t.Fatalf("Health of file not as expected, got %v expected 1.45", health)
   380  	}
   381  
   382  	// Add one good piece to the second piece set, confirm health is now 1.40.
   383  	host := fmt.Sprintln("host", 0)
   384  	spk := types.SiaPublicKey{}
   385  	spk.LoadString(host)
   386  	offlineMap[spk.String()] = false
   387  	goodForRenewMap[spk.String()] = true
   388  	if err := f.AddPiece(spk, 0, 1, crypto.Hash{}); err != nil {
   389  		t.Fatal(err)
   390  	}
   391  	health, _, _, _, _ = f.Health(offlineMap, goodForRenewMap)
   392  	if health != 1.40 {
   393  		t.Fatalf("Health of file not as expected, got %v expected 1.40", health)
   394  	}
   395  
   396  	// Add another good piece to the second piece set, confirm health is still 1.40.
   397  	host = fmt.Sprintln("host", 1)
   398  	spk = types.SiaPublicKey{}
   399  	spk.LoadString(host)
   400  	offlineMap[spk.String()] = false
   401  	goodForRenewMap[spk.String()] = true
   402  	if err := f.AddPiece(spk, 0, 1, crypto.Hash{}); err != nil {
   403  		t.Fatal(err)
   404  	}
   405  	health, _, _, _, _ = f.Health(offlineMap, goodForRenewMap)
   406  	if health != 1.40 {
   407  		t.Fatalf("Health of file not as expected, got %v expected 1.40", health)
   408  	}
   409  
   410  	// Mark chunk as stuck
   411  	err := f.SetStuck(0, true)
   412  	if err != nil {
   413  		t.Fatal(err)
   414  	}
   415  	health, stuckHealth, _, _, numStuckChunks = f.Health(offlineMap, goodForRenewMap)
   416  	// Health should now be 0 since there are no unstuck chunks
   417  	if health != 0 {
   418  		t.Fatalf("Health of file not as expected, got %v expected 0", health)
   419  	}
   420  	// Stuck Health should now be 1.4
   421  	if stuckHealth != 1.40 {
   422  		t.Fatalf("Stuck Health of file not as expected, got %v expected 1.40", stuckHealth)
   423  	}
   424  	// There should be 1 stuck chunk
   425  	if numStuckChunks != 1 {
   426  		t.Fatalf("Expected 1 stuck chunk but found %v", numStuckChunks)
   427  	}
   428  
   429  	// Create File with 2 chunks
   430  	siaFilePath, _, source, _, sk, _, _, fileMode = newTestFileParams(1, true)
   431  	f, _, _ = customTestFileAndWAL(siaFilePath, source, rsc, sk, 5e4, 2, fileMode)
   435  
   436  	// Create offline map
   437  	offlineMap = make(map[string]bool)
   438  	goodForRenewMap = make(map[string]bool)
   439  
   440  	// Check file health. Since there are no pieces in the chunks yet, no good
   441  	// pieces will be found, resulting in a health of 1.5.
   442  	health, _, _, _, _ = f.Health(offlineMap, goodForRenewMap)
   443  	if health != 1.5 {
   444  		t.Fatalf("Health of file not as expected, got %v expected 1.5", health)
   445  	}
   446  
   447  	// Add good pieces to the first chunk
   448  	for i := 0; i < 4; i++ {
   449  		host := fmt.Sprintln("host", i)
   450  		spk := types.SiaPublicKey{}
   451  		spk.LoadString(host)
   452  		offlineMap[spk.String()] = false
   453  		goodForRenewMap[spk.String()] = true
   454  		if err := f.AddPiece(spk, 0, uint64(i%2), crypto.Hash{}); err != nil {
   455  			t.Fatal(err)
   456  		}
   457  	}
   458  
   459  	// Check health; it should still be 1.5 because the other chunk doesn't
   460  	// have any good pieces yet.
   461  	health, stuckHealth, _, _, _ = f.Health(offlineMap, goodForRenewMap)
   462  	if health != 1.5 {
   463  		t.Fatalf("Health of file not as expected, got %v expected 1.5", health)
   464  	}
   465  
   466  	// Add good pieces to second chunk, confirm health is 1.40 since both chunks
   467  	// have 2 good pieces.
   468  	if err := setCustomCombinedChunkOfTestFile(f, 1); err != nil {
   469  		t.Fatal(err)
   470  	}
   471  	for i := 0; i < 4; i++ {
   472  		host := fmt.Sprintln("host", i)
   473  		spk := types.SiaPublicKey{}
   474  		spk.LoadString(host)
   475  		offlineMap[spk.String()] = false
   476  		goodForRenewMap[spk.String()] = true
   477  		if err := f.AddPiece(spk, 1, uint64(i%2), crypto.Hash{}); err != nil {
   478  			t.Fatal(err)
   479  		}
   480  	}
   481  	health, _, _, _, _ = f.Health(offlineMap, goodForRenewMap)
   482  	if health != 1.40 {
   483  		t.Fatalf("Health of file not as expected, got %v expected 1.40", health)
   484  	}
   485  
   486  	// Mark second chunk as stuck
   487  	err = f.SetStuck(1, true)
   488  	if err != nil {
   489  		t.Fatal(err)
   490  	}
   491  	health, stuckHealth, _, _, numStuckChunks = f.Health(offlineMap, goodForRenewMap)
   492  	// Since both chunks have the same health, the file health and the file stuck health should be the same
   493  	if health != 1.40 {
   494  		t.Fatalf("Health of file not as expected, got %v expected 1.40", health)
   495  	}
   496  	if stuckHealth != 1.40 {
   497  		t.Fatalf("Stuck Health of file not as expected, got %v expected 1.4", stuckHealth)
   498  	}
   499  	// Check health, verify there is 1 stuck chunk
   500  	if numStuckChunks != 1 {
   501  		t.Fatalf("Expected 1 stuck chunk but found %v", numStuckChunks)
   502  	}
   503  }
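
        // exampleChunkHealth is an illustrative helper (not used by the tests
        // above) that spells out the health formula from the comment on
        // TestFileHealth for a single chunk, where goodPieces counts the piece
        // sets holding at least one good piece. With 10 data and 20 parity pieces
        // it yields 1.5, 1.45 and 1.40 for 0, 1 and 2 good piece sets, matching
        // the values expected by TestFileHealth.
        func exampleChunkHealth(goodPieces, minPieces, numPieces int) float64 {
        	return 1 - float64(goodPieces-minPieces)/float64(numPieces-minPieces)
        }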
   504  
   505  // TestGrowNumChunks is a unit test for the SiaFile's GrowNumChunks method.
   506  func TestGrowNumChunks(t *testing.T) {
   507  	if testing.Short() {
   508  		t.SkipNow()
   509  	}
   510  	t.Parallel()
   511  
   512  	// Create a blank file.
   513  	siaFilePath, _, source, rc, sk, fileSize, numChunks, fileMode := newTestFileParams(1, false)
   514  	sf, wal, _ := customTestFileAndWAL(siaFilePath, source, rc, sk, fileSize, numChunks, fileMode)
   515  	expectedChunks := sf.NumChunks()
   516  	expectedSize := sf.Size()
   517  
   518  	// Declare a check method.
   519  	checkFile := func(sf *SiaFile, numChunks, size uint64) {
   520  		if numChunks != sf.NumChunks() {
   521  			t.Fatalf("Expected %v chunks but was %v", numChunks, sf.NumChunks())
   522  		}
   523  		if size != sf.Size() {
   524  			t.Fatalf("Expected size to be %v but was %v", size, sf.Size())
   525  		}
   526  	}
   527  
   528  	// Increase the size of the file by 1 chunk.
   529  	expectedChunks++
   530  	expectedSize += sf.ChunkSize()
   531  	err := sf.GrowNumChunks(expectedChunks)
   532  	if err != nil {
   533  		t.Fatal(err)
   534  	}
   535  	// Check the file after growing the chunks.
   536  	checkFile(sf, expectedChunks, expectedSize)
   537  	// Load the file from disk again to also check that persistence works.
   538  	sf, err = LoadSiaFile(sf.siaFilePath, wal)
   539  	if err != nil {
   540  		t.Fatal(err)
   541  	}
   542  	// Check that size and chunks still match.
   543  	checkFile(sf, expectedChunks, expectedSize)
   544  
   545  	// Call GrowNumChunks with the same argument again. This should be a no-op.
   546  	err = sf.GrowNumChunks(expectedChunks)
   547  	if err != nil {
   548  		t.Fatal(err)
   549  	}
   550  	// Check the file after growing the chunks.
   551  	checkFile(sf, expectedChunks, expectedSize)
   552  	// Load the file from disk again to also check that no wrong persistence
   553  	// happened.
   554  	sf, err = LoadSiaFile(sf.siaFilePath, wal)
   555  	if err != nil {
   556  		t.Fatal(err)
   557  	}
   558  	// Check that size and chunks still match.
   559  	checkFile(sf, expectedChunks, expectedSize)
   560  
   561  	// Grow the file by 2 chunks to see if multiple chunks also work.
   562  	expectedChunks += 2
   563  	expectedSize += 2 * sf.ChunkSize()
   564  	err = sf.GrowNumChunks(expectedChunks)
   565  	if err != nil {
   566  		t.Fatal(err)
   567  	}
   568  	// Check the file after growing the chunks.
   569  	checkFile(sf, expectedChunks, expectedSize)
   570  	// Load the file from disk again to also check that persistence works.
   571  	sf, err = LoadSiaFile(sf.siaFilePath, wal)
   572  	if err != nil {
   573  		t.Fatal(err)
   574  	}
   575  	// Check that size and chunks still match.
   576  	checkFile(sf, expectedChunks, expectedSize)
   577  }
   578  
   579  // TestPruneHosts is a unit test for the pruneHosts method.
   580  func TestPruneHosts(t *testing.T) {
   581  	if testing.Short() {
   582  		t.SkipNow()
   583  	}
   584  	t.Parallel()
   585  
   586  	// Create a siafile without a partial chunk.
   587  	siaFilePath, _, source, rc, sk, fileSize, numChunks, fileMode := newTestFileParams(1, false)
   588  	sf, _, _ := customTestFileAndWAL(siaFilePath, source, rc, sk, fileSize, numChunks, fileMode)
   589  
   590  	// Add 3 random hostkeys to the file.
   591  	sf.addRandomHostKeys(3)
   592  
   593  	// Save changes to disk.
   594  	updates, err := sf.saveHeaderUpdates()
   595  	if err != nil {
   596  		t.Fatal(err)
   597  	}
   598  	if err := sf.createAndApplyTransaction(updates...); err != nil {
   599  		t.Fatal(err)
   600  	}
   601  
   602  	// Add one piece for every host to every pieceSet of every chunk.
   603  	for _, hk := range sf.HostPublicKeys() {
   604  		err := sf.iterateChunksReadonly(func(chunk chunk) error {
   605  			for pieceIndex := range chunk.Pieces {
   606  				if err := sf.AddPiece(hk, uint64(chunk.Index), uint64(pieceIndex), crypto.Hash{}); err != nil {
   607  					t.Fatal(err)
   608  				}
   609  			}
   610  			return nil
   611  		})
   612  		if err != nil {
   613  			t.Fatal(err)
   614  		}
   615  	}
   616  
   617  	// Mark hostkeys 0 and 2 as unused.
   618  	sf.pubKeyTable[0].Used = false
   619  	sf.pubKeyTable[2].Used = false
   620  	remainingKey := sf.pubKeyTable[1]
   621  
   622  	// Prune the file.
   623  	updates, err = sf.pruneHosts()
   624  	if err != nil {
   625  		t.Fatal(err)
   626  	}
   627  	if err := sf.createAndApplyTransaction(updates...); err != nil {
   628  		t.Fatal(err)
   629  	}
   630  
   631  	// Check that there is only a single key left.
   632  	if len(sf.pubKeyTable) != 1 {
   633  		t.Fatalf("There should only be 1 key left but was %v", len(sf.pubKeyTable))
   634  	}
   635  	// The last key should be the correct one.
   636  	if !reflect.DeepEqual(remainingKey, sf.pubKeyTable[0]) {
   637  		t.Fatal("Remaining key doesn't match")
   638  	}
   639  	// Loop over all the pieces and make sure that the pieces with missing
   640  	// hosts were pruned and that the remaining pieces have the correct offset
   641  	// now.
   642  	err = sf.iterateChunksReadonly(func(chunk chunk) error {
   643  		for _, pieceSet := range chunk.Pieces {
   644  			if len(pieceSet) != 1 {
   645  				t.Fatalf("Expected 1 piece in the set but was %v", len(pieceSet))
   646  			}
   647  			// The HostTableOffset should always be 0 since the keys at index 0
   648  			// and 2 were pruned which means that index 1 is now index 0.
   649  			for _, piece := range pieceSet {
   650  				if piece.HostTableOffset != 0 {
   651  					t.Fatalf("HostTableOffset should be 0 but was %v", piece.HostTableOffset)
   652  				}
   653  			}
   654  		}
   655  		return nil
   656  	})
   657  	if err != nil {
   658  		t.Fatal(err)
   659  	}
   660  }
   661  
   662  // TestNumPieces tests the chunk's numPieces method.
   663  func TestNumPieces(t *testing.T) {
   664  	// create a random chunk.
   665  	chunk := randomChunk()
   666  
   667  	// get the number of pieces of the chunk.
   668  	totalPieces := 0
   669  	for _, pieceSet := range chunk.Pieces {
   670  		totalPieces += len(pieceSet)
   671  	}
   672  
   673  	// compare it to the one reported by numPieces.
   674  	if totalPieces != chunk.numPieces() {
   675  		t.Fatalf("Expected %v pieces but was %v", totalPieces, chunk.numPieces())
   676  	}
   677  }
   678  
   679  // TestDefragChunk tests if the defragChunk methods correctly prunes pieces
   680  // TestDefragChunk tests that the defragChunk method correctly prunes pieces
   681  func TestDefragChunk(t *testing.T) {
   682  	if testing.Short() {
   683  		t.SkipNow()
   684  	}
   685  	t.Parallel()
   686  	// Get a blank SiaFile.
   687  	sf, _, _ := newBlankTestFileAndWAL(2) // make sure we have 1 full chunk at the beginning of sf.fullChunks
   688  
   689  	// Use the first chunk of the file for testing.
   690  	chunk, err := sf.chunk(0)
   691  	if err != nil {
   692  		t.Fatal(err)
   693  	}
   694  
   695  	// Add 100 pieces to each set of pieces, all belonging to the same unused
   696  	// host.
   697  	sf.pubKeyTable = append(sf.pubKeyTable, HostPublicKey{Used: false})
   698  	for i := range chunk.Pieces {
   699  		for j := 0; j < 100; j++ {
   700  			chunk.Pieces[i] = append(chunk.Pieces[i], piece{HostTableOffset: 0})
   701  		}
   702  	}
   703  
   704  	// Defrag the chunk. This should remove all the pieces since the host is
   705  	// unused.
   706  	sf.defragChunk(&chunk)
   707  	if chunk.numPieces() != 0 {
   708  		t.Fatalf("chunk should have 0 pieces after defrag but was %v", chunk.numPieces())
   709  	}
   710  
   711  	// Do the same thing again, but this time the host is marked as used.
   712  	sf.pubKeyTable[0].Used = true
   713  	for i := range chunk.Pieces {
   714  		for j := 0; j < 100; j++ {
   715  			chunk.Pieces[i] = append(chunk.Pieces[i], piece{HostTableOffset: 0})
   716  		}
   717  	}
   718  
   719  	// Defrag the chunk.
   720  	maxChunkSize := int64(sf.staticMetadata.StaticPagesPerChunk) * pageSize
   721  	maxPieces := (maxChunkSize - marshaledChunkOverhead) / marshaledPieceSize
   722  	maxPiecesPerSet := maxPieces / int64(len(chunk.Pieces))
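        	// These bounds restate the invariant checked below: a chunk's marshaled
        	// size must fit in StaticPagesPerChunk pages, so after subtracting the
        	// fixed chunk overhead at most maxPieces pieces fit, spread evenly as
        	// maxPiecesPerSet pieces per piece set.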
   723  	sf.defragChunk(&chunk)
   724  
   725  	// The chunk should be smaller than maxChunkSize.
   726  	if chunkSize := marshaledChunkSize(chunk.numPieces()); chunkSize > maxChunkSize {
   727  		t.Errorf("chunkSize is too big %v > %v", chunkSize, maxChunkSize)
   728  	}
   729  	// The chunk should have less than maxPieces pieces.
   730  	if int64(chunk.numPieces()) > maxPieces {
   731  		t.Errorf("chunk should have <= %v pieces after defrag but was %v",
   732  			maxPieces, chunk.numPieces())
   733  	}
   734  	// The chunk should have ErasureCode().NumPieces() * maxPiecesPerSet pieces.
   735  	if expectedPieces := int64(sf.ErasureCode().NumPieces()) * maxPiecesPerSet; expectedPieces != int64(chunk.numPieces()) {
   736  		t.Errorf("chunk should have %v pieces but was %v", expectedPieces, chunk.numPieces())
   737  	}
   738  	// Every set of pieces should have maxPiecesPerSet pieces.
   739  	for i, pieceSet := range chunk.Pieces {
   740  		if int64(len(pieceSet)) != maxPiecesPerSet {
   741  			t.Errorf("pieceSet%v length is %v which doesn't match the expected %v",
   742  				i, len(pieceSet), maxPiecesPerSet)
   743  		}
   744  	}
   745  
   746  	// Create a new file with 2 used hosts and 1 unused one. This file should
   747  	// use 2 pages per chunk.
   748  	sf, _, _ = newBlankTestFileAndWAL(2) // make sure we have 1 full chunk at the beginning of the file.
   749  	sf.staticMetadata.StaticPagesPerChunk = 2
   750  	sf.pubKeyTable = append(sf.pubKeyTable, HostPublicKey{Used: true})
   751  	sf.pubKeyTable = append(sf.pubKeyTable, HostPublicKey{Used: true})
   752  	sf.pubKeyTable = append(sf.pubKeyTable, HostPublicKey{Used: false})
   753  	sf.pubKeyTable[0].PublicKey.Key = fastrand.Bytes(crypto.EntropySize)
   754  	sf.pubKeyTable[1].PublicKey.Key = fastrand.Bytes(crypto.EntropySize)
   755  	sf.pubKeyTable[2].PublicKey.Key = fastrand.Bytes(crypto.EntropySize)
   756  
   757  	// Save the above changes to disk to avoid failing sanity checks when
   758  	// calling AddPiece.
   759  	updates, err := sf.saveHeaderUpdates()
   760  	if err != nil {
   761  		t.Fatal(err)
   762  	}
   763  	if err := sf.createAndApplyTransaction(updates...); err != nil {
   764  		t.Fatal(err)
   765  	}
   766  
   767  	// Add 50 pieces to the first chunk of the file, randomly belonging to
   768  	// any of the 3 hosts. This should never produce an error.
   769  	var duration time.Duration
   770  	for i := 0; i < 50; i++ {
   771  		chunk, err := sf.chunk(0)
   772  		if err != nil {
   773  			t.Fatal(err)
   774  		}
   775  		pk := sf.pubKeyTable[fastrand.Intn(len(sf.pubKeyTable))].PublicKey
   776  		pieceIndex := fastrand.Intn(len(chunk.Pieces))
   777  		before := time.Now()
   778  		if err := sf.AddPiece(pk, 0, uint64(pieceIndex), crypto.Hash{}); err != nil {
   779  			t.Fatal(err)
   780  		}
   781  		duration += time.Since(before)
   782  	}
   783  
   784  	// Save the file to disk again to make sure cached fields are persisted.
   785  	updates, err = sf.saveHeaderUpdates()
   786  	if err != nil {
   787  		t.Fatal(err)
   788  	}
   789  	if err := sf.createAndApplyTransaction(updates...); err != nil {
   790  		t.Fatal(err)
   791  	}
   792  
   793  	// Finally load the file from disk again and compare it to the original.
   794  	sf2, err := LoadSiaFile(sf.siaFilePath, sf.wal)
   795  	if err != nil {
   796  		t.Fatal(err)
   797  	}
   798  	// Compare the files.
   799  	if err := equalFiles(sf, sf2); err != nil {
   800  		t.Fatal(err)
   801  	}
   802  }
   803  
   804  // TestChunkHealth probes the chunkHealth method
   805  func TestChunkHealth(t *testing.T) {
   806  	if testing.Short() {
   807  		t.SkipNow()
   808  	}
   809  	t.Parallel()
   810  	// Get a blank siafile with at least 3 chunks.
   811  	sf, _, _ := newBlankTestFileAndWAL(3)
   812  	rc := sf.ErasureCode()
   813  
   814  	// Create offline map
   815  	offlineMap := make(map[string]bool)
   816  	goodForRenewMap := make(map[string]bool)
   817  
   818  	// Check and Record file health of initialized file
   819  	fileHealth, _, _, _, _ := sf.Health(offlineMap, goodForRenewMap)
   820  	initHealth := float64(1) - (float64(0-rc.MinPieces()) / float64(rc.NumPieces()-rc.MinPieces()))
   821  	if fileHealth != initHealth {
   822  		t.Fatalf("Expected file health to be %v, got %v", initHealth, fileHealth)
   823  	}
   824  
   825  	// Since we are using a preset offlineMap, all the chunks should have the
   826  	// same health as the file.
   827  	err := sf.iterateChunksReadonly(func(chunk chunk) error {
   828  		chunkHealth, _, err := sf.chunkHealth(chunk, offlineMap, goodForRenewMap)
   829  		if err != nil {
   830  			return err
   831  		}
   832  		if chunkHealth != fileHealth {
   833  			t.Log("ChunkHealth:", chunkHealth)
   834  			t.Log("FileHealth:", fileHealth)
   835  			t.Fatal("Expected file and chunk to have same health")
   836  		}
   837  		return nil
   838  	})
   839  	if err != nil {
   840  		t.Fatal(err)
   841  	}
   842  
   843  	// Add good piece to first chunk
   844  	host := fmt.Sprintln("host_0")
   845  	spk := types.SiaPublicKey{}
   846  	spk.LoadString(host)
   847  	offlineMap[spk.String()] = false
   848  	goodForRenewMap[spk.String()] = true
   849  	if err := setCombinedChunkOfTestFile(sf); err != nil {
   850  		t.Fatal(err)
   851  	}
   852  	if err := sf.AddPiece(spk, 0, 0, crypto.Hash{}); err != nil {
   853  		t.Fatal(err)
   854  	}
   855  
   856  	// Chunk at index 0 should now have the health of a chunk with 1 good piece
   857  	chunk, err := sf.chunk(0)
   858  	if err != nil {
   859  		t.Fatal(err)
   860  	}
   861  	newHealth := float64(1) - (float64(1-rc.MinPieces()) / float64(rc.NumPieces()-rc.MinPieces()))
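        	// With this formula, each additional distinct good piece set lowers
        	// (i.e. improves) the chunk health by 1/(NumPieces()-MinPieces()).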
   862  	ch, _, err := sf.chunkHealth(chunk, offlineMap, goodForRenewMap)
   863  	if err != nil {
   864  		t.Fatal(err)
   865  	}
   866  	if ch != newHealth {
   867  		t.Fatalf("Expected chunk health to be %v, got %v", newHealth, ch)
   868  	}
   869  
   870  	// Chunk at index 1 should still have its initial, worse health
   871  	chunk, err = sf.chunk(1)
   872  	if err != nil {
   873  		t.Fatal(err)
   874  	}
   875  	ch, _, err = sf.chunkHealth(chunk, offlineMap, goodForRenewMap)
   876  	if err != nil {
   877  		t.Fatal(err)
   878  	}
   879  	if ch != fileHealth {
   880  		t.Fatalf("Expected chunk health to be %v, got %v", fileHealth, ch)
   881  	}
   882  
   883  	// Add good piece to second chunk
   884  	host = fmt.Sprintln("host_1")
   885  	spk = types.SiaPublicKey{}
   886  	spk.LoadString(host)
   887  	offlineMap[spk.String()] = false
   888  	goodForRenewMap[spk.String()] = true
   889  	if err := sf.AddPiece(spk, 1, 0, crypto.Hash{}); err != nil {
   890  		t.Fatal(err)
   891  	}
   892  
   893  	// Chunk at index 1 should now have the health of a chunk with 1 good piece
   894  	chunk, err = sf.chunk(1)
   895  	if err != nil {
   896  		t.Fatal(err)
   897  	}
   898  	ch, _, err = sf.chunkHealth(chunk, offlineMap, goodForRenewMap)
   899  	if err != nil {
   900  		t.Fatal(err)
   901  	}
   902  	if ch != newHealth {
   903  		t.Fatalf("Expected chunk health to be %v, got %v", newHealth, ch)
   904  	}
   905  
   906  	// Mark Chunk at index 1 as stuck and confirm that doesn't impact the result
   907  	// of chunkHealth
   908  	if err := sf.SetStuck(1, true); err != nil {
   909  		t.Fatal(err)
   910  	}
   911  	chunk, err = sf.chunk(1)
   912  	if err != nil {
   913  		t.Fatal(err)
   914  	}
   915  	ch, _, err = sf.chunkHealth(chunk, offlineMap, goodForRenewMap)
   916  	if err != nil {
   917  		t.Fatal(err)
   918  	}
   919  	if ch != newHealth {
   920  		t.Fatalf("Expected chunk health to be %v, got %v", newHealth, ch)
   921  	}
   922  }
   923  
   924  // TestStuckChunks checks that NumStuckChunks returns the expected value and
   925  // that the stuck chunks are persisted properly.
   926  func TestStuckChunks(t *testing.T) {
   927  	if testing.Short() {
   928  		t.SkipNow()
   929  	}
   930  	t.Parallel()
   931  
   932  	// Create siafile
   933  	sf, sfs, err := newTestSiaFileSetWithFile()
   934  	if err != nil {
   935  		t.Fatal(err)
   936  	}
   937  
   938  	// Mark every other chunk as stuck
   939  	expectedStuckChunks := 0
   940  	for chunkIndex := 0; chunkIndex < sf.numChunks; chunkIndex++ {
   941  		if (chunkIndex % 2) != 0 {
   942  			continue
   943  		}
   944  		if sf.staticMetadata.HasPartialChunk && len(sf.PartialChunks()) == 0 && chunkIndex == sf.numChunks-1 {
   945  			continue // a partial chunk that isn't part of a combined chunk yet can't be stuck
   946  		}
   947  		if err := sf.SetStuck(uint64(chunkIndex), true); err != nil {
   948  			t.Fatal(err)
   949  		}
   950  		expectedStuckChunks++
   951  	}
   952  
   953  	// Check that the total number of stuck chunks is consistent
   954  	numStuckChunks := sf.NumStuckChunks()
   955  	if numStuckChunks != uint64(expectedStuckChunks) {
   956  		t.Fatalf("Wrong number of stuck chunks, got %v expected %v", numStuckChunks, expectedStuckChunks)
   957  	}
   958  
   959  	// Close file and confirm it and its partialsSiaFile are out of memory
   960  	siaPath := sfs.SiaPath(sf)
   961  	if err = sf.Close(); err != nil {
   962  		t.Fatal(err)
   963  	}
   964  	if len(sfs.siaFileMap) != 0 {
   965  		t.Fatal("File not removed from memory")
   966  	}
   967  	if len(sfs.siapathToUID) != 0 {
   968  		t.Fatal("File not removed from uid map")
   969  	}
   970  
   971  	// Load siafile from disk
   972  	sf, err = sfs.Open(siaPath)
   973  	if err != nil {
   974  		t.Fatal(err)
   975  	}
   976  
   977  	// Check that the total number of stuck chunks is consistent
   978  	if numStuckChunks != sf.NumStuckChunks() {
   979  		t.Fatalf("Wrong number of stuck chunks, got %v expected %v", numStuckChunks, sf.NumStuckChunks())
   980  	}
   981  
   982  	// Check chunks and Stuck Chunk Table
   983  	err = sf.iterateChunksReadonly(func(chunk chunk) error {
   984  		if sf.staticMetadata.HasPartialChunk && len(sf.staticMetadata.PartialChunks) == 0 &&
   985  			uint64(chunk.Index) == sf.NumChunks()-1 {
   986  			return nil // partial chunk at the end can't be stuck
   987  		}
   988  		if chunk.Index%2 != 0 {
   989  			if chunk.Stuck {
   990  				t.Fatal("Found stuck chunk when un-stuck chunk was expected")
   991  			}
   992  			return nil
   993  		}
   994  		if !chunk.Stuck {
   995  			t.Fatal("Found un-stuck chunk when stuck chunk was expected")
   996  		}
   997  		return nil
   998  	})
   999  	if err != nil {
  1000  		t.Fatal(err)
  1001  	}
  1002  }
  1003  
  1004  // TestUploadedBytes tests that uploadedBytes() returns the expected values for
  1005  // total and unique uploaded bytes.
  1006  func TestUploadedBytes(t *testing.T) {
  1007  	if testing.Short() {
  1008  		t.SkipNow()
  1009  	}
  1010  	// Create a new blank test file
  1011  	f := newBlankTestFile()
  1012  	if err := setCombinedChunkOfTestFile(f); err != nil {
  1013  		t.Fatal(err)
  1014  	}
  1015  	// Add multiple pieces to the first pieceSet of the first chunk.
  1017  	for i := 0; i < 4; i++ {
  1018  		err := f.AddPiece(types.SiaPublicKey{}, uint64(0), 0, crypto.Hash{})
  1019  		if err != nil {
  1020  			t.Fatal(err)
  1021  		}
  1022  	}
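        	// All four pieces above landed in the same piece set, so they count as
        	// four sectors of total uploaded data but only one sector of unique
        	// uploaded data.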
  1023  	totalBytes, uniqueBytes, err := f.uploadedBytes()
  1024  	if err != nil {
  1025  		t.Fatal(err)
  1026  	}
  1027  	if totalBytes != 4*modules.SectorSize {
  1028  		t.Errorf("expected totalBytes to be %v, got %v", 4*modules.SectorSize, totalBytes)
  1029  	}
  1030  	if uniqueBytes != modules.SectorSize {
  1031  		t.Errorf("expected uniqueBytes to be %v, got %v", modules.SectorSize, uniqueBytes)
  1032  	}
  1033  }
  1034  
  1035  // TestFileUploadProgressPinning verifies that uploadProgress() returns at most
  1036  // 100%, even if more than the required number of pieces have been uploaded.
  1037  func TestFileUploadProgressPinning(t *testing.T) {
  1038  	if testing.Short() {
  1039  		t.SkipNow()
  1040  	}
  1041  	f := newBlankTestFile()
  1042  	if err := setCombinedChunkOfTestFile(f); err != nil {
  1043  		t.Fatal(err)
  1044  	}
  1045  
  1046  	for chunkIndex := uint64(0); chunkIndex < f.NumChunks(); chunkIndex++ {
  1047  		for pieceIndex := uint64(0); pieceIndex < uint64(f.ErasureCode().NumPieces()); pieceIndex++ {
  1048  			err1 := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(0)}}, chunkIndex, pieceIndex, crypto.Hash{})
  1049  			err2 := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(1)}}, chunkIndex, pieceIndex, crypto.Hash{})
  1050  			if err := errors.Compose(err1, err2); err != nil {
  1051  				t.Fatal(err)
  1052  			}
  1053  		}
  1054  	}
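        	// Two pieces were added for every piece index, i.e. roughly twice the
        	// data needed for full redundancy, yet the progress must be pinned at
        	// 100%.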
  1055  	if f.staticMetadata.CachedUploadProgress != 100 {
  1056  		t.Fatal("expected uploadProgress to report 100% but was", f.staticMetadata.CachedUploadProgress)
  1057  	}
  1058  }
  1059  
  1060  // TestFileExpiration probes the expiration method of the file type.
  1061  func TestFileExpiration(t *testing.T) {
  1062  	if testing.Short() {
  1063  		t.SkipNow()
  1064  	}
  1065  	siaFilePath, _, source, rc, sk, fileSize, numChunks, fileMode := newTestFileParams(1, false)
  1066  	f, _, _ := customTestFileAndWAL(siaFilePath, source, rc, sk, fileSize, numChunks, fileMode)
  1067  	contracts := make(map[string]modules.RenterContract)
  1068  	_ = f.Expiration(contracts)
  1069  	if f.staticMetadata.CachedExpiration != 0 {
  1070  		t.Error("file with no pieces should report as having no time remaining")
  1071  	}
  1072  	// Set a combined chunk for the file if necessary.
  1073  	if err := setCombinedChunkOfTestFile(f); err != nil {
  1074  		t.Fatal(err)
  1075  	}
  1076  	// Create 3 public keys
  1077  	pk1 := types.SiaPublicKey{Key: []byte{0}}
  1078  	pk2 := types.SiaPublicKey{Key: []byte{1}}
  1079  	pk3 := types.SiaPublicKey{Key: []byte{2}}
  1080  
  1081  	// Add a piece for each key to the file.
  1082  	err1 := f.AddPiece(pk1, 0, 0, crypto.Hash{})
  1083  	err2 := f.AddPiece(pk2, 0, 1, crypto.Hash{})
  1084  	err3 := f.AddPiece(pk3, 0, 2, crypto.Hash{})
  1085  	if err := errors.Compose(err1, err2, err3); err != nil {
  1086  		t.Fatal(err)
  1087  	}
  1088  
  1089  	// Add a contract.
  1090  	fc := modules.RenterContract{}
  1091  	fc.EndHeight = 100
  1092  	contracts[pk1.String()] = fc
  1093  	_ = f.Expiration(contracts)
  1094  	if f.staticMetadata.CachedExpiration != 100 {
  1095  		t.Error("file did not report lowest EndHeight", f.staticMetadata.CachedExpiration)
  1096  	}
  1097  
  1098  	// Add a contract with a lower EndHeight.
  1099  	fc.EndHeight = 50
  1100  	contracts[pk2.String()] = fc
  1101  	_ = f.Expiration(contracts)
  1102  	if f.staticMetadata.CachedExpiration != 50 {
  1103  		t.Error("file did not report lowest EndHeight", f.staticMetadata.CachedExpiration)
  1104  	}
  1105  
  1106  	// Add a contract with a higher EndHeight.
  1107  	fc.EndHeight = 75
  1108  	contracts[pk3.String()] = fc
  1109  	_ = f.Expiration(contracts)
  1110  	if f.staticMetadata.CachedExpiration != 50 {
  1111  		t.Error("file did not report lowest EndHeight", f.staticMetadata.CachedExpiration)
  1112  	}
  1113  }
  1114  
  1115  // BenchmarkLoadSiaFile benchmarks loading an existing siafile's metadata into
  1116  // memory.
  1117  func BenchmarkLoadSiaFile(b *testing.B) {
  1118  	// Get new file params
  1119  	siaFilePath, _, source, _, sk, _, _, fileMode := newTestFileParams(1, false)
  1120  	// Create the path to the file.
  1121  	dir, _ := filepath.Split(siaFilePath)
  1122  	if err := os.MkdirAll(dir, 0700); err != nil {
  1123  		b.Fatal(err)
  1124  	}
  1125  	// Create the file.
  1126  	wal, _ := newTestWAL()
  1127  	rc, err := NewRSSubCode(10, 20, crypto.SegmentSize)
  1128  	if err != nil {
  1129  		b.Fatal(err)
  1130  	}
  1131  	sf, err := New(siaFilePath, source, wal, rc, sk, 1, fileMode, nil, true) // 1 chunk file
  1132  	if err != nil {
  1133  		b.Fatal(err)
  1134  	}
  1135  	if err := sf.GrowNumChunks(10); err != nil { // Grow file to 10 chunks
  1136  		b.Fatal(err)
  1137  	}
  1138  	// Add pieces to chunks until every chunk has full redundancy.
  1139  	hostKeys := make([]types.SiaPublicKey, rc.NumPieces())
  1140  	for i := range hostKeys {
  1141  		fastrand.Read(hostKeys[i].Key)
  1142  	}
  1143  	for pieceIndex := 0; pieceIndex < rc.NumPieces(); pieceIndex++ {
  1144  		for chunkIndex := 0; chunkIndex < int(sf.NumChunks()); chunkIndex++ {
  1145  			if err := sf.AddPiece(hostKeys[pieceIndex], uint64(chunkIndex), uint64(pieceIndex), crypto.Hash{}); err != nil {
  1146  				b.Fatal(err)
  1147  			}
  1148  		}
  1149  	}
  1150  	b.ResetTimer()
  1151  	for loads := 0; loads < b.N; loads++ {
  1152  		sf, err = LoadSiaFile(siaFilePath, wal)
  1153  		if err != nil {
  1154  			b.Fatal(err)
  1155  		}
  1156  	}
  1157  }
  1158  
  1159  func BenchmarkRandomChunkWriteSingleThreaded(b *testing.B) {
  1160  	benchmarkRandomChunkWrite(1, b)
  1161  }
  1162  func BenchmarkRandomChunkWriteMultiThreaded(b *testing.B) {
  1163  	// 50 seems reasonable since it matches the number of hosts we usually have contracts with
  1164  	benchmarkRandomChunkWrite(50, b)
  1165  }
  1166  
  1167  // benchmarkRandomChunkWrite benchmarks writing pieces to random chunks within
  1168  // a siafile using the given number of worker threads.
  1169  func benchmarkRandomChunkWrite(numThreads int, b *testing.B) {
  1170  	// Get new file params
  1171  	siaFilePath, _, source, _, sk, _, _, fileMode := newTestFileParams(1, false)
  1172  	// Create the path to the file.
  1173  	dir, _ := filepath.Split(siaFilePath)
  1174  	if err := os.MkdirAll(dir, 0700); err != nil {
  1175  		b.Fatal(err)
  1176  	}
  1177  	// Create the file.
  1178  	wal, _ := newTestWAL()
  1179  	rc, err := NewRSSubCode(10, 20, crypto.SegmentSize)
  1180  	if err != nil {
  1181  		b.Fatal(err)
  1182  	}
  1183  	sf, err := New(siaFilePath, source, wal, rc, sk, 1, fileMode, nil, true) // 1 chunk file
  1184  	if err != nil {
  1185  		b.Fatal(err)
  1186  	}
  1187  	if err := sf.GrowNumChunks(100); err != nil { // Grow file to 100 chunks
  1188  		b.Fatal(err)
  1189  	}
  1190  	// Add pieces to random chunks until b.N pieces have been written.
  1191  	var writes uint64
  1192  	piecePerm := fastrand.Perm(rc.NumPieces())
  1193  	chunkPerm := fastrand.Perm(int(sf.NumChunks()))
  1194  	hostKeys := make([]types.SiaPublicKey, rc.NumPieces())
  1195  	for i := range hostKeys {
  1196  		fastrand.Read(hostKeys[i].Key)
  1197  	}
  1198  	start := make(chan struct{})
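        	// Workers block on the start channel so that setup work isn't measured,
        	// and the shared atomic counter stops all of them once b.N pieces have
        	// been written.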
  1199  	worker := func() {
  1200  		<-start
  1201  		for atomic.LoadUint64(&writes) < uint64(b.N) {
  1202  			for _, pieceIndex := range piecePerm {
  1203  				for _, chunkIndex := range chunkPerm {
  1204  					if err := sf.AddPiece(hostKeys[pieceIndex], uint64(chunkIndex), uint64(pieceIndex), crypto.Hash{}); err != nil {
  1205  						b.Error(err) // Fatal must not be called from a worker goroutine.
  1206  						return
  1207  					}
  1207  					atomic.AddUint64(&writes, 1)
  1208  					if atomic.LoadUint64(&writes) >= uint64(b.N) {
  1209  						return
  1210  					}
  1211  				}
  1212  			}
  1213  		}
  1214  	}
  1215  	// Spawn worker threads.
  1216  	var wg sync.WaitGroup
  1217  	for i := 0; i < numThreads; i++ {
  1218  		wg.Add(1)
  1219  		go func() {
  1220  			defer wg.Done()
  1221  			worker()
  1222  		}()
  1223  	}
  1224  	// Reset timer and start threads.
  1225  	b.ResetTimer()
  1226  	close(start)
  1227  	wg.Wait()
  1228  }
  1229  
  1230  // BenchmarkRandomChunkRead benchmarks reading pieces of random chunks within
  1231  // a siafile.
  1232  func BenchmarkRandomChunkRead(b *testing.B) {
  1233  	// Get new file params
  1234  	siaFilePath, _, source, _, sk, _, _, fileMode := newTestFileParams(1, false)
  1235  	// Create the path to the file.
  1236  	dir, _ := filepath.Split(siaFilePath)
  1237  	if err := os.MkdirAll(dir, 0700); err != nil {
  1238  		b.Fatal(err)
  1239  	}
  1240  	// Create the file.
  1241  	wal, _ := newTestWAL()
  1242  	rc, err := NewRSSubCode(10, 20, crypto.SegmentSize)
  1243  	if err != nil {
  1244  		b.Fatal(err)
  1245  	}
  1246  	sf, err := New(siaFilePath, source, wal, rc, sk, 1, fileMode, nil, true) // 1 chunk file
  1247  	if err != nil {
  1248  		b.Fatal(err)
  1249  	}
  1250  	if err := sf.GrowNumChunks(10); err != nil { // Grow file to 10 chunks
  1251  		b.Fatal(err)
  1252  	}
  1253  	// Add pieces to chunks until every chunk has full redundancy.
  1254  	hostKeys := make([]types.SiaPublicKey, rc.NumPieces())
  1255  	for i := range hostKeys {
  1256  		fastrand.Read(hostKeys[i].Key)
  1257  	}
  1258  	for pieceIndex := 0; pieceIndex < rc.NumPieces(); pieceIndex++ {
  1259  		for chunkIndex := 0; chunkIndex < int(sf.NumChunks()); chunkIndex++ {
  1260  			if err := sf.AddPiece(hostKeys[pieceIndex], uint64(chunkIndex), uint64(pieceIndex), crypto.Hash{}); err != nil {
  1261  				b.Fatal(err)
  1262  			}
  1263  		}
  1264  	}
  1265  	// Read random pieces
  1266  	reads := 0
  1267  	chunkPerm := fastrand.Perm(int(sf.NumChunks()))
  1268  	b.ResetTimer()
  1269  	for reads < b.N {
  1270  		for _, chunkIndex := range chunkPerm {
  1271  			if _, err := sf.Pieces(uint64(chunkIndex)); err != nil {
  1272  				b.Fatal(err)
  1273  			}
  1274  			reads++
  1275  			if reads == b.N {
  1276  				return
  1277  			}
  1278  		}
  1279  	}
  1280  }