gitlab.com/SkynetLabs/skyd@v1.6.9/skymodules/renter/filesystem/siafile/siafile_test.go

     1  package siafile
     2  
     3  import (
     4  	"fmt"
     5  	"math"
     6  	"os"
     7  	"path/filepath"
     8  	"reflect"
     9  	"sync"
    10  	"sync/atomic"
    11  	"testing"
    12  	"time"
    13  
    14  	"gitlab.com/NebulousLabs/errors"
    15  	"gitlab.com/NebulousLabs/fastrand"
    16  
    17  	"gitlab.com/SkynetLabs/skyd/skymodules"
    18  	"go.sia.tech/siad/crypto"
    19  	"go.sia.tech/siad/modules"
    20  	"go.sia.tech/siad/types"
    21  )
    22  
    23  // randomChunk is a helper function for testing that creates a random chunk.
    24  func randomChunk() chunk {
    25  	numPieces := 30
    26  	chunk := chunk{}
    27  	chunk.Pieces = make([][]piece, numPieces)
    28  	fastrand.Read(chunk.ExtensionInfo[:])
    29  
    30  	// Add 0-3 pieces for each pieceIndex within the chunk.
    31  	for pieceIndex := range chunk.Pieces {
    32  		n := fastrand.Intn(4) // [0,3]
    33  		// Create and add n pieces at this pieceIndex.
    34  		for i := 0; i < n; i++ {
    35  			var piece piece
    36  			piece.HostTableOffset = uint32(fastrand.Intn(100))
    37  			fastrand.Read(piece.MerkleRoot[:])
    38  			chunk.Pieces[pieceIndex] = append(chunk.Pieces[pieceIndex], piece)
    39  		}
    40  	}
    41  	return chunk
    42  }
    43  
    44  // randomPiece is a helper function for testing that creates a random piece.
    45  func randomPiece() piece {
    46  	var piece piece
    47  	piece.HostTableOffset = uint32(fastrand.Intn(100))
    48  	fastrand.Read(piece.MerkleRoot[:])
    49  	return piece
    50  }
    51  
    52  // TestFileNumChunks checks the NumChunks method of the file type.
    53  func TestFileNumChunks(t *testing.T) {
    54  	if testing.Short() {
    55  		t.SkipNow()
    56  	}
    57  	t.Parallel()
    58  
    59  	fileSize := func(numSectors uint64) uint64 {
    60  		return numSectors*modules.SectorSize + uint64(fastrand.Intn(int(modules.SectorSize)))
    61  	}
    62  	// Since the pieceSize is 'random' now, we test a variety of random inputs.
    63  	tests := []struct {
    64  		fileSize   uint64
    65  		dataPieces int
    66  	}{
    67  		{fileSize(10), 10},
    68  		{fileSize(50), 10},
    69  		{fileSize(100), 10},
    70  
    71  		{fileSize(11), 10},
    72  		{fileSize(51), 10},
    73  		{fileSize(101), 10},
    74  
    75  		{fileSize(10), 100},
    76  		{fileSize(50), 100},
    77  		{fileSize(100), 100},
    78  
    79  		{fileSize(11), 100},
    80  		{fileSize(51), 100},
    81  		{fileSize(101), 100},
    82  
    83  		{0, 10}, // 0-length
    84  	}
    85  
    86  	for _, test := range tests {
    87  		// Create erasure-coder
    88  		rsc, _ := skymodules.NewRSCode(test.dataPieces, 1) // can't use 0
    89  		// Create the file
    90  		siaFilePath, _, source, _, sk, _, _, fileMode := newTestFileParams(1, true)
    91  		f, _, _ := customTestFileAndWAL(siaFilePath, source, rsc, sk, test.fileSize, -1, fileMode)
    92  		// Make sure the file reports the correct pieceSize.
    93  		if f.PieceSize() != modules.SectorSize-f.MasterKey().Type().Overhead() {
    94  			t.Fatal("file has wrong pieceSize for its encryption type")
    95  		}
    96  		// Check that the number of chunks matches the expected number.
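        		// A chunk holds pieceSize*dataPieces bytes of file data, so the
        		// expected count is ceil(fileSize/chunkSize), with a minimum of
        		// one chunk for non-empty files.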
    97  		expectedNumChunks := test.fileSize / (f.PieceSize() * uint64(test.dataPieces))
    98  		if expectedNumChunks == 0 && test.fileSize > 0 {
    99  			// There is at least 1 chunk for non 0-byte files.
   100  			expectedNumChunks = 1
   101  		} else if test.fileSize%(f.PieceSize()*uint64(test.dataPieces)) != 0 {
   102  			// If the file size doesn't divide evenly there is 1 chunk of padding.
   103  			expectedNumChunks++
   104  		}
   105  		if f.NumChunks() != expectedNumChunks {
   106  			t.Errorf("Test %v: expected %v, got %v", test, expectedNumChunks, f.NumChunks())
   107  		}
   108  		if err := ensureMetadataValid(f.Metadata()); err != nil {
   109  			t.Fatal(err)
   110  		}
   111  	}
   112  }
   113  
   114  // TestFileRedundancy tests that redundancy is correctly calculated for files
   115  // with a varying number of file contracts and erasure code settings.
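        //
        // Redundancy is driven by the worst chunk: roughly (pieces available in
        // the chunk with the fewest pieces) / MinPieces, and it reports 0 while
        // any chunk has no usable pieces at all, as the assertions below
        // demonstrate.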
   116  func TestFileRedundancy(t *testing.T) {
   117  	if testing.Short() {
   118  		t.SkipNow()
   119  	}
   120  	nDatas := []int{1, 2, 10}
   121  	neverOffline := make(map[string]bool)
   122  	goodForRenew := make(map[string]bool)
   123  	for i := 0; i < 6; i++ {
   124  		pk := types.SiaPublicKey{Key: []byte{byte(i)}}
   125  		neverOffline[pk.String()] = false
   126  		goodForRenew[pk.String()] = true
   127  	}
   128  	// Create a testDir.
   129  	dir := filepath.Join(os.TempDir(), t.Name())
   130  	if err := os.RemoveAll(dir); err != nil {
   131  		t.Fatal(err)
   132  	}
   133  	if err := os.MkdirAll(dir, 0700); err != nil {
   134  		t.Fatal(err)
   135  	}
   136  
   137  	for _, nData := range nDatas {
   138  		rsc, _ := skymodules.NewRSCode(nData, 10)
   139  		siaFilePath, _, source, _, sk, fileSize, numChunks, fileMode := newTestFileParamsWithRC(2, false, rsc)
   140  		f, _, _ := customTestFileAndWAL(siaFilePath, source, rsc, sk, fileSize, numChunks, fileMode)
   141  		// Test that an empty file has 0 redundancy.
   142  		r, ur, err := f.Redundancy(neverOffline, goodForRenew)
   143  		if err != nil {
   144  			t.Fatal(err)
   145  		}
   146  		if r != 0 || ur != 0 {
   147  			t.Error("expected 0 and 0 redundancy, got", r, ur)
   148  		}
   149  		// Test that a file with 1 host that has a piece for every chunk but
   150  		// one chunk still has a redundancy of 0.
   151  		for i := uint64(0); i < f.NumChunks()-1; i++ {
   152  			err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(0)}}, i, 0, crypto.Hash{})
   153  			if err != nil {
   154  				t.Fatal(err)
   155  			}
   156  		}
   157  		r, ur, err = f.Redundancy(neverOffline, goodForRenew)
   158  		if err != nil {
   159  			t.Fatal(err)
   160  		}
   161  		if r != 0 || ur != 0 {
   162  			t.Error("expected 0 and 0 redundancy, got", r, ur)
   163  		}
   164  		// Test that adding another host with a piece for every chunk but one
   165  		// chunk still results in a file with redundancy 0.
   166  		for i := uint64(0); i < f.NumChunks()-1; i++ {
   167  			err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(1)}}, i, 1, crypto.Hash{})
   168  			if err != nil {
   169  				t.Fatal(err)
   170  			}
   171  		}
   172  		r, ur, err = f.Redundancy(neverOffline, goodForRenew)
   173  		if err != nil {
   174  			t.Fatal(err)
   175  		}
   176  		if r != 0 || ur != 0 {
   177  			t.Error("expected 0 and 0 redundancy, got", r, ur)
   178  		}
   179  		// Test that adding a file contract with a piece for the missing chunk
   180  		// results in a file with redundancy > 0 && <= 1.
   181  		err = f.AddPiece(types.SiaPublicKey{Key: []byte{byte(2)}}, f.NumChunks()-1, 0, crypto.Hash{})
   182  		if err != nil {
   183  			t.Fatal(err)
   184  		}
   185  		// 1.0 / MinPieces because the chunk with the least number of pieces has 1 piece.
   186  		expectedR := 1.0 / float64(f.ErasureCode().MinPieces())
   187  		r, ur, err = f.Redundancy(neverOffline, goodForRenew)
   188  		if err != nil {
   189  			t.Fatal(err)
   190  		}
   191  		if r != expectedR || ur != expectedR {
   192  			t.Errorf("expected %f redundancy, got %f %f", expectedR, r, ur)
   193  		}
   194  		// Test that adding a file contract that has erasureCode.MinPieces() pieces
   195  		// per chunk for all chunks results in a file with redundancy > 1.
   196  		for iChunk := uint64(0); iChunk < f.NumChunks(); iChunk++ {
   197  			for iPiece := uint64(1); iPiece < uint64(f.ErasureCode().MinPieces()); iPiece++ {
   198  				err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(3)}}, iChunk, iPiece, crypto.Hash{})
   199  				if err != nil {
   200  					t.Fatal(err)
   201  				}
   202  			}
   203  			err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(4)}}, iChunk, uint64(f.ErasureCode().MinPieces()), crypto.Hash{})
   204  			if err != nil {
   205  				t.Fatal(err)
   206  			}
   207  		}
   208  		// 1+MinPieces / MinPieces because the chunk with the least number of pieces has 1+MinPieces pieces.
   209  		expectedR = float64(1+f.ErasureCode().MinPieces()) / float64(f.ErasureCode().MinPieces())
   210  		r, ur, err = f.Redundancy(neverOffline, goodForRenew)
   211  		if err != nil {
   212  			t.Fatal(err)
   213  		}
   214  		if r != expectedR || ur != expectedR {
   215  			t.Errorf("expected %f redundancy, got %f %f", expectedR, r, ur)
   216  		}
   217  
   218  		// verify offline file contracts are not counted in the redundancy
   219  		for iChunk := uint64(0); iChunk < f.NumChunks(); iChunk++ {
   220  			for iPiece := uint64(0); iPiece < uint64(f.ErasureCode().MinPieces()); iPiece++ {
   221  				err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(5)}}, iChunk, iPiece, crypto.Hash{})
   222  				if err != nil {
   223  					t.Fatal(err)
   224  				}
   225  			}
   226  		}
   227  		specificOffline := make(map[string]bool)
   228  		for pk := range goodForRenew {
   229  			specificOffline[pk] = false
   230  		}
   231  		specificOffline[types.SiaPublicKey{Key: []byte{byte(5)}}.String()] = true
   232  		r, ur, err = f.Redundancy(specificOffline, goodForRenew)
   233  		if err != nil {
   234  			t.Fatal(err)
   235  		}
   236  		if r != expectedR || ur != expectedR {
   237  			t.Errorf("expected redundancy to ignore offline file contracts, wanted %f got %f", expectedR, r)
   238  		}
   239  		if err := ensureMetadataValid(f.Metadata()); err != nil {
   240  			t.Fatal(err)
   241  		}
   242  	}
   243  }
   244  
   245  // TestFileHealth tests that the health of the file is correctly calculated.
   246  //
   247  // Health is equal to (targetParityPieces - actualParityPieces)/targetParityPieces
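        //
        // With the 10-of-30 code used below (10 data, 20 parity), a chunk with
        // no good pieces has health 1 - ((0-10)/20) = 1.5 and a fully repaired
        // chunk has health 1 - ((30-10)/20) = 0.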
   248  func TestFileHealth(t *testing.T) {
   249  	if testing.Short() {
   250  		t.SkipNow()
   251  	}
   252  	t.Parallel()
   253  
   254  	// Create a Zero byte file
   255  	rsc, _ := skymodules.NewRSCode(10, 20)
   256  	siaFilePath, _, source, _, sk, _, _, fileMode := newTestFileParams(1, true)
   257  	zeroFile, _, _ := customTestFileAndWAL(siaFilePath, source, rsc, sk, 0, 0, fileMode)
   258  
   259  	// Create offline map
   260  	offlineMap := make(map[string]bool)
   261  	goodForRenewMap := make(map[string]bool)
   262  
   263  	// Confirm the health is correct
   264  	health, stuckHealth, userHealth, userStuckHealth, numStuckChunks, repairBytes, stuckBytes := zeroFile.Health(offlineMap, goodForRenewMap)
   265  	if health != 0 {
   266  		t.Error("Expected health to be 0 but was", health)
   267  	}
   268  	if stuckHealth != 0 {
   269  		t.Error("Expected stuck health to be 0 but was", stuckHealth)
   270  	}
   271  	if userHealth != 0 {
   272  		t.Error("Expected userHealth to be 0 but was", userHealth)
   273  	}
   274  	if userStuckHealth != 0 {
   275  		t.Error("Expected user stuck health to be 0 but was", userStuckHealth)
   276  	}
   277  	if numStuckChunks != 0 {
   278  		t.Error("Expected no stuck chunks but found", numStuckChunks)
   279  	}
   280  	if repairBytes != 0 {
   281  		t.Errorf("Repair Bytes of file not as expected, got %v expected %v", repairBytes, 0)
   282  	}
   283  	if stuckBytes != 0 {
   284  		t.Errorf("Stuck Bytes of file not as expected, got %v expected %v", stuckBytes, 0)
   285  	}
   286  
   287  	// Create File with 1 chunk
   288  	siaFilePath, _, source, _, sk, _, _, fileMode = newTestFileParams(1, true)
   289  	f, _, _ := customTestFileAndWAL(siaFilePath, source, rsc, sk, 100, 1, fileMode)
   290  
   291  	// Check file health. Since there are no pieces in the chunk yet, no good
   292  	// pieces will be found, resulting in a health of 1.5 with the 10-of-30
   293  	// erasure code settings. Since there are no stuck chunks, the stuckHealth
   294  	// of the file should be 0.
   295  	//
   296  	// 1 - ((0 - 10) / 20)
   297  	health, stuckHealth, _, _, _, repairBytes, stuckBytes = f.Health(offlineMap, goodForRenewMap)
   298  	if health != 1.5 {
   299  		t.Errorf("Health of file not as expected, got %v expected 1.5", health)
   300  	}
   301  	if stuckHealth != float64(0) {
   302  		t.Errorf("Stuck Health of file not as expected, got %v expected 0", stuckHealth)
   303  	}
   304  	expected := uint64(rsc.NumPieces()) * modules.SectorSize
   305  	if repairBytes != expected {
   306  		t.Errorf("Repair Bytes of file not as expected, got %v expected %v", repairBytes, expected)
   307  	}
   308  	if stuckBytes != 0 {
   309  		t.Errorf("Stuck Bytes of file not as expected, got %v expected %v", stuckBytes, 0)
   310  	}
   311  
   312  	for i := 0; i < 2; i++ {
   313  		spk := types.SiaPublicKey{Algorithm: types.SignatureEd25519, Key: []byte{byte(i)}}
   314  		offlineMap[spk.String()] = false
   315  		goodForRenewMap[spk.String()] = true
   316  		if err := f.AddPiece(spk, 0, 0, crypto.Hash{}); err != nil {
   317  			t.Fatal(err)
   318  		}
   319  	}
   320  
   321  	// Check health. Even though two pieces were added, the health should only
   322  	// drop to 1.45 since both good pieces were added to the same pieceSet.
   323  	//
   324  	// 1 - ((1 - 10) / 20)
   325  	health, _, _, _, _, repairBytes, stuckBytes = f.Health(offlineMap, goodForRenewMap)
   326  	if health != 1.45 {
   327  		t.Fatalf("Health of file not as expected, got %v expected 1.45", health)
   328  	}
   329  	expected = uint64(rsc.NumPieces()-1) * modules.SectorSize
   330  	if repairBytes != expected {
   331  		t.Errorf("Repair Bytes of file not as expected, got %v expected %v", repairBytes, expected)
   332  	}
   333  	if stuckBytes != 0 {
   334  		t.Errorf("Stuck Bytes of file not as expected, got %v expected %v", stuckBytes, 0)
   335  	}
   336  
   337  	// Add one good piece to the second piece set, confirm health is now 1.40.
   338  	spk := types.SiaPublicKey{Algorithm: types.SignatureEd25519, Key: []byte{0}}
   339  	offlineMap[spk.String()] = false
   340  	goodForRenewMap[spk.String()] = true
   341  	if err := f.AddPiece(spk, 0, 1, crypto.Hash{}); err != nil {
   342  		t.Fatal(err)
   343  	}
   344  	health, _, _, _, _, repairBytes, stuckBytes = f.Health(offlineMap, goodForRenewMap)
   345  	if health != 1.40 {
   346  		t.Fatalf("Health of file not as expected, got %v expected 1.40", health)
   347  	}
   348  	expected = uint64(rsc.NumPieces()-2) * modules.SectorSize
   349  	if repairBytes != expected {
   350  		t.Errorf("Repair Bytes of file not as expected, got %v expected %v", repairBytes, expected)
   351  	}
   352  	if stuckBytes != 0 {
   353  		t.Errorf("Stuck Bytes of file not as expected, got %v expected %v", stuckBytes, 0)
   354  	}
   355  
   356  	// Add another good piece to the second piece set, confirm health is still 1.40.
   357  	spk = types.SiaPublicKey{Algorithm: types.SignatureEd25519, Key: []byte{1}}
   358  	offlineMap[spk.String()] = false
   359  	goodForRenewMap[spk.String()] = true
   360  	if err := f.AddPiece(spk, 0, 1, crypto.Hash{}); err != nil {
   361  		t.Fatal(err)
   362  	}
   363  	health, _, _, _, _, repairBytes, stuckBytes = f.Health(offlineMap, goodForRenewMap)
   364  	if health != 1.40 {
   365  		t.Fatalf("Health of file not as expected, got %v expected 1.40", health)
   366  	}
   367  	expected = uint64(rsc.NumPieces()-2) * modules.SectorSize
   368  	if repairBytes != expected {
   369  		t.Errorf("Repair Bytes of file not as expected, got %v expected %v", repairBytes, expected)
   370  	}
   371  	if stuckBytes != 0 {
   372  		t.Errorf("Stuck Bytes of file not as expected, got %v expected %v", stuckBytes, 0)
   373  	}
   374  
   375  	// Mark chunk as stuck
   376  	err := f.SetStuck(0, true)
   377  	if err != nil {
   378  		t.Fatal(err)
   379  	}
   380  	health, stuckHealth, _, _, numStuckChunks, repairBytes, stuckBytes = f.Health(offlineMap, goodForRenewMap)
   381  	// Health should now be 0 since there are no unstuck chunks
   382  	if health != 0 {
   383  		t.Fatalf("Health of file not as expected, got %v expected 0", health)
   384  	}
   385  	// Stuck Health should now be 1.4
   386  	if stuckHealth != 1.40 {
   387  		t.Fatalf("Stuck Health of file not as expected, got %v expected 1.40", stuckHealth)
   388  	}
   389  	// There should be 1 stuck chunk
   390  	if numStuckChunks != 1 {
   391  		t.Fatalf("Expected 1 stuck chunk but found %v", numStuckChunks)
   392  	}
   393  	expected = uint64(rsc.NumPieces()-2) * modules.SectorSize
   394  	if repairBytes != 0 {
   395  		t.Errorf("Repair Bytes of file not as expected, got %v expected %v", repairBytes, 0)
   396  	}
   397  	if stuckBytes != expected {
   398  		t.Errorf("Stuck Bytes of file not as expected, got %v expected %v", stuckBytes, expected)
   399  	}
   400  
   401  	// Add good pieces until the file health is below the RepairThreshold
   402  	thresholdPieces := rsc.NumPieces() - 1
   403  	for i := 0; i < thresholdPieces; i++ {
   404  		spk := types.SiaPublicKey{Algorithm: types.SignatureEd25519, Key: []byte{byte(i)}}
   405  		offlineMap[spk.String()] = false
   406  		goodForRenewMap[spk.String()] = true
   407  		if err := f.AddPiece(spk, 0, uint64(i), crypto.Hash{}); err != nil {
   408  			t.Fatal(err)
   409  		}
   410  	}
   411  
   412  	// Health should still be 0 since there are no unstuck chunks
   413  	health, stuckHealth, _, _, numStuckChunks, repairBytes, stuckBytes = f.Health(offlineMap, goodForRenewMap)
   414  	if health != 0 {
   415  		t.Fatalf("Health of file not as expected, got %v expected 0", health)
   416  	}
   417  	// Stuck Health should now be 0.05
   418  	if stuckHealth != 0.05 {
   419  		t.Fatalf("Stuck Health of file not as expected, got %v expected 0.05", stuckHealth)
   420  	}
   421  	// There should be 1 stuck chunk
   422  	if numStuckChunks != 1 {
   423  		t.Fatalf("Expected 1 stuck chunk but found %v", numStuckChunks)
   424  	}
   425  	// There should be no repair bytes
   426  	if repairBytes != 0 {
   427  		t.Errorf("Repair Bytes of file not as expected, got %v expected %v", repairBytes, 0)
   428  	}
   429  	// There should be stuck bytes
   430  	expected = uint64(rsc.NumPieces()-thresholdPieces) * modules.SectorSize
   431  	if stuckBytes != expected {
   432  		t.Errorf("Stuck Bytes of file not as expected, got %v expected %v", stuckBytes, expected)
   433  	}
   434  
   435  	// Mark as not stuck
   436  	err = f.SetStuck(0, false)
   437  	if err != nil {
   438  		t.Fatal(err)
   439  	}
   440  
   441  	// Health should now be 0.05 since the chunk is no longer stuck
   442  	health, stuckHealth, _, _, numStuckChunks, repairBytes, stuckBytes = f.Health(offlineMap, goodForRenewMap)
   443  	if health != 0.05 {
   444  		t.Fatalf("Health of file not as expected, got %v expected 0.05", health)
   445  	}
   446  	// Stuck Health should now be 0
   447  	if stuckHealth != 0 {
   448  		t.Fatalf("Stuck Health of file not as expected, got %v expected 0", stuckHealth)
   449  	}
   450  	// There should be 0 stuck chunks
   451  	if numStuckChunks != 0 {
   452  		t.Fatalf("Expected 0 stuck chunks but found %v", numStuckChunks)
   453  	}
   454  	// There should be no repair bytes
   455  	if repairBytes != 0 {
   456  		t.Errorf("Repair Bytes of file not as expected, got %v expected %v", repairBytes, 0)
   457  	}
   458  	// There should be no stuck bytes
   459  	if stuckBytes != 0 {
   460  		t.Errorf("Stuck Bytes of file not as expected, got %v expected %v", stuckBytes, 0)
   461  	}
   462  
   463  	// Create File with 2 chunks
   464  	siaFilePath, _, source, _, sk, _, _, fileMode = newTestFileParams(1, true)
   465  	f, _, _ = customTestFileAndWAL(siaFilePath, source, rsc, sk, 5e4, 2, fileMode)
   469  
   470  	// Create offline map
   471  	offlineMap = make(map[string]bool)
   472  	goodForRenewMap = make(map[string]bool)
   473  
   474  	// Check file health. Since there are no pieces in the chunks yet, no good
   475  	// pieces will be found, resulting in a health of 1.5.
   476  	health, _, _, _, _, repairBytes, stuckBytes = f.Health(offlineMap, goodForRenewMap)
   477  	if health != 1.5 {
   478  		t.Fatalf("Health of file not as expected, got %v expected 1.5", health)
   479  	}
   480  	firstRepair := uint64(rsc.NumPieces()) * modules.SectorSize
   481  	secondRepair := uint64(rsc.NumPieces()) * modules.SectorSize
   482  	expected = firstRepair + secondRepair
   483  	if repairBytes != expected {
   484  		t.Errorf("Repair Bytes of file not as expected, got %v expected %v", repairBytes, expected)
   485  	}
   486  	if stuckBytes != 0 {
   487  		t.Errorf("Stuck Bytes of file not as expected, got %v expected %v", stuckBytes, 0)
   488  	}
   489  
   490  	// Add good pieces to the first chunk
   491  	for i := 0; i < 4; i++ {
   492  		spk := types.SiaPublicKey{Algorithm: types.SignatureEd25519, Key: []byte{byte(i)}}
   493  		offlineMap[spk.String()] = false
   494  		goodForRenewMap[spk.String()] = true
   495  		if err := f.AddPiece(spk, 0, uint64(i%2), crypto.Hash{}); err != nil {
   496  			t.Fatal(err)
   497  		}
   498  	}
   499  
   500  	// Check health; it should still be 1.5 because the other chunk doesn't
   501  	// have any good pieces.
   502  	health, stuckHealth, _, _, _, repairBytes, stuckBytes = f.Health(offlineMap, goodForRenewMap)
   503  	if health != 1.5 {
   504  		t.Fatalf("Health of file not as expected, got %v expected 1.5", health)
   505  	}
   506  	firstRepair = uint64(rsc.NumPieces()-2) * modules.SectorSize
   507  	secondRepair = uint64(rsc.NumPieces()) * modules.SectorSize
   508  	expected = firstRepair + secondRepair
   509  	if repairBytes != expected {
   510  		t.Errorf("Repair Bytes of file not as expected, got %v expected %v", repairBytes, expected)
   511  	}
   512  	if stuckBytes != 0 {
   513  		t.Errorf("Stuck Bytes of file not as expected, got %v expected %v", stuckBytes, 0)
   514  	}
   515  
   516  	for i := 0; i < 4; i++ {
   517  		spk := types.SiaPublicKey{Algorithm: types.SignatureEd25519, Key: []byte{byte(i)}}
   518  		offlineMap[spk.String()] = false
   519  		goodForRenewMap[spk.String()] = true
   520  		if err := f.AddPiece(spk, 1, uint64(i%2), crypto.Hash{}); err != nil {
   521  			t.Fatal(err)
   522  		}
   523  	}
   524  	health, _, _, _, _, repairBytes, stuckBytes = f.Health(offlineMap, goodForRenewMap)
   525  	if health != 1.40 {
   526  		t.Fatalf("Health of file not as expected, got %v expected 1.40", health)
   527  	}
   528  	firstRepair = uint64(rsc.NumPieces()-2) * modules.SectorSize
   529  	secondRepair = uint64(rsc.NumPieces()-2) * modules.SectorSize
   530  	expected = firstRepair + secondRepair
   531  	if repairBytes != expected {
   532  		t.Errorf("Repair Bytes of file not as expected, got %v expected %v", repairBytes, expected)
   533  	}
   534  	if stuckBytes != 0 {
   535  		t.Errorf("Stuck Bytes of file not as expected, got %v expected %v", stuckBytes, 0)
   536  	}
   537  
   538  	// Mark second chunk as stuck
   539  	err = f.SetStuck(1, true)
   540  	if err != nil {
   541  		t.Fatal(err)
   542  	}
   543  	health, stuckHealth, _, _, numStuckChunks, repairBytes, stuckBytes = f.Health(offlineMap, goodForRenewMap)
   544  	// Since both chunks have the same health, the file health and the file stuck health should be the same
   545  	if health != 1.40 {
   546  		t.Fatalf("Health of file not as expected, got %v expected 1.40", health)
   547  	}
   548  	if stuckHealth != 1.40 {
   549  		t.Fatalf("Stuck Health of file not as expected, got %v expected 1.4", stuckHealth)
   550  	}
   551  	// Check health, verify there is 1 stuck chunk
   552  	if numStuckChunks != 1 {
   553  		t.Fatalf("Expected 1 stuck chunk but found %v", numStuckChunks)
   554  	}
   555  	if err := ensureMetadataValid(f.Metadata()); err != nil {
   556  		t.Fatal(err)
   557  	}
   558  	firstRepair = uint64(rsc.NumPieces()-2) * modules.SectorSize
   559  	secondRepair = uint64(rsc.NumPieces()-2) * modules.SectorSize
   560  	if repairBytes != firstRepair {
   561  		t.Errorf("Repair Bytes of file not as expected, got %v expected %v", repairBytes, firstRepair)
   562  	}
   563  	if stuckBytes != secondRepair {
   564  		t.Errorf("Stuck Bytes of file not as expected, got %v expected %v", stuckBytes, secondRepair)
   565  	}
   566  }
   567  
   568  // TestShrink is a unit test for the Shrink method.
   569  func TestShrink(t *testing.T) {
   570  	if testing.Short() {
   571  		t.SkipNow()
   572  	}
   573  	t.Parallel()
   574  
   575  	// Declare a check method.
   576  	checkFile := func(sf *SiaFile, numChunks, size, diskSize uint64) error {
   577  		if numChunks != sf.NumChunks() {
   578  			return fmt.Errorf("Expected %v chunks but was %v", numChunks, sf.NumChunks())
   579  		}
   580  		if size != sf.Size() {
   581  			return fmt.Errorf("Expected size to be %v but was %v", size, sf.Size())
   582  		}
   583  		fi, err := os.Stat(sf.siaFilePath)
   584  		if err != nil {
   585  			return err
   586  		}
   587  		if fi.Size() != int64(diskSize) {
   588  			return fmt.Errorf("Expected diskSize to be %v but was %v", diskSize, fi.Size())
   589  		}
   590  		return nil
   591  	}
   592  
   593  	// Create a siafile with 3 chunks.
   594  	siaFilePath, _, source, rc, sk, _, _, fileMode := newTestFileParams(1, false)
   595  	numChunks := 3
   596  	chunkSize := skymodules.ChunkSize(crypto.TypeDefaultRenter, uint64(rc.MinPieces()))
   597  	fileSize := chunkSize*uint64(numChunks) - 1 // last chunk is partial
   598  	sf, _, _ := customTestFileAndWAL(siaFilePath, source, rc, sk, fileSize, numChunks, fileMode)
   599  
   600  	// Check initial file. The initial size is 3 full pages plus a marshaled
   601  	// empty chunk: one page for the metadata, 2 for the first 2 chunks, and
   602  	// the empty chunk at the end.
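        	// Illustrative layout: [page 0: metadata][page 1: chunk 0]
        	// [page 2: chunk 1][marshaledChunkSize(0) bytes: empty chunk 2].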
   603  	if err := checkFile(sf, 3, fileSize, 3*pageSize+uint64(marshaledChunkSize(0))); err != nil {
   604  		t.Fatal(err)
   605  	}
   606  
   607  	// Shrink to 2 chunks.
   608  	err := sf.Shrink(2)
   609  	if err != nil {
   610  		t.Fatal(err)
   611  	}
   612  	if err := checkFile(sf, 2, 2*chunkSize, 3*pageSize); err != nil {
   613  		t.Fatal(err)
   614  	}
   615  
   616  	// Shrink to 0 chunks.
   617  	err = sf.Shrink(0)
   618  	if err != nil {
   619  		t.Fatal(err)
   620  	}
   621  	if err := checkFile(sf, 0, 0, pageSize); err != nil {
   622  		t.Fatal(err)
   623  	}
   624  
   625  	// Shrink to 1 chunk. Should fail since that would grow the file.
   626  	err = sf.Shrink(1)
   627  	if !errors.Contains(err, errShrinkWithTooManyChunks) {
   628  		t.Fatal("shrinking to more chunks than the file has should fail", err)
   629  	}
   630  	if err := checkFile(sf, 0, 0, pageSize); err != nil {
   631  		t.Fatal(err)
   632  	}
   633  }
   634  
   635  // TestGrowNumChunks is a unit test for the SiaFile's GrowNumChunks method.
   636  func TestGrowNumChunks(t *testing.T) {
   637  	if testing.Short() {
   638  		t.SkipNow()
   639  	}
   640  	t.Parallel()
   641  
   642  	// Create a blank file.
   643  	siaFilePath, _, source, rc, sk, fileSize, numChunks, fileMode := newTestFileParams(1, false)
   644  	sf, wal, _ := customTestFileAndWAL(siaFilePath, source, rc, sk, fileSize, numChunks, fileMode)
   645  	expectedChunks := sf.NumChunks()
   646  	expectedSize := sf.Size()
   647  
   648  	// Declare a check method.
   649  	checkFile := func(sf *SiaFile, numChunks, size uint64) {
   650  		if numChunks != sf.NumChunks() {
   651  			t.Fatalf("Expected %v chunks but was %v", numChunks, sf.NumChunks())
   652  		}
   653  		if size != sf.Size() {
   654  			t.Fatalf("Expected size to be %v but was %v", size, sf.Size())
   655  		}
   656  	}
   657  
   658  	// Increase the size of the file by 1 chunk.
   659  	expectedChunks++
   660  	expectedSize += sf.ChunkSize()
   661  	err := sf.GrowNumChunks(expectedChunks)
   662  	if err != nil {
   663  		t.Fatal(err)
   664  	}
   665  	// Check the file after growing the chunks.
   666  	checkFile(sf, expectedChunks, expectedSize)
   667  	// Load the file from disk again to also check that persistence works.
   668  	sf, err = LoadSiaFile(sf.siaFilePath, wal)
   669  	if err != nil {
   670  		t.Fatal(err)
   671  	}
   672  	// Check that size and chunks still match.
   673  	checkFile(sf, expectedChunks, expectedSize)
   674  
   675  	// Call GrowNumChunks with the same argument again. This should be a no-op.
   676  	err = sf.GrowNumChunks(expectedChunks)
   677  	if err != nil {
   678  		t.Fatal(err)
   679  	}
   680  	// Check the file after growing the chunks.
   681  	checkFile(sf, expectedChunks, expectedSize)
   682  	// Load the file from disk again to also check that no wrong persistence
   683  	// happened.
   684  	sf, err = LoadSiaFile(sf.siaFilePath, wal)
   685  	if err != nil {
   686  		t.Fatal(err)
   687  	}
   688  	// Check that size and chunks still match.
   689  	checkFile(sf, expectedChunks, expectedSize)
   690  
   691  	// Grow the file by 2 chunks to see if multiple chunks also work.
   692  	expectedChunks += 2
   693  	expectedSize += 2 * sf.ChunkSize()
   694  	err = sf.GrowNumChunks(expectedChunks)
   695  	if err != nil {
   696  		t.Fatal(err)
   697  	}
   698  	// Check the file after growing the chunks.
   699  	checkFile(sf, expectedChunks, expectedSize)
   700  	// Load the file from disk again to also check that persistence works.
   701  	sf, err = LoadSiaFile(sf.siaFilePath, wal)
   702  	if err != nil {
   703  		t.Fatal(err)
   704  	}
   705  	// Check that size and chunks still match.
   706  	checkFile(sf, expectedChunks, expectedSize)
   707  	if err := ensureMetadataValid(sf.Metadata()); err != nil {
   708  		t.Fatal(err)
   709  	}
   710  }
   711  
   712  // TestPruneHosts is a unit test for the pruneHosts method.
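        //
        // pruneHosts is expected to drop unused host keys from the pubKeyTable
        // and rewrite each piece's HostTableOffset to point into the compacted
        // table; the offset checks below verify exactly that.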
   713  func TestPruneHosts(t *testing.T) {
   714  	if testing.Short() {
   715  		t.SkipNow()
   716  	}
   717  	t.Parallel()
   718  
   719  	// Create siafile.
   720  	siaFilePath, _, source, rc, sk, fileSize, numChunks, fileMode := newTestFileParams(1, false)
   721  	sf, _, _ := customTestFileAndWAL(siaFilePath, source, rc, sk, fileSize, numChunks, fileMode)
   722  
   723  	// Add 3 random hostkeys to the file.
   724  	sf.addRandomHostKeys(3)
   725  
   726  	// Save changes to disk.
   727  	updates, err := sf.saveHeaderUpdates()
   728  	if err != nil {
   729  		t.Fatal(err)
   730  	}
   731  	if err := sf.createAndApplyTransaction(updates...); err != nil {
   732  		t.Fatal(err)
   733  	}
   734  
   735  	// Add one piece for every host to every pieceSet.
   736  	for _, hk := range sf.HostPublicKeys() {
   737  		err := sf.iterateChunksReadonly(func(chunk chunk) error {
   738  			for pieceIndex := range chunk.Pieces {
   739  				if err := sf.AddPiece(hk, uint64(chunk.Index), uint64(pieceIndex), crypto.Hash{}); err != nil {
   740  					t.Fatal(err)
   741  				}
   742  			}
   743  			return nil
   744  		})
   745  		if err != nil {
   746  			t.Fatal(err)
   747  		}
   748  	}
   749  
   750  	// Mark hostkeys 0 and 2 as unused.
   751  	sf.pubKeyTable[0].Used = false
   752  	sf.pubKeyTable[2].Used = false
   753  	remainingKey := sf.pubKeyTable[1]
   754  
   755  	// Prune the file.
   756  	updates, err = sf.pruneHosts(0)
   757  	if err != nil {
   758  		t.Fatal(err)
   759  	}
   760  	if err := sf.createAndApplyTransaction(updates...); err != nil {
   761  		t.Fatal(err)
   762  	}
   763  
   764  	// Check that there is only a single key left.
   765  	if len(sf.pubKeyTable) != 1 {
   766  		t.Fatalf("There should only be 1 key left but was %v", len(sf.pubKeyTable))
   767  	}
   768  	// The last key should be the correct one.
   769  	if !reflect.DeepEqual(remainingKey, sf.pubKeyTable[0]) {
   770  		t.Fatal("Remaining key doesn't match")
   771  	}
   772  	// Loop over all the pieces and make sure that the pieces with missing
   773  	// hosts were pruned and that the remaining pieces have the correct offset
   774  	// now.
   775  	err = sf.iterateChunksReadonly(func(chunk chunk) error {
   776  		for _, pieceSet := range chunk.Pieces {
   777  			if len(pieceSet) != 1 {
   778  				t.Fatalf("Expected 1 piece in the set but was %v", len(pieceSet))
   779  			}
   780  			// The HostTableOffset should always be 0 since the keys at index 0
   781  			// and 2 were pruned which means that index 1 is now index 0.
   782  			for _, piece := range pieceSet {
   783  				if piece.HostTableOffset != 0 {
   784  					t.Fatalf("HostTableOffset should be 0 but was %v", piece.HostTableOffset)
   785  				}
   786  			}
   787  		}
   788  		return nil
   789  	})
   790  	if err != nil {
   791  		t.Fatal(err)
   792  	}
   793  	if err := ensureMetadataValid(sf.Metadata()); err != nil {
   794  		t.Fatal(err)
   795  	}
   796  }
   797  
   798  // TestPruneHostsWithMaxUnused is a unit test for the pruneHosts method with an input > 0.
   799  func TestPruneHostsWithMaxUnused(t *testing.T) {
   800  	if testing.Short() {
   801  		t.SkipNow()
   802  	}
   803  	t.Parallel()
   804  
   805  	// Create siafile.
   806  	siaFilePath, _, source, rc, sk, fileSize, numChunks, fileMode := newTestFileParams(1, false)
   807  	sf, _, _ := customTestFileAndWAL(siaFilePath, source, rc, sk, fileSize, numChunks, fileMode)
   808  
   809  	// Add 3 random hostkeys to the file.
   810  	sf.addRandomHostKeys(3)
   811  
   812  	// Save changes to disk.
   813  	updates, err := sf.saveHeaderUpdates()
   814  	if err != nil {
   815  		t.Fatal(err)
   816  	}
   817  	if err := sf.createAndApplyTransaction(updates...); err != nil {
   818  		t.Fatal(err)
   819  	}
   820  
   821  	// Add one piece for every host to every pieceSet.
   822  	for _, hk := range sf.HostPublicKeys() {
   823  		err := sf.iterateChunksReadonly(func(chunk chunk) error {
   824  			for pieceIndex := range chunk.Pieces {
   825  				if err := sf.AddPiece(hk, uint64(chunk.Index), uint64(pieceIndex), crypto.Hash{}); err != nil {
   826  					t.Fatal(err)
   827  				}
   828  			}
   829  			return nil
   830  		})
   831  		if err != nil {
   832  			t.Fatal(err)
   833  		}
   834  	}
   835  
   836  	// Mark hostkeys 0 and 2 as unused.
   837  	sf.pubKeyTable[0].Used = false
   838  	sf.pubKeyTable[2].Used = false
   839  	firstKey := sf.pubKeyTable[0]
   840  	usedKey := sf.pubKeyTable[1]
   841  	thirdKey := sf.pubKeyTable[2]
   842  
   843  	// Prune the file.
   844  	updates, err = sf.pruneHosts(1)
   845  	if err != nil {
   846  		t.Fatal(err)
   847  	}
   848  	if err := sf.createAndApplyTransaction(updates...); err != nil {
   849  		t.Fatal(err)
   850  	}
   851  
   852  	// Check that there are 2 keys left.
   853  	if len(sf.pubKeyTable) != 2 {
   854  		t.Fatalf("There should be 2 keys left but was %v", len(sf.pubKeyTable))
   855  	}
   856  
   857  	// Check which keys we found. Should always find the used one and then
   858  	// either of the other two. Also remember the offset mapping.
   859  	var firstFound, usedFound, thirdFound bool
   860  	offsetMap := make(map[uint32]uint32)
   861  	for i, hpk := range sf.pubKeyTable {
   862  		if reflect.DeepEqual(firstKey, hpk) {
   863  			firstFound = true
   864  			offsetMap[0] = uint32(i)
   865  		} else if reflect.DeepEqual(usedKey, hpk) {
   866  			usedFound = true
   867  			offsetMap[1] = uint32(i)
   868  		} else if reflect.DeepEqual(thirdKey, hpk) {
   869  			thirdFound = true
   870  			offsetMap[2] = uint32(i)
   871  		}
   872  	}
   873  	if !usedFound {
   874  		t.Fatal("couldn't find used key")
   875  	}
   876  	if firstFound == thirdFound {
   877  		t.Fatal("couldn't find the right combination of unused keys", firstFound, thirdFound)
   878  	}
   879  
   880  	// Loop over all the pieces and make sure that the pieces with missing
   881  	// hosts were pruned and that the remaining pieces have the correct offset
   882  	// now.
   883  	err = sf.iterateChunksReadonly(func(chunk chunk) error {
   884  		for _, pieceSet := range chunk.Pieces {
   885  			if len(pieceSet) != 2 {
   886  				t.Fatalf("Expected 2 pieces in the set but was %v", len(pieceSet))
   887  			}
   888  			// If we found the first key before, we dropped the
   889  			// third one. So we expect the pieces to be updated to
   890  			// the new offsets of key 1 and key 2.
   891  			if firstFound {
   892  				if pieceSet[0].HostTableOffset != offsetMap[0] {
   893  					t.Fatal("invalid offset", offsetMap[0])
   894  				}
   895  				if pieceSet[1].HostTableOffset != offsetMap[1] {
   896  					t.Fatal("invalid offset", offsetMap[1])
   897  				}
   898  			}
   899  			// If we found the third key before, we dropped the
   900  			// first one. So we expect the pieces to be updated to
   901  			// the new offsets of key 2 and key 3.
   902  			if thirdFound {
   903  				if pieceSet[0].HostTableOffset != offsetMap[1] {
   904  					t.Fatal("invalid offset", offsetMap[1])
   905  				}
   906  				if pieceSet[1].HostTableOffset != offsetMap[2] {
   907  					t.Fatal("invalid offset", offsetMap[2])
   908  				}
   909  			}
   910  		}
   911  		return nil
   912  	})
   913  	if err != nil {
   914  		t.Fatal(err)
   915  	}
   916  	if err := ensureMetadataValid(sf.Metadata()); err != nil {
   917  		t.Fatal(err)
   918  	}
   919  }
   920  
   921  // TestNumPieces tests the chunk's numPieces method.
   922  func TestNumPieces(t *testing.T) {
   923  	// create a random chunk.
   924  	chunk := randomChunk()
   925  
   926  	// get the number of pieces of the chunk.
   927  	totalPieces := 0
   928  	for _, pieceSet := range chunk.Pieces {
   929  		totalPieces += len(pieceSet)
   930  	}
   931  
   932  	// compare it to the one reported by numPieces.
   933  	if totalPieces != chunk.numPieces() {
   934  		t.Fatalf("Expected %v pieces but was %v", totalPieces, chunk.numPieces())
   935  	}
   936  }
   937  
   938  // TestDefragChunk tests if the defragChunk methods correctly prunes pieces
   939  // from a chunk.
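        //
        // defragChunk is expected to first drop pieces belonging to unused
        // hosts and then, if needed, trim the remaining piece sets so the
        // marshaled chunk still fits within its page allowance.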
   940  func TestDefragChunk(t *testing.T) {
   941  	if testing.Short() {
   942  		t.SkipNow()
   943  	}
   944  	t.Parallel()
   945  	// Get a blank siafile.
   946  	sf, _, _ := newBlankTestFileAndWAL(2) // make sure we have 1 full chunk at the beginning of sf.fullChunks
   947  
   948  	// Use the first chunk of the file for testing.
   949  	chunk, err := sf.chunk(0)
   950  	if err != nil {
   951  		t.Fatal(err)
   952  	}
   953  
   954  	// Add 100 pieces to each set of pieces, all belonging to the same unused
   955  	// host.
   956  	sf.pubKeyTable = append(sf.pubKeyTable, HostPublicKey{Used: false})
   957  	for i := range chunk.Pieces {
   958  		for j := 0; j < 100; j++ {
   959  			chunk.Pieces[i] = append(chunk.Pieces[i], piece{HostTableOffset: 0})
   960  		}
   961  	}
   962  
   963  	// Defrag the chunk. This should remove all the pieces since the host is
   964  	// unused.
   965  	sf.defragChunk(&chunk)
   966  	if chunk.numPieces() != 0 {
   967  		t.Fatalf("chunk should have 0 pieces after defrag but was %v", chunk.numPieces())
   968  	}
   969  
   970  	// Do the same thing again, but this time the host is marked as used.
   971  	sf.pubKeyTable[0].Used = true
   972  	for i := range chunk.Pieces {
   973  		for j := 0; j < 100; j++ {
   974  			chunk.Pieces[i] = append(chunk.Pieces[i], piece{HostTableOffset: 0})
   975  		}
   976  	}
   977  
   978  	// Defrag the chunk.
   979  	maxChunkSize := int64(sf.staticMetadata.StaticPagesPerChunk) * pageSize
   980  	maxPieces := (maxChunkSize - marshaledChunkOverhead) / marshaledPieceSize
   981  	maxPiecesPerSet := maxPieces / int64(len(chunk.Pieces))
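        	// maxPieces is the number of marshaled pieces that fit into the
        	// chunk's page allowance after subtracting the chunk overhead;
        	// dividing by the number of piece sets yields the per-set cap that
        	// defragChunk enforces.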
   982  	sf.defragChunk(&chunk)
   983  
   984  	// The chunk should be smaller than maxChunkSize.
   985  	if chunkSize := marshaledChunkSize(chunk.numPieces()); chunkSize > maxChunkSize {
   986  		t.Errorf("chunkSize is too big %v > %v", chunkSize, maxChunkSize)
   987  	}
   988  	// The chunk should have at most maxPieces pieces.
   989  	if int64(chunk.numPieces()) > maxPieces {
   990  		t.Errorf("chunk should have <= %v pieces after defrag but was %v",
   991  			maxPieces, chunk.numPieces())
   992  	}
   993  	// The chunk should have numPieces * maxPiecesPerSet pieces.
   994  	if expectedPieces := int64(sf.ErasureCode().NumPieces()) * maxPiecesPerSet; expectedPieces != int64(chunk.numPieces()) {
   995  		t.Errorf("chunk should have %v pieces but was %v", expectedPieces, chunk.numPieces())
   996  	}
   997  	// Every set of pieces should have maxPiecesPerSet pieces.
   998  	for i, pieceSet := range chunk.Pieces {
   999  		if int64(len(pieceSet)) != maxPiecesPerSet {
  1000  			t.Errorf("pieceSet%v length is %v instead of the expected %v",
  1001  				i, len(pieceSet), maxPiecesPerSet)
  1002  		}
  1003  	}
  1004  
  1005  	// Create a new file with 2 used hosts and 1 unused one. This file should
  1006  	// use 2 pages per chunk.
  1007  	sf, _, _ = newBlankTestFileAndWAL(2) // make sure we have 1 full chunk at the beginning of the file.
  1008  	sf.staticMetadata.StaticPagesPerChunk = 2
  1009  	sf.pubKeyTable = append(sf.pubKeyTable, HostPublicKey{Used: true})
  1010  	sf.pubKeyTable = append(sf.pubKeyTable, HostPublicKey{Used: true})
  1011  	sf.pubKeyTable = append(sf.pubKeyTable, HostPublicKey{Used: false})
  1012  	sf.pubKeyTable[0].PublicKey.Key = fastrand.Bytes(crypto.EntropySize)
  1013  	sf.pubKeyTable[1].PublicKey.Key = fastrand.Bytes(crypto.EntropySize)
  1014  	sf.pubKeyTable[2].PublicKey.Key = fastrand.Bytes(crypto.EntropySize)
  1015  
  1016  	// Save the above changes to disk to avoid failing sanity checks when
  1017  	// calling AddPiece.
  1018  	updates, err := sf.saveHeaderUpdates()
  1019  	if err != nil {
  1020  		t.Fatal(err)
  1021  	}
  1022  	if err := sf.createAndApplyTransaction(updates...); err != nil {
  1023  		t.Fatal(err)
  1024  	}
  1025  
  1026  	// Add 50 pieces to the first chunk of the file, randomly belonging to
  1027  	// any of the 3 hosts. This should never produce an error.
  1028  	var duration time.Duration
  1029  	for i := 0; i < 50; i++ {
  1030  		chunk, err := sf.chunk(0)
  1031  		if err != nil {
  1032  			t.Fatal(err)
  1033  		}
  1034  		pk := sf.pubKeyTable[fastrand.Intn(len(sf.pubKeyTable))].PublicKey
  1035  		pieceIndex := fastrand.Intn(len(chunk.Pieces))
  1036  		before := time.Now()
  1037  		if err := sf.AddPiece(pk, 0, uint64(pieceIndex), crypto.Hash{}); err != nil {
  1038  			t.Fatal(err)
  1039  		}
  1040  		duration += time.Since(before)
  1041  	}
  1042  
  1043  	// Save the file to disk again to make sure cached fields are persisted.
  1044  	updates, err = sf.saveHeaderUpdates()
  1045  	if err != nil {
  1046  		t.Fatal(err)
  1047  	}
  1048  	if err := sf.createAndApplyTransaction(updates...); err != nil {
  1049  		t.Fatal(err)
  1050  	}
  1051  
  1052  	// Finally load the file from disk again and compare it to the original.
  1053  	sf2, err := LoadSiaFile(sf.siaFilePath, sf.wal)
  1054  	if err != nil {
  1055  		t.Fatal(err)
  1056  	}
  1057  	// Compare the files.
  1058  	if err := equalFiles(sf, sf2); err != nil {
  1059  		t.Fatal(err)
  1060  	}
  1061  	if err := ensureMetadataValid(sf.Metadata()); err != nil {
  1062  		t.Fatal(err)
  1063  	}
  1064  	if err := ensureMetadataValid(sf2.Metadata()); err != nil {
  1065  		t.Fatal(err)
  1066  	}
  1067  }
  1068  
  1069  // TestChunkHealth probes the chunkHealth method
  1070  func TestChunkHealth(t *testing.T) {
  1071  	if testing.Short() {
  1072  		t.SkipNow()
  1073  	}
  1074  	t.Parallel()
  1075  	// Get a blank siafile with at least 3 chunks.
  1076  	sf, _, _ := newBlankTestFileAndWAL(3)
  1077  	rc := sf.ErasureCode()
  1078  
  1079  	// Create offline map
  1080  	offlineMap := make(map[string]bool)
  1081  	goodForRenewMap := make(map[string]bool)
  1082  
  1083  	// Check and record the file health of the initialized file
  1084  	fileHealth, _, _, _, _, repairBytes, stuckBytes := sf.Health(offlineMap, goodForRenewMap)
  1085  	initHealth := float64(1) - (float64(0-rc.MinPieces()) / float64(rc.NumPieces()-rc.MinPieces()))
  1086  	if fileHealth != initHealth {
  1087  		t.Fatalf("Expected file to be %v, got %v", initHealth, fileHealth)
  1088  	}
  1089  	expectedChunkRepairBytes := uint64(rc.NumPieces()) * modules.SectorSize
  1090  	expectedFileRepairBytes := sf.NumChunks() * expectedChunkRepairBytes
  1091  	if repairBytes != expectedFileRepairBytes {
  1092  		t.Errorf("Expected repairBytes to be %v, got %v", expectedFileRepairBytes, repairBytes)
  1093  	}
  1094  	if stuckBytes != 0 {
  1095  		t.Errorf("Expected stuckBytes to be %v, got %v", 0, stuckBytes)
  1096  	}
  1097  
  1098  	// Since we are using a preset offlineMap, all the chunks should have the
  1099  	// same health as the file.
  1100  	err := sf.iterateChunksReadonly(func(chunk chunk) error {
  1101  		chunkHealth, _, repairBytes, err := sf.chunkHealth(chunk, offlineMap, goodForRenewMap)
  1102  		if err != nil {
  1103  			return err
  1104  		}
  1105  		if chunkHealth != fileHealth {
  1106  			t.Log("ChunkHealth:", chunkHealth)
  1107  			t.Log("FileHealth:", fileHealth)
  1108  			t.Fatal("Expected file and chunk to have same health")
  1109  		}
  1110  		if repairBytes != expectedChunkRepairBytes {
  1111  			return fmt.Errorf("Expected repairBytes to be %v, got %v", expectedChunkRepairBytes, repairBytes)
  1112  		}
  1113  		return nil
  1114  	})
  1115  	if err != nil {
  1116  		t.Fatal(err)
  1117  	}
  1118  
  1119  	// Add good piece to first chunk
  1120  	spk := types.SiaPublicKey{Algorithm: types.SignatureEd25519, Key: []byte{1}}
  1121  	offlineMap[spk.String()] = false
  1122  	goodForRenewMap[spk.String()] = true
  1123  	if err := sf.AddPiece(spk, 0, 0, crypto.Hash{}); err != nil {
  1124  		t.Fatal(err)
  1125  	}
  1126  
  1127  	// Chunk at index 0 should now have the health of a chunk with 1 good piece
  1128  	chunk, err := sf.chunk(0)
  1129  	if err != nil {
  1130  		t.Fatal(err)
  1131  	}
  1132  	newHealth := float64(1) - (float64(1-rc.MinPieces()) / float64(rc.NumPieces()-rc.MinPieces()))
  1133  	ch, _, repairBytes, err := sf.chunkHealth(chunk, offlineMap, goodForRenewMap)
  1134  	if err != nil {
  1135  		t.Fatal(err)
  1136  	}
  1137  	if ch != newHealth {
  1138  		t.Fatalf("Expected chunk health to be %v, got %v", newHealth, ch)
  1139  	}
  1140  	expectedChunkRepairBytes = uint64(rc.NumPieces()-1) * modules.SectorSize
  1141  	if repairBytes != expectedChunkRepairBytes {
  1142  		t.Errorf("Expected repairBytes to be %v, got %v", expectedChunkRepairBytes, repairBytes)
  1143  	}
  1144  
  1145  	// Chunk at index 1 should still have the initial (worse) health
  1146  	chunk, err = sf.chunk(1)
  1147  	if err != nil {
  1148  		t.Fatal(err)
  1149  	}
  1150  	ch, _, repairBytes, err = sf.chunkHealth(chunk, offlineMap, goodForRenewMap)
  1151  	if err != nil {
  1152  		t.Fatal(err)
  1153  	}
  1154  	if ch != fileHealth {
  1155  		t.Fatalf("Expected chunk health to be %v, got %v", fileHealth, ch)
  1156  	}
  1157  	expectedChunkRepairBytes = uint64(rc.NumPieces()) * modules.SectorSize
  1158  	if repairBytes != expectedChunkRepairBytes {
  1159  		t.Errorf("Expected repairBytes to be %v, got %v", expectedChunkRepairBytes, repairBytes)
  1160  	}
  1161  
  1162  	// Add good piece to second chunk
  1163  	spk = types.SiaPublicKey{Algorithm: types.SignatureEd25519, Key: []byte{2}}
  1164  	offlineMap[spk.String()] = false
  1165  	goodForRenewMap[spk.String()] = true
  1166  	if err := sf.AddPiece(spk, 1, 0, crypto.Hash{}); err != nil {
  1167  		t.Fatal(err)
  1168  	}
  1169  
  1170  	// Chunk at index 1 should now have the health of a chunk with 1 good piece
  1171  	chunk, err = sf.chunk(1)
  1172  	if err != nil {
  1173  		t.Fatal(err)
  1174  	}
  1175  	ch, _, repairBytes, err = sf.chunkHealth(chunk, offlineMap, goodForRenewMap)
  1176  	if err != nil {
  1177  		t.Fatal(err)
  1178  	}
  1179  	if ch != newHealth {
  1180  		t.Fatalf("Expected chunk health to be %v, got %v", newHealth, ch)
  1181  	}
  1182  	expectedChunkRepairBytes = uint64(rc.NumPieces()-1) * modules.SectorSize
  1183  	if repairBytes != expectedChunkRepairBytes {
  1184  		t.Errorf("Expected repairBytes to be %v, got %v", expectedChunkRepairBytes, repairBytes)
  1185  	}
  1186  
  1187  	// Mark Chunk at index 1 as stuck and confirm that doesn't impact the result
  1188  	// of chunkHealth
  1189  	if err := sf.SetStuck(1, true); err != nil {
  1190  		t.Fatal(err)
  1191  	}
  1192  	chunk, err = sf.chunk(1)
  1193  	if err != nil {
  1194  		t.Fatal(err)
  1195  	}
  1196  	ch, _, repairBytes, err = sf.chunkHealth(chunk, offlineMap, goodForRenewMap)
  1197  	if err != nil {
  1198  		t.Fatal(err)
  1199  	}
  1200  	if ch != newHealth {
  1201  		t.Fatalf("Expected chunk health to be %v, got %v", newHealth, ch)
  1202  	}
  1203  	if err := ensureMetadataValid(sf.Metadata()); err != nil {
  1204  		t.Fatal(err)
  1205  	}
  1206  	if repairBytes != expectedChunkRepairBytes {
  1207  		t.Errorf("Expected repairBytes to be %v, got %v", expectedChunkRepairBytes, repairBytes)
  1208  	}
  1209  }
  1210  
  1211  // TestStuckChunks checks that NumStuckChunks returns the expected value and
  1212  // that the stuck chunks are persisted properly
  1213  func TestStuckChunks(t *testing.T) {
  1214  	if testing.Short() {
  1215  		t.SkipNow()
  1216  	}
  1217  	t.Parallel()
  1218  
  1219  	// Create siafile
  1220  	sf := newTestFile()
  1221  
  1222  	// Mark every other chunk as stuck
  1223  	expectedStuckChunks := 0
  1224  	for chunkIndex := 0; chunkIndex < sf.numChunks; chunkIndex++ {
  1225  		if (chunkIndex % 2) != 0 {
  1226  			continue
  1227  		}
  1228  		if err := sf.SetStuck(uint64(chunkIndex), true); err != nil {
  1229  			t.Fatal(err)
  1230  		}
  1231  		expectedStuckChunks++
  1232  	}
  1233  
  1234  	// Check that the total number of stuck chunks is consistent
  1235  	numStuckChunks := sf.NumStuckChunks()
  1236  	if numStuckChunks != uint64(expectedStuckChunks) {
  1237  		t.Fatalf("Wrong number of stuck chunks, got %v expected %v", numStuckChunks, expectedStuckChunks)
  1238  	}
  1239  
  1240  	// Load siafile from disk
  1241  	sf, err := LoadSiaFile(sf.SiaFilePath(), sf.wal)
  1242  	if err != nil {
  1243  		t.Fatal(err)
  1244  	}
  1245  
  1246  	// Check that the total number of stuck chunks is consistent
  1247  	if numStuckChunks != sf.NumStuckChunks() {
  1248  		t.Fatalf("Wrong number of stuck chunks, got %v expected %v", numStuckChunks, sf.NumStuckChunks())
  1249  	}
  1250  
  1251  	// Check chunks and Stuck Chunk Table
  1252  	err = sf.iterateChunksReadonly(func(chunk chunk) error {
  1253  		if chunk.Index%2 != 0 {
  1254  			if chunk.Stuck {
  1255  				t.Fatal("Found stuck chunk when un-stuck chunk was expected")
  1256  			}
  1257  			return nil
  1258  		}
  1259  		if !chunk.Stuck {
  1260  			t.Fatal("Found un-stuck chunk when stuck chunk was expected")
  1261  		}
  1262  		return nil
  1263  	})
  1264  	if err != nil {
  1265  		t.Fatal(err)
  1266  	}
  1267  	if err := ensureMetadataValid(sf.Metadata()); err != nil {
  1268  		t.Fatal(err)
  1269  	}
  1270  }
  1271  
  1272  // TestUploadedBytes tests that uploadedBytes() returns the expected values for
  1273  // total and unique uploaded bytes.
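        //
        // Below, 4 copies of the same piece are added to a single pieceSet, so
        // 4 sectors were uploaded in total while only 1 sector of the data is
        // unique.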
  1274  func TestUploadedBytes(t *testing.T) {
  1275  	if testing.Short() {
  1276  		t.SkipNow()
  1277  	}
  1278  	// Create a new blank test file
  1279  	f := newBlankTestFile()
  1280  	// Add multiple pieces to the first pieceSet of the first chunk
  1282  	for i := 0; i < 4; i++ {
  1283  		err := f.AddPiece(types.SiaPublicKey{}, uint64(0), 0, crypto.Hash{})
  1284  		if err != nil {
  1285  			t.Fatal(err)
  1286  		}
  1287  	}
  1288  	totalBytes, uniqueBytes, err := f.uploadedBytes()
  1289  	if err != nil {
  1290  		t.Fatal(err)
  1291  	}
  1292  	if totalBytes != 4*modules.SectorSize {
  1293  		t.Errorf("expected totalBytes to be %v, got %v", 4*modules.SectorSize, totalBytes)
  1294  	}
  1295  	if uniqueBytes != modules.SectorSize {
  1296  		t.Errorf("expected uniqueBytes to be %v, got %v", modules.SectorSize, uniqueBytes)
  1297  	}
  1298  	if err := ensureMetadataValid(f.Metadata()); err != nil {
  1299  		t.Fatal(err)
  1300  	}
  1301  }
  1302  
  1303  // TestFileUploadProgressPinning verifies that uploadProgress() returns at most
  1304  // 100%, even if more pieces than necessary have been uploaded.
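        //
        // Every piece below is added for two different hosts, so roughly twice
        // the minimum required data gets uploaded for the file.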
  1305  func TestFileUploadProgressPinning(t *testing.T) {
  1306  	if testing.Short() {
  1307  		t.SkipNow()
  1308  	}
  1309  	f := newBlankTestFile()
  1310  
  1311  	for chunkIndex := uint64(0); chunkIndex < f.NumChunks(); chunkIndex++ {
  1312  		for pieceIndex := uint64(0); pieceIndex < uint64(f.ErasureCode().NumPieces()); pieceIndex++ {
  1313  			err1 := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(0)}}, chunkIndex, pieceIndex, crypto.Hash{})
  1314  			err2 := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(1)}}, chunkIndex, pieceIndex, crypto.Hash{})
  1315  			if err := errors.Compose(err1, err2); err != nil {
  1316  				t.Fatal(err)
  1317  			}
  1318  		}
  1319  	}
  1320  	if f.staticMetadata.CachedUploadProgress != 100 {
  1321  		t.Fatal("expected uploadProgress to report 100% but was", f.staticMetadata.CachedUploadProgress)
  1322  	}
  1323  	if err := ensureMetadataValid(f.Metadata()); err != nil {
  1324  		t.Fatal(err)
  1325  	}
  1326  }
  1327  
  1328  // TestFileExpiration probes the expiration method of the file type.
  1329  func TestFileExpiration(t *testing.T) {
  1330  	if testing.Short() {
  1331  		t.SkipNow()
  1332  	}
  1333  	siaFilePath, _, source, rc, sk, fileSize, numChunks, fileMode := newTestFileParams(1, false)
  1334  	f, _, _ := customTestFileAndWAL(siaFilePath, source, rc, sk, fileSize, numChunks, fileMode)
  1335  	contracts := make(map[string]skymodules.RenterContract)
  1336  	_ = f.Expiration(contracts)
  1337  	if f.staticMetadata.CachedExpiration != 0 {
  1338  		t.Error("file with no pieces should report as having no time remaining")
  1339  	}
  1340  	// Create 3 public keys
  1341  	pk1 := types.SiaPublicKey{Key: []byte{0}}
  1342  	pk2 := types.SiaPublicKey{Key: []byte{1}}
  1343  	pk3 := types.SiaPublicKey{Key: []byte{2}}
  1344  
  1345  	// Add a piece for each key to the file.
  1346  	err1 := f.AddPiece(pk1, 0, 0, crypto.Hash{})
  1347  	err2 := f.AddPiece(pk2, 0, 1, crypto.Hash{})
  1348  	err3 := f.AddPiece(pk3, 0, 2, crypto.Hash{})
  1349  	if err := errors.Compose(err1, err2, err3); err != nil {
  1350  		t.Fatal(err)
  1351  	}
  1352  
  1353  	// Add a contract.
  1354  	fc := skymodules.RenterContract{}
  1355  	fc.EndHeight = 100
  1356  	contracts[pk1.String()] = fc
  1357  	_ = f.Expiration(contracts)
  1358  	if f.staticMetadata.CachedExpiration != 100 {
  1359  		t.Error("file did not report lowest EndHeight", f.staticMetadata.CachedExpiration)
  1360  	}
  1361  
  1362  	// Add a contract with a lower EndHeight.
  1363  	fc.EndHeight = 50
  1364  	contracts[pk2.String()] = fc
  1365  	_ = f.Expiration(contracts)
  1366  	if f.staticMetadata.CachedExpiration != 50 {
  1367  		t.Error("file did not report lowest EndHeight", f.staticMetadata.CachedExpiration)
  1368  	}
  1369  
  1370  	// Add a contract with a higher EndHeight.
  1371  	fc.EndHeight = 75
  1372  	contracts[pk3.String()] = fc
  1373  	_ = f.Expiration(contracts)
  1374  	if f.staticMetadata.CachedExpiration != 50 {
  1375  		t.Error("file did not report lowest EndHeight", f.staticMetadata.CachedExpiration)
  1376  	}
  1377  	if err := ensureMetadataValid(f.Metadata()); err != nil {
  1378  		t.Fatal(err)
  1379  	}
  1380  }
  1381  
  1382  // BenchmarkLoadSiaFile benchmarks loading an existing siafile's metadata into
  1383  // memory.
  1384  func BenchmarkLoadSiaFile(b *testing.B) {
  1385  	// Get new file params
  1386  	siaFilePath, _, source, _, sk, _, _, fileMode := newTestFileParams(1, false)
  1387  	// Create the path to the file.
  1388  	dir, _ := filepath.Split(siaFilePath)
  1389  	if err := os.MkdirAll(dir, 0700); err != nil {
  1390  		b.Fatal(err)
  1391  	}
  1392  	// Create the file.
  1393  	wal, _ := newTestWAL()
  1394  	rc, err := skymodules.NewRSSubCode(10, 20, crypto.SegmentSize)
  1395  	if err != nil {
  1396  		b.Fatal(err)
  1397  	}
  1398  	sf, err := New(siaFilePath, source, wal, rc, sk, 1, fileMode) // 1 chunk file
  1399  	if err != nil {
  1400  		b.Fatal(err)
  1401  	}
  1402  	if err := sf.GrowNumChunks(10); err != nil { // Grow file to 10 chunks
  1403  		b.Fatal(err)
  1404  	}
  1405  	// Add pieces to chunks until every chunk has full redundancy.
  1406  	hostKeys := make([]types.SiaPublicKey, rc.NumPieces())
  1407  	for i := range hostKeys {
  1408  		fastrand.Read(hostKeys[i].Key)
  1409  	}
  1410  	for pieceIndex := 0; pieceIndex < rc.NumPieces(); pieceIndex++ {
  1411  		for chunkIndex := 0; chunkIndex < int(sf.NumChunks()); chunkIndex++ {
  1412  			if err := sf.AddPiece(hostKeys[pieceIndex], uint64(chunkIndex), uint64(pieceIndex), crypto.Hash{}); err != nil {
  1413  				b.Fatal(err)
  1414  			}
  1415  		}
  1416  	}
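        	// Time how long it takes to load the siafile from disk b.N times.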
  1417  	b.ResetTimer()
  1418  	for loads := 0; loads < b.N; loads++ {
  1419  		sf, err = LoadSiaFile(siaFilePath, wal)
  1420  		if err != nil {
  1421  			b.Fatal(err)
  1422  		}
  1423  	}
  1424  }
  1425  
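        // BenchmarkRandomChunkWriteSingleThreaded benchmarks random chunk writes
        // using a single worker thread.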
  1426  func BenchmarkRandomChunkWriteSingleThreaded(b *testing.B) {
  1427  	benchmarkRandomChunkWrite(1, b)
  1428  }
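
        // BenchmarkRandomChunkWriteMultiThreaded benchmarks random chunk writes
        // using 50 concurrent worker threads.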
  1429  func BenchmarkRandomChunkWriteMultiThreaded(b *testing.B) {
  1430  	// 50 threads seems reasonable since it matches the number of hosts we usually have contracts with.
  1431  	benchmarkRandomChunkWrite(50, b)
  1432  }
  1433  
  1434  // benchmarkRandomChunkWrite benchmarks writing pieces to random chunks within
  1435  // a siafile using numThreads concurrent worker threads.
  1436  func benchmarkRandomChunkWrite(numThreads int, b *testing.B) {
  1437  	// Get new file params
  1438  	siaFilePath, _, source, _, sk, _, _, fileMode := newTestFileParams(1, false)
  1439  	// Create the path to the file.
  1440  	dir, _ := filepath.Split(siaFilePath)
  1441  	if err := os.MkdirAll(dir, 0700); err != nil {
  1442  		b.Fatal(err)
  1443  	}
  1444  	// Create the file.
  1445  	wal, _ := newTestWAL()
  1446  	rc, err := skymodules.NewRSSubCode(10, 20, crypto.SegmentSize)
  1447  	if err != nil {
  1448  		b.Fatal(err)
  1449  	}
  1450  	sf, err := New(siaFilePath, source, wal, rc, sk, 1, fileMode) // 1 chunk file
  1451  	if err != nil {
  1452  		b.Fatal(err)
  1453  	}
  1454  	if err := sf.GrowNumChunks(100); err != nil { // Grow file to 100 chunks
  1455  		b.Fatal(err)
  1456  	}
  1457  	// Add pieces to random chunks until b.N pieces have been written in total.
  1458  	var writes uint64
  1459  	piecePerm := fastrand.Perm(rc.NumPieces())
  1460  	chunkPerm := fastrand.Perm(int(sf.NumChunks()))
  1461  	hostKeys := make([]types.SiaPublicKey, rc.NumPieces())
  1462  	for i := range hostKeys {
  1463  		fastrand.Read(hostKeys[i].Key)
  1464  	}
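        	// The start channel gates the workers so that none of them begins
        	// writing before the benchmark timer has been reset.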
  1465  	start := make(chan struct{})
  1466  	worker := func() {
  1467  		<-start
  1468  		for atomic.LoadUint64(&writes) < uint64(b.N) {
  1469  			for _, pieceIndex := range piecePerm {
  1470  				for _, chunkIndex := range chunkPerm {
  1471  					if err := sf.AddPiece(hostKeys[pieceIndex], uint64(chunkIndex), uint64(pieceIndex), crypto.Hash{}); err != nil {
  1472  						// b.Fatal must not be called from a spawned goroutine; record the error and stop this worker instead.
        						b.Error(err)
        						return
  1473  					}
  1474  					atomic.AddUint64(&writes, 1)
  1475  					if atomic.LoadUint64(&writes) >= uint64(b.N) {
  1476  						return
  1477  					}
  1478  				}
  1479  			}
  1480  		}
  1481  	}
  1482  	// Spawn worker threads.
  1483  	var wg sync.WaitGroup
  1484  	for i := 0; i < numThreads; i++ {
  1485  		wg.Add(1)
  1486  		go func() {
  1487  			defer wg.Done()
  1488  			worker()
  1489  		}()
  1490  	}
  1491  	// Reset timer and start threads.
  1492  	b.ResetTimer()
  1493  	close(start)
  1494  	wg.Wait()
  1495  }
  1496  
  1497  // BenchmarkRandomChunkRead benchmarks reading the pieces of random chunks
  1498  // within a siafile.
  1499  func BenchmarkRandomChunkRead(b *testing.B) {
  1500  	// Get new file params
  1501  	siaFilePath, _, source, _, sk, _, _, fileMode := newTestFileParams(1, false)
  1502  	// Create the path to the file.
  1503  	dir, _ := filepath.Split(siaFilePath)
  1504  	if err := os.MkdirAll(dir, 0700); err != nil {
  1505  		b.Fatal(err)
  1506  	}
  1507  	// Create the file.
  1508  	wal, _ := newTestWAL()
  1509  	rc, err := skymodules.NewRSSubCode(10, 20, crypto.SegmentSize)
  1510  	if err != nil {
  1511  		b.Fatal(err)
  1512  	}
  1513  	sf, err := New(siaFilePath, source, wal, rc, sk, 1, fileMode) // 1 chunk file
  1514  	if err != nil {
  1515  		b.Fatal(err)
  1516  	}
  1517  	if err := sf.GrowNumChunks(10); err != nil { // Grow file to 10 chunks
  1518  		b.Fatal(err)
  1519  	}
  1520  	// Add pieces to chunks until every chunk has full redundancy.
  1521  	hostKeys := make([]types.SiaPublicKey, rc.NumPieces())
  1522  	for i := range hostKeys {
  1523  		fastrand.Read(hostKeys[i].Key)
  1524  	}
  1525  	for pieceIndex := 0; pieceIndex < rc.NumPieces(); pieceIndex++ {
  1526  		for chunkIndex := 0; chunkIndex < int(sf.NumChunks()); chunkIndex++ {
  1527  			if err := sf.AddPiece(hostKeys[pieceIndex], uint64(chunkIndex), uint64(pieceIndex), crypto.Hash{}); err != nil {
  1528  				b.Fatal(err)
  1529  			}
  1530  		}
  1531  	}
  1532  	// Read the pieces of random chunks until b.N reads have been performed.
  1533  	reads := 0
  1534  	chunkPerm := fastrand.Perm(int(sf.NumChunks()))
  1535  	b.ResetTimer()
  1536  	for reads < b.N {
  1537  		for _, chunkIndex := range chunkPerm {
  1538  			if _, err := sf.Pieces(uint64(chunkIndex)); err != nil {
  1539  				b.Fatal(err)
  1540  			}
  1541  			reads++
  1542  			if reads == b.N {
  1543  				return
  1544  			}
  1545  		}
  1546  	}
  1547  }
  1548  
  1549  // ensureMetadataValid is a helper function which ensures that siafile metadata
  1550  // can be backed up without panicking. By doing that, it also ensures that all
  1551  // the fields have valid values.
  1552  func ensureMetadataValid(md Metadata) (err error) {
  1553  	defer func() {
  1554  		if r := recover(); r != nil {
  1555  			err = fmt.Errorf("%v", r) // %v also handles non-string panic values
  1556  		}
  1557  	}()
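        	// Any panic raised by backup is turned into an error by the deferred
        	// recover above.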
  1558  	md.backup()
  1559  	return nil
  1560  }
  1561  
  1562  // TestCalculateHealth probes the CalculateHealth function.
  1563  func TestCalculateHealth(t *testing.T) {
  1564  	t.Parallel()
  1565  
  1566  	// Define health check function
  1567  	checkHealth := func(gp, mp, np int, h float64) {
  1568  		health := CalculateHealth(gp, mp, np)
  1569  		if health != h {
  1570  			t.Logf("gp %v mp %v np %v", gp, mp, np)
  1571  			t.Errorf("expected health of %v, got %v", h, health)
  1572  		}
  1573  	}
  1574  
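        	// The expected values below follow the health formula
        	// health = 1 - (goodPieces - minPieces) / (numPieces - minPieces).
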
  1575  	// Prepare a rounding helper that rounds to 4 decimal places.
  1576  	round := func(h float64) float64 {
  1577  		return math.Round(h*10e3) / 10e3
  1578  	}
  1579  
  1580  	// 0 good pieces
  1581  	mp := fastrand.Intn(10) + 1 // +1 to avoid 0 minPieces
  1582  	// +1 and +mp to avoid 0 paritypieces and numPieces == minPieces
  1583  	np := mp + fastrand.Intn(10) + 1
  1584  	h := round(1 - float64(0-mp)/float64(np-mp))
  1585  	checkHealth(0, mp, np, h)
  1586  
  1587  	// Full health
  1588  	mp = fastrand.Intn(10) + 1 // +1 to avoid 0 minPieces
  1589  	// +1 and +mp to avoid 0 paritypieces and numPieces == minPieces
  1590  	np = mp + fastrand.Intn(10) + 1
  1591  	checkHealth(np, mp, np, 0)
  1592  
  1593  	// In the middle
  1594  	mp = fastrand.Intn(10) + 1 // +1 to avoid 0 minPieces
  1595  	// +1 and +mp to avoid 0 paritypieces and numPieces == minPieces
  1596  	np = mp + fastrand.Intn(10) + 1
  1597  	gp := fastrand.Intn(np)
  1598  	h = round(1 - float64(gp-mp)/float64(np-mp))
  1599  	checkHealth(gp, mp, np, h)
  1600  
  1601  	// Check that CalculateHealth panics when numPieces equals minPieces.
  1602  	defer func() {
  1603  		r := recover()
  1604  		if r == nil {
  1605  			t.Fatal("expected critical")
  1606  		}
  1607  	}()
  1608  	checkHealth(0, 0, 0, 0)
  1609  }
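
        // ExampleCalculateHealth is a minimal sketch illustrating the health
        // formula exercised above; it assumes CalculateHealth(goodPieces,
        // minPieces, numPieces) follows
        // health = 1 - (goodPieces - minPieces) / (numPieces - minPieces).
        func ExampleCalculateHealth() {
        	// A file holding all of its pieces has full health.
        	fmt.Println(CalculateHealth(20, 10, 20))
        	// A file holding only its minimum pieces has health 1.
        	fmt.Println(CalculateHealth(10, 10, 20))
        	// Output:
        	// 0
        	// 1
        }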
  1610  
  1611  // TestUpdateMetadata probes the UpdateMetadata method
  1612  func TestUpdateMetadata(t *testing.T) {
  1613  	if testing.Short() {
  1614  		t.SkipNow()
  1615  	}
  1616  	t.Parallel()
  1617  
  1618  	// Create a file
  1619  	rsc, _ := skymodules.NewRSCode(10, 20)
  1620  	siaFilePath, _, source, _, sk, _, _, fileMode := newTestFileParams(1, true)
  1621  	file, _, _ := customTestFileAndWAL(siaFilePath, source, rsc, sk, 0, 0, fileMode)
  1622  
  1623  	// Add a used host to the pubkeyTable
  1624  	pk := types.SiaPublicKey{Key: []byte{byte(0)}}
  1625  	pubkeyTable := []HostPublicKey{{PublicKey: pk, Used: true}}
  1626  	pubkeyTableCopy := make([]HostPublicKey, len(pubkeyTable))
  1627  	copy(pubkeyTableCopy, pubkeyTable)
  1628  
  1629  	// Set all the cached values to values that will be updated when
  1630  	// UpdateMetadata is called
  1631  	file.mu.Lock()
  1632  	// pubKeyTable is updated during UpdateMetadata
  1633  	file.pubKeyTable = pubkeyTable
  1634  	// Cached redundancy fields are updated during UpdateMetadata
  1635  	file.staticMetadata.CachedRedundancy = math.MaxFloat64
  1636  	file.staticMetadata.CachedUserRedundancy = math.MaxFloat64
  1637  	// Cached health fields are updated during UpdateMetadata
  1638  	file.staticMetadata.CachedHealth = math.MaxFloat64
  1639  	file.staticMetadata.CachedStuckHealth = math.MaxFloat64
  1640  	file.staticMetadata.CachedRepairBytes = math.MaxUint64
  1641  	file.staticMetadata.CachedStuckBytes = math.MaxUint64
  1642  	// LastHealthCheckTime is updated during UpdateMetadata
  1643  	file.staticMetadata.LastHealthCheckTime = time.Time{}
  1644  	// CachedExpiration is updated during UpdateMetadata
  1645  	file.staticMetadata.CachedExpiration = types.BlockHeight(0)
  1646  	file.mu.Unlock()
  1647  
  1648  	// Define helper function
  1649  	checkMetadata := func(sf *SiaFile) (err error) {
  1650  		// Check PubKeyTable
  1651  		if reflect.DeepEqual(sf.pubKeyTable, pubkeyTableCopy) {
  1652  			err = errors.Compose(err, fmt.Errorf("pubkeyTable not updated; found %v", sf.pubKeyTable))
  1653  		}
  1654  		// Check redundancy
  1655  		if sf.staticMetadata.CachedRedundancy == math.MaxFloat64 {
  1656  			err = errors.Compose(err, fmt.Errorf("CachedRedundancy not updated; found %v", sf.staticMetadata.CachedRedundancy))
  1657  		}
  1658  		if sf.staticMetadata.CachedUserRedundancy == math.MaxFloat64 {
  1659  			err = errors.Compose(err, fmt.Errorf("CachedUserRedundancy not updated; found %v", sf.staticMetadata.CachedUserRedundancy))
  1660  		}
  1661  		// Check health
  1662  		if sf.staticMetadata.CachedHealth == math.MaxFloat64 {
  1663  			err = errors.Compose(err, fmt.Errorf("CachedHealth not updated; found %v", sf.staticMetadata.CachedHealth))
  1664  		}
  1665  		if sf.staticMetadata.CachedStuckHealth == math.MaxFloat64 {
  1666  			err = errors.Compose(err, fmt.Errorf("CachedStuckHealth not updated; found %v", sf.staticMetadata.CachedStuckHealth))
  1667  		}
  1668  		if sf.staticMetadata.CachedRepairBytes == math.MaxUint64 {
  1669  			err = errors.Compose(err, fmt.Errorf("CachedRepairBytes not updated; found %v", sf.staticMetadata.CachedRepairBytes))
  1670  		}
  1671  		if sf.staticMetadata.CachedStuckBytes == math.MaxUint64 {
  1672  			err = errors.Compose(err, fmt.Errorf("CachedStuckBytes not updated; found %v", sf.staticMetadata.CachedStuckBytes))
  1673  		}
  1674  		// Check LastHealthCheckTime
  1675  		if sf.staticMetadata.LastHealthCheckTime.IsZero() {
  1676  			err = errors.Compose(err, fmt.Errorf("LastHealthCheckTime not updated; found %v", sf.staticMetadata.LastHealthCheckTime))
  1677  		}
  1678  		// Check Expiration
  1679  		if sf.staticMetadata.CachedExpiration == types.BlockHeight(0) {
  1680  			err = errors.Compose(err, fmt.Errorf("CachedExpiration not updated; found %v", sf.staticMetadata.CachedExpiration))
  1681  		}
  1682  		return
  1683  	}
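        	// The sentinel values set above should trip every check.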
  1684  	err := checkMetadata(file)
  1685  	if err == nil {
  1686  		t.Fatal("metadata not initialized properly")
  1687  	}
  1688  
  1689  	// Create offline and goodForRenew maps
  1690  	offlineMap := make(map[string]bool)
  1691  	goodForRenewMap := make(map[string]bool)
  1692  
  1693  	// Create list of used Hosts
  1694  	used := []types.SiaPublicKey{}
  1695  
  1696  	// Create contracts map
  1697  	contractsMap := make(map[string]skymodules.RenterContract)
  1698  
  1699  	// UpdateMetadata
  1700  	err = file.UpdateMetadata(offlineMap, goodForRenewMap, contractsMap, used)
  1701  	if err != nil {
  1702  		t.Fatal(err)
  1703  	}
  1704  
  1705  	// Check metadata
  1706  	err = checkMetadata(file)
  1707  	if err != nil {
  1708  		t.Fatal(err)
  1709  	}
  1710  }
  1711  
  1712  // TestIsLost probes the IsLost function.
  1713  func TestIsLost(t *testing.T) {
  1714  	t.Parallel()
  1715  
  1716  	// Define Tests
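        	// A file is lost only when it is finished, not on disk, and its
        	// health is above 1.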
  1717  	var tests = []struct {
  1718  		health   float64
  1719  		onDisk   bool
  1720  		isLost   bool
  1721  		finished bool
  1722  	}{
  1723  		// Lost
  1724  		{1.01, false, true, true},
  1725  		{math.MaxFloat64, false, true, true},
  1726  
  1727  		// Not Lost
  1728  		//
  1729  		// Lost conditions are not Lost if not finished
  1730  		{1.01, false, false, false},
  1731  		{math.MaxFloat64, false, false, false},
  1732  		// If onDisk, health doesn't matter
  1733  		{0, true, false, true},
  1734  		{1.01, true, false, true},
  1735  		{math.MaxFloat64, true, false, true},
  1736  		// If !onDisk, health must be <=1
  1737  		{0, false, false, true},
  1738  		{0.99, false, false, true},
  1739  		{1, false, false, true},
  1740  	}
  1741  
  1742  	// Execute Tests
  1743  	for _, test := range tests {
  1744  		if IsLost(test.health, test.onDisk, test.finished) != test.isLost {
  1745  			t.Error("Unexpected results", test)
  1746  		}
  1747  	}
  1748  }
  1749  
  1750  // TestUpdateMetadataPruneHosts is a regression test for UpdateMetadata. It
  1751  // covers the edge case where the host pubkey table is pruned, causing host
  1752  // offsets within pieces to become invalid.
  1753  func TestUpdateMetadataPruneHosts(t *testing.T) {
  1754  	if testing.Short() {
  1755  		t.SkipNow()
  1756  	}
  1757  	t.Parallel()
  1758  	// Get a blank siafile with a 1-1 ec.
  1759  	sf, _, _ := newBlankTestFileAndWAL(2) // make sure we have 1 full chunk at the beginning of sf.fullChunks
  1760  	ec, err := skymodules.NewRSSubCode(1, 1, crypto.SegmentSize)
  1761  	if err != nil {
  1762  		t.Fatal(err)
  1763  	}
  1764  	sf.staticMetadata.staticErasureCode = ec
  1765  
  1766  	// Use the first chunk of the file for testing.
  1767  	c, err := sf.chunk(0)
  1768  	if err != nil {
  1769  		t.Fatal(err)
  1770  	}
  1771  
  1772  	// Add 4 hosts to the table.
  1773  	_, pk1 := crypto.GenerateKeyPair()
  1774  	hpk1 := types.Ed25519PublicKey(pk1)
  1775  	_, pk2 := crypto.GenerateKeyPair()
  1776  	hpk2 := types.Ed25519PublicKey(pk2)
  1777  	_, pk3 := crypto.GenerateKeyPair()
  1778  	hpk3 := types.Ed25519PublicKey(pk3)
  1779  	_, pk4 := crypto.GenerateKeyPair()
  1780  	hpk4 := types.Ed25519PublicKey(pk4)
  1781  	sf.pubKeyTable = append(sf.pubKeyTable, HostPublicKey{Used: true, PublicKey: hpk1})
  1782  	sf.pubKeyTable = append(sf.pubKeyTable, HostPublicKey{Used: true, PublicKey: hpk2})
  1783  	sf.pubKeyTable = append(sf.pubKeyTable, HostPublicKey{Used: true, PublicKey: hpk3})
  1784  	sf.pubKeyTable = append(sf.pubKeyTable, HostPublicKey{Used: true, PublicKey: hpk4})
  1785  
  1786  	// Add 2 pieces to each set of pieces: one pointing at an unused host
        	// appended to the end of the table and one for hpk1 at offset 0.
  1787  	sf.pubKeyTable = append(sf.pubKeyTable, HostPublicKey{Used: false})
  1788  	for i := range c.Pieces {
  1789  		c.Pieces[i] = append(c.Pieces[i], piece{HostTableOffset: uint32(len(sf.pubKeyTable) - 1)})
  1790  		c.Pieces[i] = append(c.Pieces[i], piece{HostTableOffset: 0})
  1791  	}
  1792  
  1793  	// Add more hosts to reach the threshold of max unused hosts.
  1794  	for i := 0; i < pubKeyTableUpperPruneThreshold; i++ {
  1795  		sf.pubKeyTable = append(sf.pubKeyTable, HostPublicKey{Used: false})
  1796  	}
  1797  
  1798  	// Save the changes.
  1799  	err = sf.saveFile([]chunk{c})
  1800  	if err != nil {
  1801  		t.Fatal(err)
  1802  	}
  1803  
  1804  	// Update the metadata.
  1805  	m := make(map[string]bool)
  1806  	contracts := make(map[string]skymodules.RenterContract)
  1807  	err = sf.UpdateMetadata(m, m, contracts, []types.SiaPublicKey{hpk1, hpk2, hpk3})
  1808  	if err != nil {
  1809  		t.Fatal(err)
  1810  	}
  1811  
  1812  	// Afterwards the pubkey table should contain the 3 used hosts plus
        	// pubKeyTableLowerPruneThreshold unused entries.
  1813  	if len(sf.pubKeyTable) != pubKeyTableLowerPruneThreshold+3 {
  1814  		t.Fatal("wrong length", len(sf.pubKeyTable))
  1815  	}
  1816  }
  1817  
  1818  // TestFinished is a simple unit test to probe the Finished and SetFinished
  1819  // methods of the siafile.
  1820  func TestFinished(t *testing.T) {
  1821  	if testing.Short() {
  1822  		t.SkipNow()
  1823  	}
  1824  	t.Parallel()
  1825  
  1826  	// Create file
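        	// The blank source means the file has no local path, so Finished
        	// initially depends only on the health passed to SetFinished.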
  1827  	siaFilePath, _, _, rc, sk, fileSize, numChunks, fileMode := newTestFileParams(1, true)
  1828  	file, _, _ := customTestFileAndWAL(siaFilePath, "", rc, sk, fileSize, numChunks, fileMode)
  1829  
  1830  	// Create helper
  1831  	checkFinished := func(finished bool) error {
  1832  		if file.Finished() != finished {
  1833  			return fmt.Errorf("expected Finished() to return %v but got %v", finished, file.Finished())
  1834  		}
  1835  		return nil
  1836  	}
  1837  
  1838  	// The initial file should be unfinished.
  1839  	if err := checkFinished(false); err != nil {
  1840  		t.Fatal(err)
  1841  	}
  1842  
  1843  	// SetFinished shouldn't change status for health > 1
  1844  	file.SetFinished(1.1)
  1845  	if err := checkFinished(false); err != nil {
  1846  		t.Fatal(err)
  1847  	}
  1848  
  1849  	// SetFinished with health of 1 should mark as finished
  1850  	file.SetFinished(1)
  1851  	if err := checkFinished(true); err != nil {
  1852  		t.Fatal(err)
  1853  	}
  1854  
  1855  	// Manually reset
  1856  	file.staticMetadata.Finished = false
  1857  	if err := checkFinished(false); err != nil {
  1858  		t.Fatal(err)
  1859  	}
  1860  
  1861  	// SetFinished with health of 0 should mark as finished
  1862  	file.SetFinished(0)
  1863  	if err := checkFinished(true); err != nil {
  1864  		t.Fatal(err)
  1865  	}
  1866  
  1867  	// Calling SetFinished with a high health again should have no effect.
  1868  	file.SetFinished(10)
  1869  	if err := checkFinished(true); err != nil {
  1870  		t.Fatal(err)
  1871  	}
  1872  
  1873  	// Manually reset
  1874  	file.staticMetadata.Finished = false
  1875  	if err := checkFinished(false); err != nil {
  1876  		t.Fatal(err)
  1877  	}
  1878  
  1879  	// A file with a local path should be considered finished even if
  1880  	// SetFinished is called with a high health.
  1881  	file.staticMetadata.LocalPath = "notblank"
  1882  	file.SetFinished(10)
  1883  	if err := checkFinished(true); err != nil {
  1884  		t.Fatal(err)
  1885  	}
  1886  
  1887  	// Removing the local path should have no effect; the file stays finished.
  1888  	file.staticMetadata.LocalPath = ""
  1889  	file.SetFinished(10)
  1890  	if err := checkFinished(true); err != nil {
  1891  		t.Fatal(err)
  1892  	}
  1893  }