gitlab.com/SkynetLabs/skyd@v1.6.9/skymodules/renter/filesystem/siafile/siafile.go (about)

     1  package siafile
     2  
     3  import (
     4  	"fmt"
     5  	"io"
     6  	"math"
     7  	"os"
     8  	"sync"
     9  	"time"
    10  
    11  	"gitlab.com/NebulousLabs/errors"
    12  	"gitlab.com/NebulousLabs/fastrand"
    13  	"gitlab.com/NebulousLabs/writeaheadlog"
    14  
    15  	"gitlab.com/NebulousLabs/encoding"
    16  	"gitlab.com/SkynetLabs/skyd/build"
    17  	"gitlab.com/SkynetLabs/skyd/skymodules"
    18  	"go.sia.tech/siad/crypto"
    19  	"go.sia.tech/siad/modules"
    20  	"go.sia.tech/siad/types"
    21  )
    22  
    23  var (
    24  	// ErrDeleted is returned when an operation failed due to the siafile being
    25  	// deleted already.
    26  	ErrDeleted = errors.New("file was deleted")
    27  	// ErrPathOverload is an error when a file already exists at that location
    28  	ErrPathOverload = errors.New("a file already exists at that location")
    29  	// ErrUnfinished is returned when an operation failed due to the siafile being
    30  	// unfinished.
    31  	ErrUnfinished = errors.New("file is unfinished")
    32  	// ErrUnknownPath is an error when a file cannot be found with the given path
    33  	ErrUnknownPath = errors.New("no file known with that path")
    34  	// ErrUnknownThread is an error when a SiaFile is trying to be closed by a
    35  	// thread that is not in the threadMap
    36  	ErrUnknownThread = errors.New("thread should not be calling Close(), does not have control of the siafile")
    37  
    38  	// errShrinkWithTooManyChunks is returned if the number of chunks passed
    39  	// to Shrink is >= the number of current chunks.
    40  	errShrinkWithTooManyChunks = errors.New("can't grow siafile using Shrink - use GrowNumChunks instead")
    41  )
    42  
    43  type (
    44  	// SiaFile is the disk format for files uploaded to the Sia network.  It
    45  	// contains all the necessary information to recover a file from its hosts and
    46  	// allows for easy constant-time updates of the file without having to read or
    47  	// write the whole file.
    48  	SiaFile struct {
    49  		// staticMetadata is the mostly static metadata of a SiaFile. The reserved
    50  		// size of the staticMetadata on disk should always be a multiple of 4KiB.
    51  		// The staticMetadata is also the only part of the file that is JSON encoded
    52  		// and can therefore be easily extended.
    53  		staticMetadata Metadata
    54  
    55  		// pubKeyTable stores the public keys of the hosts this file's pieces are uploaded to.
    56  		// Since multiple pieces from different chunks might be uploaded to the same host, this
    57  		// allows us to deduplicate the rather large public keys.
    58  		pubKeyTable []HostPublicKey
    59  
    60  		// numChunks is the number of chunks the file was split into including a
    61  		// potential partial chunk at the end.
    62  		numChunks int
    63  
    64  		// utility fields. These are not persisted.
    65  		deleted bool
    66  		deps    modules.Dependencies
    67  		mu      sync.RWMutex
    68  		wal     *writeaheadlog.WAL // the wal that is used for SiaFiles
    69  
    70  		// siaFilePath is the path to the .sia file on disk.
    71  		siaFilePath string
    72  	}
    73  
    74  	// Chunks is an exported version of a chunk slice. It exists for
    75  	// convenience to make sure the caller has an exported type to pass around.
    76  	Chunks struct {
    77  		chunks []chunk
    78  	}
    79  
    80  	// chunk represents a single chunk of a file on disk
    81  	chunk struct {
    82  		// ExtensionInfo is some reserved space for each chunk that allows us
    83  		// to indicate if a chunk is special.
    84  		ExtensionInfo [16]byte
    85  
    86  		// Index is the index of the chunk.
    87  		Index int
    88  
    89  		// Pieces are the pieces of the chunk, grouped by erasure-coded piece index.
    90  		Pieces [][]piece
    91  
    92  		// Stuck indicates if the chunk was not repaired as expected by the
    93  		// repair loop
    94  		Stuck bool
    95  	}
    96  
    97  	// Chunk is an exported chunk. It contains exported pieces.
    98  	Chunk struct {
    99  		Pieces [][]Piece
   100  	}
   101  
   102  	// piece represents a single piece of a chunk on disk
   103  	piece struct {
   104  		HostTableOffset uint32      // offset of the host's key within the pubKeyTable
   105  		MerkleRoot      crypto.Hash // merkle root of the piece
   106  	}
   107  
   108  	// Piece is an exported piece. It contains a resolved public key instead of
   109  	// the table offset.
   110  	Piece struct {
   111  		HostPubKey types.SiaPublicKey // public key of the host
   112  		MerkleRoot crypto.Hash        // merkle root of the piece
   113  	}
   114  
   115  	// HostPublicKey is an entry in the pubKeyTable.
   116  	HostPublicKey struct {
   117  		PublicKey types.SiaPublicKey // public key of host
   118  		Used      bool               // indicates if we currently use this host
   119  	}
   120  )
   121  
   122  // CalculateHealth determines the health of a chunk or file from the number of
   123  // good, minimum and total pieces.
   124  func CalculateHealth(goodPieces, minPieces, numPieces int) float64 {
   125  	// Divide by zero check
   126  	if minPieces == numPieces {
   127  		build.Critical("minPieces cannot equal numPieces")
   128  	}
   129  	// Calculate health
   130  	health := 1 - float64(goodPieces-minPieces)/float64(numPieces-minPieces)
   131  	// Round the health to 4 decimal places.
   132  	health = health * 10e3
   133  	health = math.Round(health)
   134  	health = health / 10e3
   135  	return health
   136  }
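
        // An illustrative sketch (not part of the original file), assuming a
        // 10-of-30 erasure code, of the values the formula above produces:
        //
        //	CalculateHealth(30, 10, 30) // 0    -> full redundancy
        //	CalculateHealth(20, 10, 30) // 0.5  -> recoverable
        //	CalculateHealth(5, 10, 30)  // 1.25 -> needs repair from disk or via streaming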
   137  
   138  // IsLost returns whether or not a siafile should be considered unrecoverable
   139  // and therefore lost.
   140  func IsLost(health float64, onDisk bool, finished bool) bool {
   141  	// If the file is on disk, then it can always be repaired and thus
   142  	// recovered.
   143  	if onDisk {
   144  		return false
   145  	}
   146  
   147  	// If the health of the file is <= 1, at least the minimum number of
   148  	// data pieces is still available, so the file is recoverable.
   149  	if health <= 1 {
   150  		return false
   151  	}
   152  
   153  	// If we've reached this point, the file is not on disk and has a health
   154  	// greater than one.  We consider this file unrecoverable, but only if
   155  	// it was ever finished.
   156  	return finished
   157  }
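
        // For illustration (not part of the original file), how the checks above
        // combine for a few hypothetical inputs:
        //
        //	IsLost(1.25, false, true)  // true  -> finished, not on disk, health > 1
        //	IsLost(1.25, true, true)   // false -> a local copy can repair the file
        //	IsLost(0.50, false, true)  // false -> still recoverable from the hosts
        //	IsLost(1.25, false, false) // false -> never finished, so not counted as lost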
   158  
   159  // MarshalSia implements the encoding.SiaMarshaler interface.
   160  func (hpk HostPublicKey) MarshalSia(w io.Writer) error {
   161  	e := encoding.NewEncoder(w)
   162  	e.Encode(hpk.PublicKey)
   163  	e.WriteBool(hpk.Used)
   164  	return e.Err()
   165  }
   166  
   167  // SiaFilePath returns the siaFilePath field of the SiaFile.
   168  func (sf *SiaFile) SiaFilePath() string {
   169  	sf.mu.RLock()
   170  	defer sf.mu.RUnlock()
   171  	return sf.siaFilePath
   172  }
   173  
   174  // Lock acquires the SiaFile's mutex for calling Unmanaged exported methods.
   175  func (sf *SiaFile) Lock() {
   176  	sf.mu.Lock()
   177  }
   178  
   179  // Unlock releases the SiaFile's mutex.
   180  func (sf *SiaFile) Unlock() {
   181  	sf.mu.Unlock()
   182  }
   183  
   184  // UnmanagedSetDeleted sets the deleted field of the SiaFile without
   185  // holding the lock.
   186  func (sf *SiaFile) UnmanagedSetDeleted(deleted bool) {
   187  	sf.deleted = deleted
   188  }
   189  
   190  // UnmanagedSetSiaFilePath sets the siaFilePath field of the SiaFile without
   191  // holding the lock.
   192  func (sf *SiaFile) UnmanagedSetSiaFilePath(newSiaFilePath string) {
   193  	sf.siaFilePath = newSiaFilePath
   194  }
   195  
   196  // UnmarshalSia implements the encoding.SiaUnmarshaler interface.
   197  func (hpk *HostPublicKey) UnmarshalSia(r io.Reader) error {
   198  	d := encoding.NewDecoder(r, encoding.DefaultAllocLimit)
   199  	d.Decode(&hpk.PublicKey)
   200  	hpk.Used = d.NextBool()
   201  	return d.Err()
   202  }
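
        // A minimal round-trip sketch (not part of the original file) showing that
        // MarshalSia and UnmarshalSia are symmetric; the zeroed ed25519 key built with
        // types.Ed25519PublicKey is just a stand-in value:
        //
        //	var buf bytes.Buffer
        //	in := HostPublicKey{PublicKey: types.Ed25519PublicKey(crypto.PublicKey{}), Used: true}
        //	_ = in.MarshalSia(&buf)
        //	var out HostPublicKey
        //	_ = out.UnmarshalSia(&buf) // out now equals in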
   203  
   204  // numPieces returns the total number of pieces uploaded for a chunk. This
   205  // means that numPieces can be greater than the number of pieces created by the
   206  // erasure coder.
   207  func (c *chunk) numPieces() (numPieces int) {
   208  	for _, pieceSet := range c.Pieces {
   209  		numPieces += len(pieceSet)
   210  	}
   211  	return
   212  }
   213  
   214  // New creates a new SiaFile.
   215  func New(siaFilePath, source string, wal *writeaheadlog.WAL, erasureCode skymodules.ErasureCoder, masterKey crypto.CipherKey, fileSize uint64, fileMode os.FileMode) (*SiaFile, error) {
   216  	currentTime := time.Now()
   217  	ecType, ecParams := marshalErasureCoder(erasureCode)
   218  	minPieces := erasureCode.MinPieces()
   219  	numPieces := erasureCode.NumPieces()
   220  	zeroHealth := 1.0
   221  	if numPieces != minPieces {
   222  		zeroHealth = 1 + float64(minPieces)/float64(numPieces-minPieces)
   223  	}
   224  	repairSize := fileSize * uint64(numPieces) / uint64(minPieces)
   225  	file := &SiaFile{
   226  		staticMetadata: Metadata{
   227  			AccessTime:              currentTime,
   228  			ChunkOffset:             defaultReservedMDPages * pageSize,
   229  			ChangeTime:              currentTime,
   230  			CreateTime:              currentTime,
   231  			CachedHealth:            zeroHealth,
   232  			CachedRepairBytes:       repairSize,
   233  			CachedStuckBytes:        0,
   234  			CachedStuckHealth:       0,
   235  			CachedRedundancy:        0,
   236  			CachedUserRedundancy:    0,
   237  			CachedUploadProgress:    0,
   238  			FileSize:                int64(fileSize),
   239  			Finished:                source != "",
   240  			LocalPath:               source,
   241  			StaticMasterKey:         masterKey.Key(),
   242  			StaticMasterKeyType:     masterKey.Type(),
   243  			Mode:                    fileMode,
   244  			ModTime:                 currentTime,
   245  			staticErasureCode:       erasureCode,
   246  			StaticErasureCodeType:   ecType,
   247  			StaticErasureCodeParams: ecParams,
   248  			StaticPagesPerChunk:     numChunkPagesRequired(erasureCode.NumPieces()),
   249  			StaticPieceSize:         modules.SectorSize - masterKey.Type().Overhead(),
   250  			StaticVersion:           metadataVersion,
   251  			UniqueID:                uniqueID(),
   252  		},
   253  		deps:        modules.ProdDependencies,
   254  		siaFilePath: siaFilePath,
   255  		wal:         wal,
   256  	}
   257  	// Init chunks.
   258  	numChunks := fileSize / file.staticChunkSize()
   259  	if fileSize%file.staticChunkSize() != 0 || numChunks == 0 {
   260  		// The file has a leftover partial chunk (or is empty); treat it as a full chunk.
   261  		numChunks++
   262  	}
   263  	file.numChunks = int(numChunks)
   264  	// Update cached fields for 0-Byte files.
   265  	if file.staticMetadata.FileSize == 0 {
   266  		file.staticMetadata.CachedHealth = 0
   267  		file.staticMetadata.CachedRepairBytes = 0
   268  		file.staticMetadata.CachedStuckBytes = 0
   269  		file.staticMetadata.CachedStuckHealth = 0
   270  		file.staticMetadata.CachedRedundancy = float64(erasureCode.NumPieces()) / float64(erasureCode.MinPieces())
   271  		file.staticMetadata.CachedUserRedundancy = file.staticMetadata.CachedRedundancy
   272  		file.staticMetadata.CachedUploadProgress = 100
   273  	}
   274  	// Save file.
   275  	initialChunks := make([]chunk, file.numChunks)
   276  	for chunkIndex := range initialChunks {
   277  		initialChunks[chunkIndex].Index = chunkIndex
   278  		initialChunks[chunkIndex].Pieces = make([][]piece, erasureCode.NumPieces())
   279  	}
   280  	return file, file.saveFile(initialChunks)
   281  }
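
        // A hedged usage sketch (not part of the original file); the paths are
        // placeholders, wal is assumed to be an already-open writeaheadlog.WAL and the
        // erasure coder constructor is assumed to be skymodules.NewRSSubCode:
        //
        //	ec, err := skymodules.NewRSSubCode(10, 20, crypto.SegmentSize)
        //	if err != nil {
        //		// handle error
        //	}
        //	mk := crypto.GenerateSiaKey(crypto.TypeDefaultRenter)
        //	sf, err := New("/renter/foo.sia", "/home/user/foo", wal, ec, mk, 1<<22, 0600)
        //	if err != nil {
        //		// handle error
        //	}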
   282  
   283  // GrowNumChunks increases the number of chunks in the SiaFile to numChunks. If
   284  // the file already contains >= numChunks chunks then GrowNumChunks is a no-op.
   285  func (sf *SiaFile) GrowNumChunks(numChunks uint64) (err error) {
   286  	sf.mu.Lock()
   287  	defer sf.mu.Unlock()
   288  	// Backup metadata before doing any kind of persistence.
   289  	defer func(backup Metadata) {
   290  		if err != nil {
   291  			sf.staticMetadata.restore(backup)
   292  		}
   293  	}(sf.staticMetadata.backup())
   294  	updates, err := sf.growNumChunks(numChunks)
   295  	if err != nil {
   296  		return err
   297  	}
   298  	return sf.createAndApplyTransaction(updates...)
   299  }
   300  
   301  // RemoveLastChunk removes the last chunk of the SiaFile and truncates the file
   302  // accordingly.
   303  func (sf *SiaFile) RemoveLastChunk() error {
   304  	sf.mu.Lock()
   305  	defer sf.mu.Unlock()
   306  	return sf.removeLastChunk()
   307  }
   308  
   309  // SetFileSize changes the fileSize of the SiaFile.
   310  func (sf *SiaFile) SetFileSize(fileSize uint64) (err error) {
   311  	sf.mu.Lock()
   312  	defer sf.mu.Unlock()
   313  	if sf.deleted {
   314  		return errors.AddContext(ErrDeleted, "can't set filesize of deleted file")
   315  	}
   316  	// Backup the changed metadata before changing it. Revert the change on
   317  	// error.
   318  	defer func(backup Metadata) {
   319  		if err != nil {
   320  			sf.staticMetadata.restore(backup)
   321  		}
   322  	}(sf.staticMetadata.backup())
   323  	// Make sure that SetFileSize doesn't affect the number of total chunks within
   324  	// the file.
   325  	newNumChunks := fileSize / sf.staticChunkSize()
   326  	if fileSize%sf.staticChunkSize() != 0 {
   327  		newNumChunks++
   328  	}
   329  	if uint64(sf.numChunks) != newNumChunks {
   330  		return fmt.Errorf("can't change fileSize since it would change the number of chunks from %v to %v",
   331  			sf.numChunks, newNumChunks)
   332  	}
   333  	// Update filesize.
   334  	sf.staticMetadata.FileSize = int64(fileSize)
   335  	// Save changes to metadata to disk.
   336  	return sf.saveMetadata()
   337  }
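
        // Worked example (not part of the original file), assuming a 40 MiB chunk size
        // (10 data pieces of roughly 4 MiB each): a 50 MiB file spans 2 chunks, so
        // SetFileSize(70 MiB) succeeds because the file still spans 2 chunks, while
        // SetFileSize(90 MiB) fails because the chunk count would change from 2 to 3.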
   338  
   339  // AddPiece adds an uploaded piece to the file. It also updates the host table
   340  // if the public key of the host is not already known.
   341  func (sf *SiaFile) AddPiece(pk types.SiaPublicKey, chunkIndex, pieceIndex uint64, merkleRoot crypto.Hash) (err error) {
   342  	sf.mu.Lock()
   343  	defer sf.mu.Unlock()
   344  	// If the file was deleted we can't add a new piece since it would write
   345  	// the file to disk again.
   346  	if sf.deleted {
   347  		return errors.AddContext(ErrDeleted, "can't add piece to deleted file")
   348  	}
   349  	// Backup the changed metadata before changing it. Revert the change on
   350  	// error.
   351  	oldPubKeyTable := append([]HostPublicKey{}, sf.pubKeyTable...)
   352  	defer func(backup Metadata) {
   353  		if err != nil {
   354  			sf.staticMetadata.restore(backup)
   355  			sf.pubKeyTable = oldPubKeyTable
   356  		}
   357  	}(sf.staticMetadata.backup())
   358  
   359  	// Update cache.
   360  	defer sf.uploadProgressAndBytes()
   361  
   362  	// Get the index of the host in the public key table.
   363  	tableIndex := -1
   364  	for i, hpk := range sf.pubKeyTable {
   365  		if hpk.PublicKey.Equals(pk) {
   366  			tableIndex = i
   367  			break
   368  		}
   369  	}
   370  	// If we don't know the host yet, we add it to the table.
   371  	tableChanged := false
   372  	if tableIndex == -1 {
   373  		sf.pubKeyTable = append(sf.pubKeyTable, HostPublicKey{
   374  			PublicKey: pk,
   375  			Used:      true,
   376  		})
   377  		tableIndex = len(sf.pubKeyTable) - 1
   378  		tableChanged = true
   379  	}
   380  	// Check if the chunkIndex is valid.
   381  	if chunkIndex >= uint64(sf.numChunks) {
   382  		return fmt.Errorf("chunkIndex %v out of bounds (%v)", chunkIndex, sf.numChunks)
   383  	}
   384  	// Get the chunk from disk.
   385  	chunk, err := sf.chunk(int(chunkIndex))
   386  	if err != nil {
   387  		return errors.AddContext(err, "failed to get chunk")
   388  	}
   389  	// Check if the pieceIndex is valid.
   390  	if pieceIndex >= uint64(len(chunk.Pieces)) {
   391  		return fmt.Errorf("pieceIndex %v out of bounds (%v)", pieceIndex, len(chunk.Pieces))
   392  	}
   393  	// Add the piece to the chunk.
   394  	chunk.Pieces[pieceIndex] = append(chunk.Pieces[pieceIndex], piece{
   395  		HostTableOffset: uint32(tableIndex),
   396  		MerkleRoot:      merkleRoot,
   397  	})
   398  
   399  	// Update the AccessTime, ChangeTime and ModTime.
   400  	sf.staticMetadata.AccessTime = time.Now()
   401  	sf.staticMetadata.ChangeTime = sf.staticMetadata.AccessTime
   402  	sf.staticMetadata.ModTime = sf.staticMetadata.AccessTime
   403  
   404  	// Defrag the chunk if necessary.
   405  	chunkSize := marshaledChunkSize(chunk.numPieces())
   406  	maxChunkSize := int64(sf.staticMetadata.StaticPagesPerChunk) * pageSize
   407  	if chunkSize > maxChunkSize {
   408  		sf.defragChunk(&chunk)
   409  	}
   410  
   411  	// If the chunk is still too large after the defrag, we abort.
   412  	chunkSize = marshaledChunkSize(chunk.numPieces())
   413  	if chunkSize > maxChunkSize {
   414  		return fmt.Errorf("chunk doesn't fit into allocated space %v > %v", chunkSize, maxChunkSize)
   415  	}
   416  	// Update the file atomically.
   417  	var updates []writeaheadlog.Update
   418  	// Get the updates for the header.
   419  	if tableChanged {
   420  		// If the table changed we update the whole header.
   421  		updates, err = sf.saveHeaderUpdates()
   422  	} else {
   423  		// Otherwise just the metadata.
   424  		updates, err = sf.saveMetadataUpdates()
   425  	}
   426  	if err != nil {
   427  		return err
   428  	}
   429  	// Save the changed chunk to disk.
   430  	chunkUpdate := sf.saveChunkUpdate(chunk)
   431  	return sf.createAndApplyTransaction(append(updates, chunkUpdate)...)
   432  }
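
        // A hedged usage sketch (not part of the original file); hostKey and root are
        // placeholders for the host's public key and the Merkle root reported after a
        // successful upload of piece 3 of chunk 0:
        //
        //	if err := sf.AddPiece(hostKey, 0, 3, root); err != nil {
        //		// handle error
        //	}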
   433  
   434  // chunkHealth returns the health and user health of the chunk which is defined
   435  // as the percent of parity pieces remaining. When calculating the user health
   436  // we assume that an incomplete partial chunk has full health. For the regular
   437  // health we don't assume that.
   438  //
   439  // health = 0 is full redundancy, health <= 1 is recoverable, health > 1 needs
   440  // to be repaired from disk or repair by upload streaming
   441  func (sf *SiaFile) chunkHealth(chunk chunk, offlineMap map[string]bool, goodForRenewMap map[string]bool) (h float64, uh float64, _ uint64, err error) {
   442  	// The max number of good pieces that a chunk can have is NumPieces()
   443  	numPieces := sf.staticMetadata.staticErasureCode.NumPieces()
   444  	minPieces := sf.staticMetadata.staticErasureCode.MinPieces()
   445  	// Find the good pieces that are good for renew
   446  	goodPieces, _ := sf.goodPieces(chunk, offlineMap, goodForRenewMap)
   447  	// Sanity check; if something went wrong, default to minimum health.
   448  	if int(goodPieces) > numPieces {
   449  		build.Critical("unexpected number of goodPieces for chunkHealth")
   450  		goodPieces = 0
   451  	}
   452  	chunkHealth := CalculateHealth(int(goodPieces), minPieces, numPieces)
   453  	// Determine repairBytesRemaining
   454  	repairBytes := (uint64(numPieces) - goodPieces) * modules.SectorSize
   455  	return chunkHealth, chunkHealth, repairBytes, nil
   456  }
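
        // Worked example (not part of the original file) for the values above, assuming
        // a 10-of-30 erasure code, 20 good pieces and 4 MiB sectors:
        //
        //	chunkHealth = CalculateHealth(20, 10, 30) = 0.5
        //	repairBytes = (30 - 20) * 4 MiB = 40 MiB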
   457  
   458  // ChunkHealth returns the health of the chunk which is defined as the percent
   459  // of parity pieces remaining.
   460  func (sf *SiaFile) ChunkHealth(index int, offlineMap map[string]bool, goodForRenewMap map[string]bool) (float64, float64, uint64, error) {
   461  	sf.mu.Lock()
   462  	defer sf.mu.Unlock()
   463  	chunk, err := sf.chunk(index)
   464  	if err != nil {
   465  		return 0, 0, 0, errors.AddContext(err, "failed to read chunk")
   466  	}
   467  	return sf.chunkHealth(chunk, offlineMap, goodForRenewMap)
   468  }
   469  
   470  // Delete removes the file from disk and marks it as deleted. Once the file is
   471  // deleted, certain methods should return an error.
   472  func (sf *SiaFile) Delete() (err error) {
   473  	sf.mu.Lock()
   474  	defer sf.mu.Unlock()
   475  	// We can't delete a file multiple times.
   476  	if sf.deleted {
   477  		return errors.AddContext(ErrDeleted, "requested file has already been deleted")
   478  	}
   479  	// Backup metadata before doing any kind of persistence.
   480  	defer func(backup Metadata) {
   481  		if err != nil {
   482  			sf.staticMetadata.restore(backup)
   483  		}
   484  	}(sf.staticMetadata.backup())
   485  	update := sf.createDeleteUpdate()
   486  	err = sf.createAndApplyTransaction(update)
   487  	sf.deleted = true
   488  	return err
   489  }
   490  
   491  // Deleted indicates if this file has been deleted by the user.
   492  func (sf *SiaFile) Deleted() bool {
   493  	sf.mu.RLock()
   494  	defer sf.mu.RUnlock()
   495  	return sf.deleted
   496  }
   497  
   498  // ErasureCode returns the erasure coder used by the file.
   499  func (sf *SiaFile) ErasureCode() skymodules.ErasureCoder {
   500  	return sf.staticMetadata.staticErasureCode
   501  }
   502  
   503  // SaveWithChunks saves the file's header to disk and appends the provided raw
   504  // chunks at the end of the file.
   505  func (sf *SiaFile) SaveWithChunks(chunks Chunks) (err error) {
   506  	sf.mu.Lock()
   507  	defer sf.mu.Unlock()
   508  	// Restore the metadata if an error occurs below.
   509  	defer func(backup Metadata) {
   510  		if err != nil {
   511  			sf.staticMetadata.restore(backup)
   512  		}
   513  	}(sf.staticMetadata.backup())
   514  
   515  	updates, err := sf.saveHeaderUpdates()
   516  	if err != nil {
   517  		return errors.AddContext(err, "failed to create header updates")
   518  	}
   519  	for _, chunk := range chunks.chunks {
   520  		updates = append(updates, sf.saveChunkUpdate(chunk))
   521  	}
   522  	return sf.createAndApplyTransaction(updates...)
   523  }
   524  
   525  // SaveHeader saves the file's header to disk.
   526  func (sf *SiaFile) SaveHeader() (err error) {
   527  	sf.mu.Lock()
   528  	defer sf.mu.Unlock()
   529  	// Can't save the header of a deleted file.
   530  	if sf.deleted {
   531  		return errors.AddContext(ErrDeleted, "can't SaveHeader of deleted file")
   532  	}
   533  	// Restore the metadata if an error occurs below.
   534  	defer func(backup Metadata) {
   535  		if err != nil {
   536  			sf.staticMetadata.restore(backup)
   537  		}
   538  	}(sf.staticMetadata.backup())
   539  
   540  	updates, err := sf.saveHeaderUpdates()
   541  	if err != nil {
   542  		return err
   543  	}
   544  	return sf.createAndApplyTransaction(updates...)
   545  }
   546  
   547  // SaveMetadata saves the file's metadata to disk in a fault tolerant way.
   548  func (sf *SiaFile) SaveMetadata() (err error) {
   549  	sf.mu.Lock()
   550  	defer sf.mu.Unlock()
   551  	if sf.deleted {
   552  		return errors.AddContext(ErrDeleted, "can't SaveMetadata of deleted file")
   553  	}
   554  	// backup the changed metadata before changing it. Revert the change on
   555  	// error.
   556  	defer func(backup Metadata) {
   557  		if err != nil {
   558  			sf.staticMetadata.restore(backup)
   559  		}
   560  	}(sf.staticMetadata.backup())
   561  	return sf.saveMetadata()
   562  }
   563  
   564  // saveMetadata saves the file's metadata to disk by creating the metadata
   565  // updates and applying them.
   566  //
   567  // NOTE: This method does not backup the metadata
   568  func (sf *SiaFile) saveMetadata() error {
   569  	updates, err := sf.saveMetadataUpdates()
   570  	if err != nil {
   571  		return err
   572  	}
   573  	return sf.createAndApplyTransaction(updates...)
   574  }
   575  
   576  // Expiration updates CachedExpiration with the lowest height at which any of
   577  // the file's contracts will expire and returns the new value.
   578  func (sf *SiaFile) Expiration(contracts map[string]skymodules.RenterContract) types.BlockHeight {
   579  	sf.mu.Lock()
   580  	defer sf.mu.Unlock()
   581  	return sf.expiration(contracts)
   582  }
   583  
   584  // expiration updates CachedExpiration with the lowest height at which any of
   585  // the file's contracts will expire and returns the new value.
   586  func (sf *SiaFile) expiration(contracts map[string]skymodules.RenterContract) types.BlockHeight {
   587  	if len(sf.pubKeyTable) == 0 {
   588  		sf.staticMetadata.CachedExpiration = 0
   589  		return 0
   590  	}
   591  
   592  	lowest := ^types.BlockHeight(0)
   606  	for _, pk := range sf.pubKeyTable {
   607  		contract, exists := contracts[pk.PublicKey.String()]
   608  		if !exists {
   609  			continue
   610  		}
   611  		if contract.EndHeight < lowest {
   612  			lowest = contract.EndHeight
   613  		}
   614  	}
   615  	sf.staticMetadata.CachedExpiration = lowest
   616  	return lowest
   617  }
   618  
   619  // Health calculates the health of the file to be used in determining repair
   620  // priority. Health of the file is the lowest health of any of the chunks and is
   621  // defined as the percent of parity pieces remaining. The NumStuckChunks will be
   622  // calculated for the SiaFile and returned.
   623  //
   624  // NOTE: The cached values of the health and stuck health will be set but not
   625  // saved to disk as Health() does not write to disk. If the cached values need
   626  // to be updated on disk then a metadata save method should be called in
   627  // conjunction with Health()
   628  //
   629  // health = 0 is full redundancy, health <= 1 is recoverable, health > 1 needs
   630  // to be repaired from disk
   631  func (sf *SiaFile) Health(offline map[string]bool, goodForRenew map[string]bool) (h, sh, uh, ush float64, nsc, rb, sb uint64) {
   632  	sf.mu.Lock()
   633  	defer sf.mu.Unlock()
   634  	return sf.health(offline, goodForRenew)
   635  }
   636  
   637  // health calculates the health of the file to be used in determining repair
   638  // priority. Health of the file is the lowest health of any of the chunks and is
   639  // defined as the percent of parity pieces remaining. The NumStuckChunks will be
   640  // calculated for the SiaFile and returned.
   641  //
   642  // NOTE: The cached values of the health and stuck health will be set but not
   643  // saved to disk as Health() does not write to disk. If the cached values need
   644  // to be updated on disk then a metadata save method should be called in
   645  // conjunction with Health()
   646  //
   647  // health = 0 is full redundancy, health <= 1 is recoverable, health > 1 needs
   648  // to be repaired from disk
   649  func (sf *SiaFile) health(offline map[string]bool, goodForRenew map[string]bool) (h, sh, uh, ush float64, nsc, rb, sb uint64) {
   650  	numPieces := sf.staticMetadata.staticErasureCode.NumPieces()
   651  	minPieces := sf.staticMetadata.staticErasureCode.MinPieces()
   652  	worstHealth := CalculateHealth(0, minPieces, numPieces)
   653  
   654  	// Update the cache.
   655  	defer func() {
   656  		sf.staticMetadata.CachedHealth = h
   657  		sf.staticMetadata.CachedRepairBytes = rb
   658  		sf.staticMetadata.CachedStuckBytes = sb
   659  		sf.staticMetadata.CachedStuckHealth = sh
   660  	}()
   661  
   662  	// Check if siafile is deleted
   663  	if sf.deleted {
   664  		// Don't return health information of a deleted file to prevent
   665  		// misrepresenting the health information of a directory
   666  		return 0, 0, 0, 0, 0, 0, 0
   667  	}
   668  	// Check for Zero byte files
   669  	if sf.staticMetadata.FileSize == 0 {
   670  		// Return default health information for zero byte files to prevent
   671  		// misrepresenting the health information of a directory
   672  		return 0, 0, 0, 0, 0, 0, 0
   673  	}
   674  
   675  	// Iterate over the chunks to gather the health information
   676  	var health, stuckHealth, userHealth, userStuckHealth float64
   677  	var numStuckChunks, repairBytesRemaining, stuckBytes uint64
   678  	err := sf.iterateChunksReadonly(func(c chunk) error {
   679  		chunkHealth, userChunkHealth, chunkRepairBytesRemaining, err := sf.chunkHealth(c, offline, goodForRenew)
   680  		if err != nil {
   681  			return err
   682  		}
   683  
   684  		// Update the health or stuckHealth of the file according to the health
   685  		// of the chunk. The health of the file is the worst health (highest
   686  		// number) of all the chunks in the file.
   687  		if c.Stuck {
   688  			numStuckChunks++
   689  			if chunkHealth > stuckHealth {
   690  				stuckHealth = chunkHealth
   691  			}
   692  			if userChunkHealth > userStuckHealth {
   693  				userStuckHealth = userChunkHealth
   694  			}
   695  		} else {
   696  			if chunkHealth > health {
   697  				health = chunkHealth
   698  			}
   699  			if userChunkHealth > userHealth {
   700  				userHealth = userChunkHealth
   701  			}
   702  		}
   703  
   704  		// If the chunk is stuck then we count any remaining repair bytes as the
   705  		// stuck loop does not care how healthy the file is.
   706  		if c.Stuck {
   707  			stuckBytes += chunkRepairBytesRemaining
   708  			return nil
   709  		}
   710  
   711  		// If the chunk is not stuck then we only count the remaining repair bytes
   712  		// if the chunk needs repair.
   713  		if skymodules.NeedsRepair(chunkHealth) {
   714  			repairBytesRemaining += chunkRepairBytesRemaining
   715  		}
   716  
   717  		return nil
   718  	})
   719  	if err != nil {
   720  		err = fmt.Errorf("failed to iterate over chunks of file '%v': %v", sf.siaFilePath, err)
   721  		build.Critical(err)
   722  		return 0, 0, 0, 0, 0, 0, 0
   723  	}
   724  
   725  	// Check if all chunks are stuck; if so, set health to 0 (full health) so
   726  	// the file is not targeted for repair by the regular repair loop.
   727  	if int(numStuckChunks) == sf.numChunks {
   728  		health = float64(0)
   729  	}
   730  	// Sanity check, verify that the calculated health is not worse (greater)
   731  	// than the worst health.
   732  	if userHealth > worstHealth || health > worstHealth {
   733  		build.Critical("WARN: health out of bounds. Max value, Min value, health found", worstHealth, 0, health, userHealth)
   734  		health = worstHealth
   735  	}
   736  	// Sanity check, verify that the calculated stuck health is not worse
   737  	// (greater) than the worst health.
   738  	if userStuckHealth > worstHealth || stuckHealth > worstHealth {
   739  		build.Critical("WARN: stuckHealth out of bounds. Max value, Min value, stuckHealth found", worstHealth, 0, stuckHealth, userStuckHealth)
   740  		stuckHealth = worstHealth
   741  	}
   742  	// Sanity Check that the number of stuck chunks makes sense
   743  	if numStuckChunks != sf.numStuckChunks() {
   744  		// If there is a mismatch there must have been a bad shutdown. Fix the
   745  		// metadata with the information read directly from the chunks
   746  		sf.staticMetadata.NumStuckChunks = numStuckChunks
   747  	}
   748  	return health, stuckHealth, userHealth, userStuckHealth, numStuckChunks, repairBytesRemaining, stuckBytes
   749  }
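
        // For illustration (not part of the original file): if a file's unstuck chunks
        // have healths 0, 0.25 and 0.6, the reported file health is 0.6 (the worst
        // chunk), and only the chunks for which skymodules.NeedsRepair returns true
        // contribute to the remaining repair bytes.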
   750  
   751  // HostPublicKeys returns all the public keys of hosts the file has ever been
   752  // uploaded to. That means some of those hosts might no longer be in use.
   753  func (sf *SiaFile) HostPublicKeys() (spks []types.SiaPublicKey) {
   754  	sf.mu.RLock()
   755  	defer sf.mu.RUnlock()
   756  	// Only return the keys, not the whole entry.
   757  	keys := make([]types.SiaPublicKey, 0, len(sf.pubKeyTable))
   758  	for _, key := range sf.pubKeyTable {
   759  		keys = append(keys, key.PublicKey)
   760  	}
   761  	return keys
   762  }
   763  
   764  // NumChunks returns the number of chunks the file consists of. The count
   765  // includes chunks that are not fully uploaded yet.
   767  func (sf *SiaFile) NumChunks() uint64 {
   768  	sf.mu.RLock()
   769  	defer sf.mu.RUnlock()
   770  	return uint64(sf.numChunks)
   771  }
   772  
   773  // Pieces returns all the pieces for a chunk in a slice of slices that contains
   774  // all the pieces for a certain index.
   775  func (sf *SiaFile) Pieces(chunkIndex uint64) ([][]Piece, error) {
   776  	sf.mu.RLock()
   777  	defer sf.mu.RUnlock()
   778  
   779  	// If the file has been deleted, we can't load its pieces.
   780  	if sf.deleted {
   781  		return nil, errors.AddContext(ErrDeleted, "can't call Pieces on deleted file")
   782  	}
   783  
   784  	if chunkIndex >= uint64(sf.numChunks) {
   785  		err := fmt.Errorf("index %v out of bounds (%v)", chunkIndex, sf.numChunks)
   786  		build.Critical(err)
   787  		return [][]Piece{}, err
   788  	}
   789  	chunk, err := sf.chunk(int(chunkIndex))
   790  	if err != nil {
   791  		return nil, err
   792  	}
   793  	// Resolve pieces to Pieces.
   794  	pieces := make([][]Piece, len(chunk.Pieces))
   795  	for pieceIndex := range pieces {
   796  		pieces[pieceIndex] = make([]Piece, len(chunk.Pieces[pieceIndex]))
   797  		for i, piece := range chunk.Pieces[pieceIndex] {
   798  			pieces[pieceIndex][i] = Piece{
   799  				HostPubKey: sf.hostKey(piece.HostTableOffset).PublicKey,
   800  				MerkleRoot: piece.MerkleRoot,
   801  			}
   802  		}
   803  	}
   804  	return pieces, nil
   805  }
   806  
   807  // Redundancy returns the redundancy of the least redundant chunk. A file
   808  // becomes available when this redundancy is >= 1. Assumes that every piece is
   809  // unique within a file contract. -1 is returned if the file has size 0. It
   810  // takes two arguments, a map of offline contracts for this file and a map that
   811  // indicates if a contract is goodForRenew. The first redundancy returned is the
   812  // one that should be used by the repair code and is more accurate. The other
   813  // one is the redundancy presented to users.
   814  func (sf *SiaFile) Redundancy(offlineMap map[string]bool, goodForRenewMap map[string]bool) (r, ur float64, err error) {
   815  	sf.mu.Lock()
   816  	defer sf.mu.Unlock()
   817  	return sf.redundancy(offlineMap, goodForRenewMap)
   818  }
   819  
   820  // redundancy returns the redundancy of the least redundant chunk. A file
   821  // becomes available when this redundancy is >= 1. Assumes that every piece is
   822  // unique within a file contract. -1 is returned if the file has size 0. It
   823  // takes two arguments, a map of offline contracts for this file and a map that
   824  // indicates if a contract is goodForRenew. The first redundancy returned is the
   825  // one that should be used by the repair code and is more accurate. The other
   826  // one is the redundancy presented to users.
   827  func (sf *SiaFile) redundancy(offlineMap map[string]bool, goodForRenewMap map[string]bool) (r, ur float64, err error) {
   828  	// If the file has been deleted, we can't compute its redundancy.
   829  	if sf.deleted {
   830  		return 0, 0, errors.AddContext(ErrDeleted, "can't call Redundancy on deleted file")
   831  	}
   832  
   833  	// Update the cache.
   834  	defer func() {
   835  		sf.staticMetadata.CachedRedundancy = r
   836  		sf.staticMetadata.CachedUserRedundancy = ur
   837  	}()
   838  	if sf.staticMetadata.FileSize == 0 {
   839  		// TODO change this once tiny files are supported.
   840  		if sf.numChunks != 1 {
   841  			// should never happen
   842  			return -1, -1, nil
   843  		}
   844  		ec := sf.staticMetadata.staticErasureCode
   845  		r = float64(ec.NumPieces()) / float64(ec.MinPieces())
   846  		ur = r
   847  		return
   848  	}
   849  
   850  	ec := sf.staticMetadata.staticErasureCode
   851  	minRedundancy := math.MaxFloat64
   852  	minRedundancyUser := minRedundancy
   853  	minRedundancyNoRenewUser := math.MaxFloat64
   854  	minRedundancyNoRenew := math.MaxFloat64
   855  	err = sf.iterateChunksReadonly(func(chunk chunk) error {
   856  		// Loop over chunks and remember how many unique pieces of the chunk
   857  		// were goodForRenew and how many were not.
   858  		numPiecesRenew, numPiecesNoRenew := sf.goodPieces(chunk, offlineMap, goodForRenewMap)
   859  		redundancy := float64(numPiecesRenew) / float64(sf.staticMetadata.staticErasureCode.MinPieces())
   860  		redundancyUser := redundancy
   861  		if redundancy < minRedundancy {
   862  			minRedundancy = redundancy
   863  		}
   864  		if redundancyUser < minRedundancyUser {
   865  			minRedundancyUser = redundancyUser
   866  		}
   867  		redundancyNoRenew := float64(numPiecesNoRenew) / float64(ec.MinPieces())
   868  		redundancyNoRenewUser := redundancyNoRenew
   869  		if redundancyNoRenewUser < minRedundancyNoRenewUser {
   870  			minRedundancyNoRenewUser = redundancyNoRenewUser
   871  		}
   872  		if redundancyNoRenew < minRedundancyNoRenew {
   873  			minRedundancyNoRenew = redundancyNoRenew
   874  		}
   875  		return nil
   876  	})
   877  	if err != nil {
   878  		return 0, 0, err
   879  	}
   880  
   881  	// If the redundancyUser is smaller than 1x we return the redundancy that
   882  	// includes contracts that are not good for renewal. The reason for this is a
   883  	// better user experience. If the renter operates correctly, redundancyUser
   884  	// should never go above numPieces / minPieces and redundancyNoRenewUser should
   885  	// never go below 1.
   886  	if minRedundancyUser < 1 && minRedundancyNoRenewUser >= 1 {
   887  		ur = 1
   888  	} else if minRedundancy < 1 {
   889  		ur = minRedundancyNoRenewUser
   890  	} else {
   891  		ur = minRedundancyUser
   892  	}
   893  	r = minRedundancy
   894  	return
   895  }
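
        // Worked example (not part of the original file), assuming a 10-of-30 erasure
        // code: a chunk with 25 unique pieces on goodForRenew contracts has a redundancy
        // of 25/10 = 2.5x, while a chunk with only 8 such pieces has 0.8x. The file's
        // redundancy is the minimum over all chunks, so the file would report 0.8x and
        // not be available for download until it is repaired.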
   896  
   897  // SetAllStuck sets the Stuck field of all chunks to stuck.
   898  func (sf *SiaFile) SetAllStuck(stuck bool) error {
   899  	sf.mu.Lock()
   900  	defer sf.mu.Unlock()
   901  	return sf.setAllStuck(stuck)
   902  }
   903  
   904  // setAllStuck sets the Stuck field of all chunks to stuck.
   905  func (sf *SiaFile) setAllStuck(stuck bool) (err error) {
   906  	// If the file has been deleted we can't mark a chunk as stuck.
   907  	if sf.deleted {
   908  		return errors.AddContext(ErrDeleted, "can't call SetAllStuck on deleted file")
   909  	}
   910  
   911  	// If the file is unfinished then do not set the chunks as stuck
   912  	if !sf.finished() && stuck {
   913  		err = errors.AddContext(ErrUnfinished, "cannot set an unfinished file as stuck")
   914  		build.Critical(err)
   915  		return err
   916  	}
   917  
   918  	// Backup metadata before doing any kind of persistence.
   919  	defer func(backup Metadata) {
   920  		if err != nil {
   921  			sf.staticMetadata.restore(backup)
   922  		}
   923  	}(sf.staticMetadata.backup())
   924  	// Update metadata.
   925  	if stuck {
   926  		sf.staticMetadata.NumStuckChunks = uint64(sf.numChunks)
   927  	} else {
   928  		sf.staticMetadata.NumStuckChunks = 0
   929  	}
   930  	// Create metadata updates and apply updates on disk
   931  	updates, err := sf.saveMetadataUpdates()
   932  	if err != nil {
   933  		return err
   934  	}
   935  	// Figure out which chunks to update.
   936  	var setStuck []chunk
   937  	errIter := sf.iterateChunksReadonly(func(chunk chunk) error {
   938  		if chunk.Stuck != stuck {
   939  			setStuck = append(setStuck, chunk)
   940  			return nil
   941  		}
   942  		return nil
   943  	})
   944  	if errIter != nil {
   945  		return errIter
   946  	}
   947  	// Check if work needs to be done.
   948  	if len(setStuck) == 0 {
   949  		// We don't have any chunks to mark as stuck but make sure that
   950  		// the metadata updates are applied
   951  		return sf.createAndApplyTransaction(updates...)
   952  	}
   953  	// Create chunk updates.
   954  	chunkUpdates, errIter := sf.iterateChunks(func(chunk *chunk) (bool, error) {
   955  		if len(setStuck) == 0 {
   956  			return false, nil
   957  		}
   958  		if chunk.Index == setStuck[0].Index {
   959  			chunk.Stuck = stuck
   960  			setStuck = setStuck[1:]
   961  			return true, nil
   962  		}
   963  		return false, nil
   964  	})
   965  	if errIter != nil {
   966  		return errIter
   967  	}
   968  	// Apply updates.
   969  	updates = append(updates, chunkUpdates...)
   970  	return sf.createAndApplyTransaction(updates...)
   971  }
   972  
   973  // SetStuck sets the Stuck field of the chunk at the given index
   974  func (sf *SiaFile) SetStuck(index uint64, stuck bool) (err error) {
   975  	sf.mu.Lock()
   976  	defer sf.mu.Unlock()
   977  	// Backup the changed metadata before doing any kind of persistence.
   978  	defer func(backup Metadata) {
   979  		if err != nil {
   980  			sf.staticMetadata.restore(backup)
   981  		}
   982  	}(sf.staticMetadata.backup())
   983  	return sf.setStuck(index, stuck)
   984  }
   985  
   986  // StuckChunkByIndex returns whether the chunk at the given index is marked as stuck.
   987  func (sf *SiaFile) StuckChunkByIndex(index uint64) (bool, error) {
   988  	sf.mu.Lock()
   989  	defer sf.mu.Unlock()
   990  	chunk, err := sf.chunk(int(index))
   991  	if err != nil {
   992  		return false, errors.AddContext(err, "failed to read chunk")
   993  	}
   994  	return chunk.Stuck, nil
   995  }
   996  
   997  // UID returns a unique identifier for this file.
   998  func (sf *SiaFile) UID() SiafileUID {
   999  	sf.mu.RLock()
  1000  	defer sf.mu.RUnlock()
  1001  	return sf.staticMetadata.UniqueID
  1002  }
  1003  
  1004  // UpdateMetadata updates various parts of the siafile's metadata
  1005  func (sf *SiaFile) UpdateMetadata(offlineMap, goodForRenew map[string]bool, contracts map[string]skymodules.RenterContract, used []types.SiaPublicKey) error {
  1006  	sf.mu.Lock()
  1007  	defer sf.mu.Unlock()
  1008  	return sf.updateMetadata(offlineMap, goodForRenew, contracts, used)
  1009  }
  1010  
  1011  // updateMetadata updates various parts of the siafile's metadata
  1012  func (sf *SiaFile) updateMetadata(offlineMap, goodForRenew map[string]bool, contracts map[string]skymodules.RenterContract, used []types.SiaPublicKey) (err error) {
  1013  	// Don't update metadata for a deleted file.
  1014  	if sf.deleted {
  1015  		return ErrDeleted
  1016  	}
  1017  
  1018  	// backup the changed metadata before changing it. Revert the change on
  1019  	// error.
  1020  	oldPubKeyTable := append([]HostPublicKey{}, sf.pubKeyTable...)
  1021  	defer func(backup Metadata) {
  1022  		if err != nil {
  1023  			sf.staticMetadata.restore(backup)
  1024  			sf.pubKeyTable = oldPubKeyTable
  1025  		}
  1026  	}(sf.staticMetadata.backup())
  1027  
  1028  	// Update the siafile's used hosts.
  1029  	updates, err := sf.updateUsedHosts(used)
  1030  	if err != nil {
  1031  		return errors.AddContext(err, "unable to update used hosts")
  1032  	}
  1033  
  1034  	// Update cached redundancy values by calling the redundancy method.
  1035  	_, _, err = sf.redundancy(offlineMap, goodForRenew)
  1036  	if err != nil {
  1037  		return errors.AddContext(err, "unable to update cached redundancy")
  1038  	}
  1039  
  1040  	// Update cached health values by calling the health method.
  1041  	health, sh, _, _, _, _, _ := sf.health(offlineMap, goodForRenew)
  1042  
  1043  	// Set the finished state of the file based on the health. Ideally we
  1044  	// would look at the unique uploaded bytes, like we did in the compat
  1045  	// code. However, that requires disk reads to iterate over all the
  1046  	// chunks.
  1047  	sf.setFinished(health)
  1048  
  1049  	// Check Lost status
  1050  	if !IsLost(math.Max(health, sh), sf.onDisk(), sf.finished()) && sf.staticMetadata.Lost {
  1051  		// While rare, there is one specific case in which a file can be
  1052  		// lost and recovered which has a test, which is with stream
  1053  		// repair.
  1054  		//
  1055  		// TODO: if a logger is ever added to the siafile this could be
  1056  		// a good thing to log, this is the main reason this is within
  1057  		// an if condition and not handled automatically every time. We
  1058  		// should be aware of all the cases in which a lost file can be
  1059  		// recovered.
  1060  		sf.staticMetadata.Lost = false
  1061  	}
  1062  
  1063  	// Set the LastHealthCheckTime
  1064  	sf.staticMetadata.LastHealthCheckTime = time.Now()
  1065  
  1066  	// Update the cached expiration of the siafile by calling the expiration
  1067  	// method.
  1068  	_ = sf.expiration(contracts)
  1069  
  1070  	// Generate the header updates since updateUsedHosts updates the
  1071  	// pubKeyTable.
  1072  	headerUpdates, err := sf.saveHeaderUpdates()
  1073  	if err != nil {
  1074  		return errors.AddContext(err, "unable to generate header updates")
  1075  	}
  1076  	updates = append(updates, headerUpdates...)
  1077  
  1078  	// Save the updates.
  1079  	return sf.createAndApplyTransaction(updates...)
  1080  }
  1081  
  1082  // updateUsedHosts returns the wal updates needed for updating the used hosts
  1083  // for the siafile.
  1084  func (sf *SiaFile) updateUsedHosts(used []types.SiaPublicKey) (_ []writeaheadlog.Update, err error) {
  1085  	// Can't update used hosts on deleted file.
  1086  	if sf.deleted {
  1087  		return nil, errors.AddContext(ErrDeleted, "can't call UpdateUsedHosts on deleted file")
  1088  	}
  1089  	// Restore the metadata if an error occurs below.
  1090  	oldPubKeyTable := append([]HostPublicKey{}, sf.pubKeyTable...)
  1091  	defer func(backup Metadata) {
  1092  		if err != nil {
  1093  			sf.staticMetadata.restore(backup)
  1094  			sf.pubKeyTable = oldPubKeyTable
  1095  		}
  1096  	}(sf.staticMetadata.backup())
  1097  	// Create a map of the used keys for faster lookups.
  1098  	usedMap := make(map[string]struct{})
  1099  	for _, key := range used {
  1100  		usedMap[key.String()] = struct{}{}
  1101  	}
  1102  	// Mark the entries in the table. If the entry exists 'Used' is true.
  1103  	// Otherwise it's 'false'.
  1104  	var unusedHosts uint
  1105  	for i, entry := range sf.pubKeyTable {
  1106  		_, used := usedMap[entry.PublicKey.String()]
  1107  		sf.pubKeyTable[i].Used = used
  1108  		if !used {
  1109  			unusedHosts++
  1110  		}
  1111  	}
  1112  	// Prune the pubKeyTable if necessary. If we have too many unused hosts we
  1113  	// want to remove them from the table but only if we have enough used hosts.
  1114  	// Otherwise we might be pruning hosts that could become used again since
  1115  	// the file might be in flux while it uploads or repairs
  1116  	tooManyUnusedHosts := unusedHosts > pubKeyTableUpperPruneThreshold
  1117  	enoughUsedHosts := len(usedMap) > sf.staticMetadata.staticErasureCode.NumPieces()
  1118  	if tooManyUnusedHosts && enoughUsedHosts {
  1119  		// If we prune the hosts, we apply the update right away and return an
  1120  		// empty set of updates. That's because pruning the hosts involves
  1121  		// updating the pieces on disk as well and the calling code might be
  1122  		// dependent on the pieces being up-to-date. Since we don't expect to
  1123  		// prune the host pubkey table frequently, this shouldn't impact
  1124  		// performance.
  1125  		pruneUpdates, err := sf.pruneHosts(pubKeyTableLowerPruneThreshold)
  1126  		if err != nil {
  1127  			return nil, errors.AddContext(err, "pruneHosts failed")
  1128  		}
  1129  		return []writeaheadlog.Update{}, sf.createAndApplyTransaction(pruneUpdates...)
  1130  	}
  1131  	// If we don't prune the hosts we explicitly save the header.
  1132  	headerUpdates, err := sf.saveHeaderUpdates()
  1133  	if err != nil {
  1134  		return nil, err
  1135  	}
  1136  	return headerUpdates, nil
  1137  }
  1138  
  1139  // defragChunk removes pieces which belong to bad hosts and, if that wasn't
  1140  // enough to reduce the chunk size below the maximum size, removes redundant
  1141  // pieces.
  1142  func (sf *SiaFile) defragChunk(chunk *chunk) {
  1143  	// Calculate how many pieces every pieceSet can contain.
  1144  	maxChunkSize := int64(sf.staticMetadata.StaticPagesPerChunk) * pageSize
  1145  	maxPieces := (maxChunkSize - marshaledChunkOverhead) / marshaledPieceSize
  1146  	maxPiecesPerSet := maxPieces / int64(len(chunk.Pieces))
  1147  
  1148  	// Filter out pieces with unused hosts since we don't have contracts with
  1149  	// those anymore.
  1150  	for i, pieceSet := range chunk.Pieces {
  1151  		var newPieceSet []piece
  1152  		for _, piece := range pieceSet {
  1153  			if int64(len(newPieceSet)) == maxPiecesPerSet {
  1154  				break
  1155  			}
  1156  			if sf.hostKey(piece.HostTableOffset).Used {
  1157  				newPieceSet = append(newPieceSet, piece)
  1158  			}
  1159  		}
  1160  		chunk.Pieces[i] = newPieceSet
  1161  	}
  1162  }
  1163  
  1164  // hostKey fetches a host's key from the pubKeyTable. It also checks the offset
  1165  // against the table to make sure it's not out of bounds. If it is, build.Critical
  1166  // is called and, to avoid a crash in production, dummy hosts are added.
  1167  func (sf *SiaFile) hostKey(offset uint32) HostPublicKey {
  1168  	// Add dummy hostkeys to the table in case of siafile corruption and mark
  1169  	// them as unused. The next time the table is pruned, the keys will be
  1170  	// removed, which is fine. This doesn't fix heavy corruption and the file
  1171  	// might still be lost, but it's better than crashing.
  1172  	if offset >= uint32(len(sf.pubKeyTable)) {
  1173  		// Causes tests to fail. The following for loop will try to fix the
  1174  		// corruption on release builds.
  1175  		build.Critical("piece.HostTableOffset", offset, " >= len(sf.pubKeyTable)", len(sf.pubKeyTable), sf.deleted)
  1176  		for offset >= uint32(len(sf.pubKeyTable)) {
  1177  			sf.pubKeyTable = append(sf.pubKeyTable, HostPublicKey{Used: false})
  1178  		}
  1179  	}
  1180  	return sf.pubKeyTable[offset]
  1181  }
  1182  
  1183  // pruneHosts prunes the unused hostkeys from the file, updates the
  1184  // HostTableOffset of the pieces and removes pieces which no longer have a
  1185  // host. At most maxUnusedHosts unused hosts will be kept.
  1186  func (sf *SiaFile) pruneHosts(maxUnusedHosts int) (_ []writeaheadlog.Update, err error) {
  1187  	var prunedTable []HostPublicKey
  1188  	// Backup the changed metadata before changing it. Revert the change on
  1189  	// error.
  1190  	oldPubKeyTable := append([]HostPublicKey{}, sf.pubKeyTable...)
  1191  	defer func(backup Metadata) {
  1192  		if err != nil {
  1193  			sf.staticMetadata.restore(backup)
  1194  			sf.pubKeyTable = oldPubKeyTable
  1195  		}
  1196  	}(sf.staticMetadata.backup())
  1197  
  1198  	// Shuffle the pubKeyTable first. That way the unused hosts we keep are random.
  1199  	// We need to remember the original offsets for creating the offsetMap later.
  1200  	tableOffsets := make([]uint32, len(sf.pubKeyTable))
  1201  	for i := range tableOffsets {
  1202  		tableOffsets[i] = uint32(i)
  1203  	}
  1204  	fastrand.Shuffle(len(sf.pubKeyTable), func(i, j int) {
  1205  		sf.pubKeyTable[i], sf.pubKeyTable[j] = sf.pubKeyTable[j], sf.pubKeyTable[i]
  1206  		tableOffsets[i], tableOffsets[j] = tableOffsets[j], tableOffsets[i]
  1207  	})
  1208  
  1209  	// Create a map to track how the indices of the hostkeys changed when being
  1210  	// pruned. We add all used hosts and up to maxUnusedHosts unused ones.
  1211  	unusedAdded := 0
  1212  	offsetMap := make(map[uint32]uint32)
  1213  	for i := uint32(0); i < uint32(len(tableOffsets)); i++ {
  1214  		if sf.pubKeyTable[i].Used || unusedAdded < maxUnusedHosts {
  1215  			prunedTable = append(prunedTable, sf.pubKeyTable[i])
  1216  			offsetMap[tableOffsets[i]] = uint32(len(prunedTable) - 1)
  1217  			if !sf.pubKeyTable[i].Used {
  1218  				unusedAdded++
  1219  			}
  1220  		}
  1221  	}
  1222  	sf.pubKeyTable = prunedTable
  1223  	// Update the header first.
  1224  	headerUpdates, err := sf.saveHeaderUpdates()
  1225  	if err != nil {
  1226  		return nil, err
  1227  	}
  1228  	// With this map we loop over all the chunks and pieces and update the ones
  1229  	// who got a new offset and remove the ones that no longer have one.
  1230  	chunkUpdates, err := sf.iterateChunks(func(chunk *chunk) (bool, error) {
  1231  		for pieceIndex, pieceSet := range chunk.Pieces {
  1232  			var newPieceSet []piece
  1233  			for i, piece := range pieceSet {
  1234  				newOffset, exists := offsetMap[piece.HostTableOffset]
  1235  				if exists {
  1236  					pieceSet[i].HostTableOffset = newOffset
  1237  					newPieceSet = append(newPieceSet, pieceSet[i])
  1238  				}
  1239  			}
  1240  			chunk.Pieces[pieceIndex] = newPieceSet
  1241  		}
  1242  		return true, nil
  1243  	})
  1244  	if err != nil {
  1245  		return nil, err
  1246  	}
  1247  	return append(headerUpdates, chunkUpdates...), nil
  1248  }
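
        // For illustration (not part of the original file): if the entries kept after
        // the shuffle originally sat at offsets 0, 3 and 5, the offsetMap becomes
        // {0: 0, 3: 1, 5: 2}. Pieces pointing at offsets 1, 2 or 4 find no entry in the
        // map and are dropped together with their hosts.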
  1249  
  1250  // GoodPieces loops over the pieces of a chunk and tracks the number of unique
  1251  // pieces that are good for upload, meaning the host is online, and the number
  1252  // of unique pieces that are good for renew, meaning the contract is set to
  1253  // renew.
  1254  func (sf *SiaFile) GoodPieces(chunkIndex int, offlineMap map[string]bool, goodForRenewMap map[string]bool) (uint64, uint64) {
  1255  	sf.mu.RLock()
  1256  	defer sf.mu.RUnlock()
  1257  	chunk, err := sf.chunk(chunkIndex)
  1258  	if err != nil {
  1259  		build.Critical("failed to retrieve chunk for goodPieces: ", err)
  1260  		return 0, 0
  1261  	}
  1262  	return sf.goodPieces(chunk, offlineMap, goodForRenewMap)
  1263  }
  1264  
  1265  // goodPieces loops over the pieces of a chunk and tracks the number of unique
  1266  // pieces that are good for upload, meaning the host is online, and the number
  1267  // of unique pieces that are good for renew, meaning the contract is set to
  1268  // renew.
  1269  func (sf *SiaFile) goodPieces(chunk chunk, offlineMap map[string]bool, goodForRenewMap map[string]bool) (uint64, uint64) {
  1270  	numPiecesGoodForRenew := uint64(0)
  1271  	numPiecesGoodForUpload := uint64(0)
  1272  
  1273  	for _, pieceSet := range chunk.Pieces {
  1274  		// Remember if we encountered a goodForRenew piece or a
  1275  		// !goodForRenew piece that was at least online.
  1276  		foundGoodForRenew := false
  1277  		foundOnline := false
  1278  		for _, piece := range pieceSet {
  1279  			offline, exists1 := offlineMap[sf.hostKey(piece.HostTableOffset).PublicKey.String()]
  1280  			goodForRenew, exists2 := goodForRenewMap[sf.hostKey(piece.HostTableOffset).PublicKey.String()]
  1281  			if exists1 != exists2 {
  1282  				build.Critical("contract can't be in one map but not in the other")
  1283  			}
  1284  			if !exists1 || offline {
  1285  				continue
  1286  			}
  1287  			// If we found a goodForRenew piece we can stop.
  1288  			if goodForRenew {
  1289  				foundGoodForRenew = true
  1290  				break
  1291  			}
  1292  			// Otherwise we continue since there might be other hosts with
  1293  			// the same piece that are goodForRenew. We still remember that
  1294  			// we found an online piece though.
  1295  			foundOnline = true
  1296  		}
  1297  		if foundGoodForRenew {
  1298  			numPiecesGoodForRenew++
  1299  			numPiecesGoodForUpload++
  1300  		} else if foundOnline {
  1301  			numPiecesGoodForUpload++
  1302  		}
  1303  	}
  1304  	return numPiecesGoodForRenew, numPiecesGoodForUpload
  1305  }
  1306  
  1307  // UploadProgressAndBytes is the exported wrapper for uploadProgressAndBytes.
  1308  func (sf *SiaFile) UploadProgressAndBytes() (float64, uint64, error) {
  1309  	sf.mu.Lock()
  1310  	defer sf.mu.Unlock()
  1311  	return sf.uploadProgressAndBytes()
  1312  }
  1313  
  1314  // Chunk returns the chunk of a SiaFile at a given index.
  1315  func (sf *SiaFile) Chunk(chunkIndex uint64) (chunk, error) {
  1316  	sf.mu.Lock()
  1317  	defer sf.mu.Unlock()
  1318  	return sf.chunk(int(chunkIndex))
  1319  }
  1320  
  1321  // Shrink shrinks the siafile to a certain number of chunks.
  1322  func (sf *SiaFile) Shrink(numChunks uint64) (err error) {
  1323  	sf.mu.Lock()
  1324  	defer sf.mu.Unlock()
  1325  
  1326  	// Sanity check.
  1327  	if numChunks >= uint64(sf.numChunks) {
  1328  		return errShrinkWithTooManyChunks
  1329  	}
  1330  
  1331  	// Restore metadata if necessary.
  1332  	defer func(backup Metadata) {
  1333  		if err != nil {
  1334  			sf.staticMetadata.restore(backup)
  1335  		}
  1336  	}(sf.staticMetadata.backup())
  1337  
  1338  	// Update the fileSize and number of chunks.
  1339  	sf.numChunks = int(numChunks)
  1340  	sf.staticMetadata.FileSize = int64(sf.staticChunkSize() * uint64(sf.numChunks))
  1341  	mdu, err := sf.saveMetadataUpdates()
  1342  	if err != nil {
  1343  		return err
  1344  	}
  1345  
  1346  	// Truncate the file.
  1347  	tu := writeaheadlog.TruncateUpdate(sf.siaFilePath, sf.chunkOffset(sf.numChunks))
  1348  
  1349  	// Apply the updates.
  1350  	return sf.createAndApplyTransaction(append(mdu, tu)...)
  1351  }
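
        // A minimal, hypothetical usage sketch for Shrink: a caller truncating a
        // 10-chunk siafile down to 8 chunks would do roughly the following. Passing a
        // value >= the current number of chunks returns errShrinkWithTooManyChunks
        // instead.
        //
        //	if err := sf.Shrink(8); err != nil {
        //		return errors.AddContext(err, "unable to shrink siafile")
        //	}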
  1352  
  1353  // growNumChunks increases the number of chunks in the SiaFile to numChunks. If
  1354  // the file already contains >= numChunks chunks then growNumChunks is a no-op.
  1355  func (sf *SiaFile) growNumChunks(numChunks uint64) (updates []writeaheadlog.Update, err error) {
  1356  	if sf.deleted {
  1357  		return nil, errors.AddContext(ErrDeleted, "can't grow number of chunks of deleted file")
  1358  	}
  1359  	// Check if we need to grow the file.
  1360  	if uint64(sf.numChunks) >= numChunks {
  1361  		// Handle the edge case where the file has 1 chunk but a size of 0. When we
  1362  		// grow such a file to 1 chunk we want to bump the size above 0.
  1363  		sf.staticMetadata.FileSize = int64(sf.staticChunkSize() * uint64(sf.numChunks))
  1364  		return nil, nil
  1365  	}
  1366  	// Backup the changed metadata before changing it. Revert the change on
  1367  	// error.
  1368  	oldNumChunks := sf.numChunks
  1369  	defer func(backup Metadata) {
  1370  		if err != nil {
  1371  			sf.staticMetadata.restore(backup)
  1372  			sf.numChunks = oldNumChunks
  1373  		}
  1374  	}(sf.staticMetadata.backup())
  1375  	// Update the chunks.
  1376  	newChunks := make([]chunk, 0, numChunks-uint64(sf.numChunks))
  1377  	for uint64(sf.numChunks) < numChunks {
  1378  		newChunk := chunk{
  1379  			Index:  int(sf.numChunks),
  1380  			Pieces: make([][]piece, sf.staticMetadata.staticErasureCode.NumPieces()),
  1381  		}
  1382  		sf.numChunks++
  1383  		newChunks = append(newChunks, newChunk)
  1384  	}
  1385  	// Update the fileSize.
  1386  	sf.staticMetadata.FileSize = int64(sf.staticChunkSize() * uint64(sf.numChunks))
  1387  	mdu, err := sf.saveMetadataUpdates()
  1388  	if err != nil {
  1389  		return nil, err
  1390  	}
  1391  	// Prepare chunk updates.
  1392  	for _, newChunk := range newChunks {
  1393  		updates = append(updates, sf.saveChunkUpdate(newChunk))
  1394  	}
  1395  	return append(updates, mdu...), nil
  1396  }
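
        // growNumChunks only prepares WAL updates; nothing is persisted until they are
        // applied. A hypothetical exported wrapper would therefore look roughly like
        // the sketch below, mirroring the persistence pattern used elsewhere in this
        // file:
        //
        //	sf.mu.Lock()
        //	defer sf.mu.Unlock()
        //	updates, err := sf.growNumChunks(numChunks)
        //	if err != nil {
        //		return err
        //	}
        //	return sf.createAndApplyTransaction(updates...)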
  1397  
  1398  // removeLastChunk removes the last chunk of the SiaFile and truncates the file
  1399  // accordingly. This method might change the metadata but doesn't persist the
  1400  // change itself; the caller is responsible for persisting it.
  1401  func (sf *SiaFile) removeLastChunk() (err error) {
  1402  	if sf.deleted {
  1403  		return errors.AddContext(ErrDeleted, "can't remove last chunk of deleted file")
  1404  	}
  1405  	// Backup the changed metadata before changing it. Revert the change on
  1406  	// error.
  1407  	defer func(backup Metadata) {
  1408  		if err != nil {
  1409  			sf.staticMetadata.restore(backup)
  1410  		}
  1411  	}(sf.staticMetadata.backup())
  1412  	// Remove a chunk. If the removed chunk was stuck, update the metadata.
  1413  	chunk, err := sf.chunk(sf.numChunks - 1)
  1414  	if err != nil {
  1415  		return err
  1416  	}
  1417  	if chunk.Stuck {
  1418  		sf.staticMetadata.NumStuckChunks--
  1419  	}
  1420  	// Truncate the file on disk.
  1421  	fi, err := os.Stat(sf.siaFilePath)
  1422  	if err != nil {
  1423  		return err
  1424  	}
  1425  	update := writeaheadlog.TruncateUpdate(sf.siaFilePath, fi.Size()-int64(sf.staticMetadata.StaticPagesPerChunk)*pageSize)
  1426  	return sf.createAndApplyTransaction(update)
  1427  }
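
        // Each chunk occupies StaticPagesPerChunk pages on disk, so the truncation
        // target above is just the current file size minus one chunk's worth of pages.
        // As a worked example with made-up values of StaticPagesPerChunk = 2 and
        // pageSize = 4096, a 20480-byte file would be truncated to:
        //
        //	newSize := int64(20480) - int64(2)*4096 // 12288 bytes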
  1428  
  1429  // SetFinished sets the file's Finished field in the metadata
  1430  func (sf *SiaFile) SetFinished(health float64) (err error) {
  1431  	sf.mu.Lock()
  1432  	defer sf.mu.Unlock()
  1433  	// Backup metadata before doing any kind of persistence.
  1434  	defer func(backup Metadata) {
  1435  		if err != nil {
  1436  			sf.staticMetadata.restore(backup)
  1437  		}
  1438  	}(sf.staticMetadata.backup())
  1439  
  1440  	// Update the metadata
  1441  	sf.setFinished(health)
  1442  
  1443  	// Save the metadata updates
  1444  	updates, err := sf.saveMetadataUpdates()
  1445  	if err != nil {
  1446  		return err
  1447  	}
  1448  	return sf.createAndApplyTransaction(updates...)
  1449  }
  1450  
  1451  // MarkAsLazyUpload marks a siafile as being lazily uploaded.
  1452  func (sf *SiaFile) MarkAsLazyUpload() (err error) {
  1453  	sf.mu.Lock()
  1454  	defer sf.mu.Unlock()
  1455  	// Backup metadata before doing any kind of persistence.
  1456  	defer func(backup Metadata) {
  1457  		if err != nil {
  1458  			sf.staticMetadata.restore(backup)
  1459  		}
  1460  	}(sf.staticMetadata.backup())
  1461  
  1462  	// Update the metadata
  1463  	sf.staticMetadata.LazyUpload = true
  1464  
  1465  	// Save the metadata updates
  1466  	updates, err := sf.saveMetadataUpdates()
  1467  	if err != nil {
  1468  		return err
  1469  	}
  1470  	return sf.createAndApplyTransaction(updates...)
  1471  }
  1472  
  1473  // setFinished sets the file's Finished field in the metadata
  1474  func (sf *SiaFile) setFinished(health float64) {
  1475  	// Once a file is finished it cannot be unfinished.
  1476  	if sf.staticMetadata.Finished {
  1477  		return
  1478  	}
  1479  	// A file is finished if the health is <= 1 or there is a localPath. A
  1480  	// file is finished if there is a localPath because a file can be
  1481  	// repaired from the local file even if it loses 100% of its health.
  1482  	// Additionally, a siafile with a local file is immediately accessible
  1483  	// because we serve downloads from disk in the case that there is a
  1484  	// local file present.
  1485  	sf.staticMetadata.Finished = health <= 1 || sf.onDisk()
  1486  }
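
        // As a rough illustration of the rule above: a file reported at health 0.75 is
        // marked finished even without a local copy, while a file at health 1.5 only
        // becomes finished if sf.onDisk() reports a local file to repair from.
        //
        //	sf.setFinished(0.75) // Finished becomes true since health <= 1.
        //	sf.setFinished(1.5)  // Finished only becomes true if a local file exists.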
  1487  
  1488  // setStuck sets the Stuck field of the chunk at the given index
  1489  func (sf *SiaFile) setStuck(index uint64, stuck bool) (err error) {
  1490  	// If the file has been deleted we can't mark a chunk as stuck.
  1491  	if sf.deleted {
  1492  		return errors.AddContext(ErrDeleted, "can't call SetStuck on deleted file")
  1493  	}
  1494  	// A file can only be marked as stuck if it has previously finished.
  1495  	if !sf.finished() && stuck {
  1496  		err = errors.AddContext(ErrUnfinished, "cannot set an unfinished file as stuck")
  1497  		build.Critical(err)
  1498  		return err
  1499  	}
  1500  
  1501  	// Get the chunk.
  1502  	chunk, err := sf.chunk(int(index))
  1503  	if err != nil {
  1504  		return err
  1505  	}
  1506  	// Check for change
  1507  	if stuck == chunk.Stuck {
  1508  		return nil
  1509  	}
  1510  	// Backup the changed metadata before changing it. Revert the change on
  1511  	// error.
  1512  	defer func(backup Metadata) {
  1513  		if err != nil {
  1514  			sf.staticMetadata.restore(backup)
  1515  		}
  1516  	}(sf.staticMetadata.backup())
  1517  	// Update chunk and NumStuckChunks in siafile metadata
  1518  	chunk.Stuck = stuck
  1519  	if stuck {
  1520  		sf.staticMetadata.NumStuckChunks++
  1521  	} else {
  1522  		sf.staticMetadata.NumStuckChunks--
  1523  	}
  1524  	// Update chunk and metadata on disk
  1525  	updates, err := sf.saveMetadataUpdates()
  1526  	if err != nil {
  1527  		return err
  1528  	}
  1529  	update := sf.saveChunkUpdate(chunk)
  1530  	updates = append(updates, update)
  1531  	return sf.createAndApplyTransaction(updates...)
  1532  }
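
        // Because setStuck returns early when the flag doesn't change, NumStuckChunks
        // only moves when a chunk actually flips between stuck and unstuck. A
        // hypothetical exported wrapper would simply acquire the write lock before
        // delegating, roughly:
        //
        //	sf.mu.Lock()
        //	defer sf.mu.Unlock()
        //	return sf.setStuck(index, stuck)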
  1533  
  1534  // uploadProgressAndBytes updates the CachedUploadProgress and
  1535  // CachedUploadedBytes fields to indicate what percentage of the file has been
  1536  // uploaded, based on the unique pieces that have been uploaded, and also how
  1537  // many bytes of that file have been uploaded in total. Note that a file may be
  1538  // Available long before UploadProgress reaches 100%.
  1539  func (sf *SiaFile) uploadProgressAndBytes() (float64, uint64, error) {
  1540  	_, uploaded, err := sf.uploadedBytes()
  1541  	if err != nil {
  1542  		return 0, 0, err
  1543  	}
  1544  	if sf.staticMetadata.FileSize == 0 {
  1545  		// Update cache.
  1546  		sf.staticMetadata.CachedUploadProgress = 100
  1547  		return 100, uploaded, nil
  1548  	}
  1549  	desired := uint64(sf.numChunks) * modules.SectorSize * uint64(sf.staticMetadata.staticErasureCode.NumPieces())
  1550  	// Update cache.
  1551  	sf.staticMetadata.CachedUploadProgress = math.Min(100*(float64(uploaded)/float64(desired)), 100)
  1552  	return sf.staticMetadata.CachedUploadProgress, uploaded, nil
  1553  }
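
        // The progress formula above is uploaded/desired capped at 100%. As a worked
        // example with made-up parameters of 2 chunks, a 4 MiB sector size and 10
        // erasure-coded pieces per chunk, 20 MiB of unique uploaded data comes out to
        // 25%:
        //
        //	desired := uint64(2) * uint64(4<<20) * uint64(10)                  // 80 MiB
        //	progress := math.Min(100*(float64(20<<20)/float64(desired)), 100) // 25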
  1554  
  1555  // uploadedBytes indicates how many bytes of the file have been uploaded via
  1556  // current file contracts in total as well as unique uploaded bytes. Note that
  1557  // this includes padding and redundancy, so uploadedBytes can return a value
  1558  // much larger than the file's original filesize.
  1559  func (sf *SiaFile) uploadedBytes() (uint64, uint64, error) {
  1560  	var total, unique uint64
  1561  	err := sf.iterateChunksReadonly(func(chunk chunk) error {
  1562  		for _, pieceSet := range chunk.Pieces {
  1563  			// Move onto the next pieceSet if nothing has been uploaded yet.
  1564  			if len(pieceSet) == 0 {
  1565  				continue
  1566  			}
  1567  			// Note: we need to multiply by SectorSize here instead of the
  1568  			// piece size because the actual bytes uploaded include overhead
  1569  			// from the Twofish encryption.
  1570  			//
  1571  			// Sum the total bytes uploaded
  1572  			total += uint64(len(pieceSet)) * modules.SectorSize
  1573  			// Sum the unique bytes uploaded
  1574  			unique += modules.SectorSize
  1575  		}
  1576  		return nil
  1577  	})
  1578  	if err != nil {
  1579  		return 0, 0, errors.AddContext(err, "failed to compute uploaded bytes")
  1580  	}
  1581  	// Update cache.
  1582  	sf.staticMetadata.CachedUploadedBytes = total
  1583  	return total, unique, nil
  1584  }
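
        // As a worked example of the bookkeeping above: for a single chunk whose first
        // piece set holds 3 pieces and whose second piece set holds 1 piece, the loop
        // counts 4 sectors of total upload but only 2 unique sectors, one per
        // non-empty piece set.
        //
        //	total := uint64(3)*modules.SectorSize + uint64(1)*modules.SectorSize // 4 sectors
        //	unique := uint64(2) * modules.SectorSize                              // 2 sectors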