gitlab.com/SiaPrime/SiaPrime@v1.4.1/modules/renter/siafile/siafile.go

     1  package siafile
     2  
     3  import (
     4  	"bytes"
     5  	"fmt"
     6  	"io"
     7  	"math"
     8  	"os"
     9  	"sync"
    10  	"time"
    11  
    12  	"gitlab.com/NebulousLabs/errors"
    13  	"gitlab.com/SiaPrime/writeaheadlog"
    14  
    15  	"gitlab.com/SiaPrime/SiaPrime/build"
    16  	"gitlab.com/SiaPrime/SiaPrime/crypto"
    17  	"gitlab.com/SiaPrime/SiaPrime/encoding"
    18  	"gitlab.com/SiaPrime/SiaPrime/modules"
    19  	"gitlab.com/SiaPrime/SiaPrime/types"
    20  )
    21  
    22  var (
    23  	// ErrPathOverload is an error when a file already exists at that location
    24  	ErrPathOverload = errors.New("a file already exists at that location")
    25  	// ErrUnknownPath is an error when a file cannot be found with the given path
    26  	ErrUnknownPath = errors.New("no file known with that path")
    27  	// ErrUnknownThread is an error when a SiaFile is trying to be closed by a
    28  	// thread that is not in the threadMap
    29  	ErrUnknownThread = errors.New("thread should not be calling Close(), does not have control of the siafile")
    30  )
    31  
    32  type (
    33  	// SiaFile is the disk format for files uploaded to the Sia network.  It
    34  	// contains all the necessary information to recover a file from its hosts and
    35  	// allows for easy constant-time updates of the file without having to read or
    36  	// write the whole file.
    37  	SiaFile struct {
    38  		// staticMetadata is the mostly static metadata of a SiaFile. The reserved
    39  		// size of the metadata on disk should always be a multiple of 4kib.
    40  		// The metadata is also the only part of the file that is JSON encoded
    41  		// and can therefore be easily extended.
    42  		staticMetadata Metadata
    43  
    44  		// pubKeyTable stores the public keys of the hosts this file's pieces are uploaded to.
    45  		// Since multiple pieces from different chunks might be uploaded to the same host, this
    46  		// allows us to deduplicate the rather large public keys.
    47  		pubKeyTable []HostPublicKey
    48  
    49  		// numChunks is the number of chunks the file was split into including a
    50  		// potential partial chunk at the end.
    51  		numChunks int
    52  
    53  		// utility fields. These are not persisted.
    54  		deleted bool
    55  		deps    modules.Dependencies
    56  		mu      sync.RWMutex
    57  		wal     *writeaheadlog.WAL // the wal that is used for SiaFiles
    58  
    59  		// siaFilePath is the path to the .sia file on disk.
    60  		siaFilePath string
    61  
    62  		// partialsSiaFile is the SiaFile that holds or could hold the partial chunk of
    63  		// this siafile. Since we don't know if a file is going to have a partial
    64  		// chunk, we simply keep the partials siafiles open at all times and assign
    65  		// them to SiaFiles with matching redundancy.
    66  		partialsSiaFile *SiaFileSetEntry
    67  	}
    68  
    69  	// chunk represents a single chunk of a file on disk
    70  	chunk struct {
    71  		// ExtensionInfo is some reserved space for each chunk that allows us
    72  		// to indicate if a chunk is special.
    73  		ExtensionInfo [16]byte
    74  
    75  		// Index is the index of the chunk.
    76  		Index int
    77  
    78  		// Pieces are the Pieces of the file the chunk consists of.
    79  		Pieces [][]piece
    80  
    81  		// Stuck indicates if the chunk was not repaired as expected by the
    82  		// repair loop
    83  		Stuck bool
    84  	}
    85  
    86  	// Chunk is an exported chunk. It contains exported pieces.
    87  	Chunk struct {
    88  		Pieces [][]Piece
    89  	}
    90  
    91  	// piece represents a single piece of a chunk on disk
    92  	piece struct {
    93  		offset          uint32      // offset of the piece within the sector
    94  		length          uint32      // length of the piece within the sector
    95  		HostTableOffset uint32      // offset of the host's key within the pubKeyTable
    96  		MerkleRoot      crypto.Hash // merkle root of the piece
    97  	}
    98  
    99  	// Piece is an exported piece. It contains a resolved public key instead of
   100  	// the table offset.
   101  	Piece struct {
   102  		HostPubKey types.SiaPublicKey // public key of the host
   103  		MerkleRoot crypto.Hash        // merkle root of the piece
   104  	}
   105  
   106  	// HostPublicKey is an entry in the HostPubKey table.
   107  	HostPublicKey struct {
   108  		PublicKey types.SiaPublicKey // public key of host
   109  		Used      bool               // indicates if we currently use this host
   110  	}
   111  )
   112  
   113  // MarshalSia implements the encoding.SiaMarshaler interface.
   114  func (hpk HostPublicKey) MarshalSia(w io.Writer) error {
   115  	e := encoding.NewEncoder(w)
   116  	e.Encode(hpk.PublicKey)
   117  	e.WriteBool(hpk.Used)
   118  	return e.Err()
   119  }
   120  
   121  // SiaFilePath returns the siaFilePath field of the SiaFile.
   122  func (sf *SiaFile) SiaFilePath() string {
   123  	sf.mu.RLock()
   124  	defer sf.mu.RUnlock()
   125  	return sf.siaFilePath
   126  }
   127  
   128  // UnmarshalSia implements the encoding.SiaUnmarshaler interface.
   129  func (hpk *HostPublicKey) UnmarshalSia(r io.Reader) error {
   130  	d := encoding.NewDecoder(r, encoding.DefaultAllocLimit)
   131  	d.Decode(&hpk.PublicKey)
   132  	hpk.Used = d.NextBool()
   133  	return d.Err()
   134  }
   135  
   136  // numPieces returns the total number of pieces uploaded for a chunk. This
   137  // means that numPieces can be greater than the number of pieces created by the
   138  // erasure coder.
   139  func (c *chunk) numPieces() (numPieces int) {
   140  	for _, c := range c.Pieces {
   141  		numPieces += len(c)
   142  	}
   143  	return
   144  }
   145  
   146  // New creates a new SiaFile.
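//
// A minimal usage sketch (illustrative only; wal, rsCode, cipherKey and
// partialsEntry are hypothetical placeholders for values created elsewhere):
//
//	sf, err := New("/renter/movie.sia", "/home/user/movie.mp4", wal, rsCode,
//		cipherKey, 1<<26, 0600, partialsEntry, false)
//	if err != nil {
//		// handle error
//	}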
   147  func New(siaFilePath, source string, wal *writeaheadlog.WAL, erasureCode modules.ErasureCoder, masterKey crypto.CipherKey, fileSize uint64, fileMode os.FileMode, partialsSiaFile *SiaFileSetEntry, disablePartialUpload bool) (*SiaFile, error) {
   148  	currentTime := time.Now()
   149  	ecType, ecParams := marshalErasureCoder(erasureCode)
   150  	zeroHealth := float64(1 + erasureCode.MinPieces()/(erasureCode.NumPieces()-erasureCode.MinPieces()))
   151  	file := &SiaFile{
   152  		staticMetadata: Metadata{
   153  			AccessTime:              currentTime,
   154  			ChunkOffset:             defaultReservedMDPages * pageSize,
   155  			ChangeTime:              currentTime,
   156  			CreateTime:              currentTime,
   157  			CachedHealth:            zeroHealth,
   158  			CachedStuckHealth:       0,
   159  			CachedRedundancy:        0,
   160  			CachedUserRedundancy:    0,
   161  			CachedUploadProgress:    0,
   162  			DisablePartialChunk:     disablePartialUpload,
   163  			FileSize:                int64(fileSize),
   164  			LocalPath:               source,
   165  			StaticMasterKey:         masterKey.Key(),
   166  			StaticMasterKeyType:     masterKey.Type(),
   167  			Mode:                    fileMode,
   168  			ModTime:                 currentTime,
   169  			staticErasureCode:       erasureCode,
   170  			StaticErasureCodeType:   ecType,
   171  			StaticErasureCodeParams: ecParams,
   172  			StaticPagesPerChunk:     numChunkPagesRequired(erasureCode.NumPieces()),
   173  			StaticPieceSize:         modules.SectorSize - masterKey.Type().Overhead(),
   174  			UniqueID:                uniqueID(),
   175  		},
   176  		deps:            modules.ProdDependencies,
   177  		partialsSiaFile: partialsSiaFile,
   178  		siaFilePath:     siaFilePath,
   179  		wal:             wal,
   180  	}
   181  	// Init chunks.
   182  	numChunks := fileSize / file.staticChunkSize()
   183  	if fileSize%file.staticChunkSize() != 0 && partialsSiaFile != nil && !disablePartialUpload {
   184  		// This file has a partial chunk
   185  		file.staticMetadata.HasPartialChunk = true
   186  		numChunks++
   187  	} else if fileSize%file.staticChunkSize() != 0 && disablePartialUpload {
   188  		// This file does have a partial chunk but we treat it as a full chunk.
   189  		numChunks++
   190  	} else if fileSize%file.staticChunkSize() != 0 && partialsSiaFile == nil {
   191  		return nil, errors.New("can't create a file with a partial chunk without assigning a partialsSiaFile")
   192  	}
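	// Illustrative example: if staticChunkSize() is c and fileSize is 2.5*c,
	// numChunks starts at 2; the trailing 0.5*c either becomes a partial chunk
	// (HasPartialChunk is set), is counted as a third full chunk when
	// disablePartialUpload is true, or causes an error when no partialsSiaFile
	// was provided and partial uploads are not disabled.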
   193  	file.numChunks = int(numChunks)
   194  	// Update cached fields for 0-Byte files.
   195  	if file.staticMetadata.FileSize == 0 {
   196  		file.staticMetadata.CachedHealth = 0
   197  		file.staticMetadata.CachedStuckHealth = 0
   198  		file.staticMetadata.CachedRedundancy = float64(erasureCode.NumPieces()) / float64(erasureCode.MinPieces())
   199  		file.staticMetadata.CachedUserRedundancy = file.staticMetadata.CachedRedundancy
   200  		file.staticMetadata.CachedUploadProgress = 100
   201  	}
   202  	// Save file.
   203  	initialChunks := make([]chunk, file.numChunks)
   204  	for chunkIndex := range initialChunks {
   205  		initialChunks[chunkIndex].Index = chunkIndex
   206  		initialChunks[chunkIndex].Pieces = make([][]piece, erasureCode.NumPieces())
   207  	}
   208  	return file, file.saveFile(initialChunks)
   209  }
   210  
   211  // GrowNumChunks increases the number of chunks in the SiaFile to numChunks. If
   212  // the file already contains >= numChunks chunks then GrowNumChunks is a no-op.
   213  func (sf *SiaFile) GrowNumChunks(numChunks uint64) (err error) {
   214  	sf.mu.Lock()
   215  	defer sf.mu.Unlock()
   216  	updates, err := sf.growNumChunks(numChunks)
   217  
   218  	if err != nil {
   219  		return err
   220  	}
   221  	return sf.createAndApplyTransaction(updates...)
   222  }
   223  
   224  // RemoveLastChunk removes the last chunk of the SiaFile and truncates the file
   225  // accordingly.
   226  func (sf *SiaFile) RemoveLastChunk() error {
   227  	sf.mu.Lock()
   228  	defer sf.mu.Unlock()
   229  	return sf.removeLastChunk()
   230  }
   231  
   232  // SetFileSize changes the fileSize of the SiaFile.
   233  func (sf *SiaFile) SetFileSize(fileSize uint64) error {
   234  	sf.mu.Lock()
   235  	defer sf.mu.Unlock()
   236  	if sf.deleted {
   237  		return errors.New("can't set filesize of deleted file")
   238  	}
   239  	if sf.staticMetadata.HasPartialChunk {
   240  		return errors.New("can't call SetFileSize on file with partial chunk")
   241  	}
   242  	// Make sure that SetFileSize doesn't affect the number of total chunks within
   243  	// the file.
   244  	newNumChunks := fileSize / sf.staticChunkSize()
   245  	if fileSize%sf.staticChunkSize() != 0 {
   246  		newNumChunks++
   247  	}
   248  	if uint64(sf.numChunks) != newNumChunks {
   249  		return fmt.Errorf("can't change fileSize since it would change the number of chunks from %v to %v",
   250  			sf.numChunks, newNumChunks)
   251  	}
   252  	// Update filesize.
   253  	sf.staticMetadata.FileSize = int64(fileSize)
   254  	// Check if the file changed from not having a partial chunk to having one.
   255  	if !sf.staticMetadata.DisablePartialChunk && uint64(sf.staticMetadata.FileSize)%sf.staticChunkSize() != 0 {
   256  		if sf.numChunks > 0 {
   257  			// Last fullChunk is replaced by a partial chunk so we remove it.
   258  			if err := sf.removeLastChunk(); err != nil {
   259  				return err
   260  			}
   261  		}
   262  		sf.staticMetadata.HasPartialChunk = true
   263  		if sf.partialsSiaFile == nil {
   264  			return errors.New("can't turn file without partial chunk into a file with one if partialsSiaFile == nil")
   265  		}
   266  	}
   267  	updates, err := sf.saveMetadataUpdates()
   268  	if err != nil {
   269  		return err
   270  	}
   271  	return sf.createAndApplyTransaction(updates...)
   272  }
   273  
   274  // AddPiece adds an uploaded piece to the file. It also updates the host table
   275  // if the public key of the host is not already known.
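//
// Illustrative usage (hostKey and root are hypothetical placeholders for a
// host's types.SiaPublicKey and the piece's crypto.Hash):
//
//	err := sf.AddPiece(hostKey, 0, 3, root) // piece 3 of chunk 0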
   276  func (sf *SiaFile) AddPiece(pk types.SiaPublicKey, chunkIndex, pieceIndex uint64, merkleRoot crypto.Hash) error {
   277  	sf.mu.Lock()
   278  	defer sf.mu.Unlock()
   279  	// If the file was deleted we can't add a new piece since it would write
   280  	// the file to disk again.
   281  	if sf.deleted {
   282  		return errors.New("can't add piece to deleted file")
   283  	}
   284  	// Don't allow adding pieces to incomplete chunk which is not yet part of a
   285  	// combined chunk.
   286  	if sf.isIncompletePartialChunk(chunkIndex) {
   287  		return errors.New("can't add piece to incomplete partial chunk")
   288  	}
   289  
   290  	// Update cache.
   291  	defer sf.uploadProgressAndBytes()
   292  
   293  	// Handle piece being added to the partial chunk.
   294  	if cci, ok := sf.isIncludedPartialChunk(chunkIndex); ok {
   295  		return sf.partialsSiaFile.AddPiece(pk, cci.Index, pieceIndex, merkleRoot)
   296  	}
   297  
   298  	// Get the index of the host in the public key table.
   299  	tableIndex := -1
   300  	for i, hpk := range sf.pubKeyTable {
   301  		if hpk.PublicKey.Algorithm == pk.Algorithm && bytes.Equal(hpk.PublicKey.Key, pk.Key) {
   302  			tableIndex = i
   303  			break
   304  		}
   305  	}
   306  	// If we don't know the host yet, we add it to the table.
   307  	tableChanged := false
   308  	if tableIndex == -1 {
   309  		sf.pubKeyTable = append(sf.pubKeyTable, HostPublicKey{
   310  			PublicKey: pk,
   311  			Used:      true,
   312  		})
   313  		tableIndex = len(sf.pubKeyTable) - 1
   314  		tableChanged = true
   315  	}
   316  	// Check if the chunkIndex is valid.
   317  	if chunkIndex >= uint64(sf.numChunks) {
   318  		return fmt.Errorf("chunkIndex %v out of bounds (%v)", chunkIndex, sf.numChunks)
   319  	}
   320  	// Get the chunk from disk.
   321  	chunk, err := sf.chunk(int(chunkIndex))
   322  	if err != nil {
   323  		return errors.AddContext(err, "failed to get chunk")
   324  	}
   325  	// Check if the pieceIndex is valid.
   326  	if pieceIndex >= uint64(len(chunk.Pieces)) {
   327  		return fmt.Errorf("pieceIndex %v out of bounds (%v)", pieceIndex, len(chunk.Pieces))
   328  	}
   329  	// Add the piece to the chunk.
   330  	chunk.Pieces[pieceIndex] = append(chunk.Pieces[pieceIndex], piece{
   331  		HostTableOffset: uint32(tableIndex),
   332  		MerkleRoot:      merkleRoot,
   333  	})
   334  
   335  	// Update the AccessTime, ChangeTime and ModTime.
   336  	sf.staticMetadata.AccessTime = time.Now()
   337  	sf.staticMetadata.ChangeTime = sf.staticMetadata.AccessTime
   338  	sf.staticMetadata.ModTime = sf.staticMetadata.AccessTime
   339  
   340  	// Defrag the chunk if necessary.
   341  	chunkSize := marshaledChunkSize(chunk.numPieces())
   342  	maxChunkSize := int64(sf.staticMetadata.StaticPagesPerChunk) * pageSize
   343  	if chunkSize > maxChunkSize {
   344  		sf.defragChunk(&chunk)
   345  	}
   346  
   347  	// If the chunk is still too large after the defrag, we abort.
   348  	chunkSize = marshaledChunkSize(chunk.numPieces())
   349  	if chunkSize > maxChunkSize {
   350  		return fmt.Errorf("chunk doesn't fit into allocated space %v > %v", chunkSize, maxChunkSize)
   351  	}
   352  	// Update the file atomically.
   353  	var updates []writeaheadlog.Update
   354  	// Get the updates for the header.
   355  	if tableChanged {
   356  		// If the table changed we update the whole header.
   357  		updates, err = sf.saveHeaderUpdates()
   358  	} else {
   359  		// Otherwise just the metadata.
   360  		updates, err = sf.saveMetadataUpdates()
   361  	}
   362  	if err != nil {
   363  		return err
   364  	}
   365  	// Save the changed chunk to disk.
   366  	chunkUpdate := sf.saveChunkUpdate(chunk)
   367  	return sf.createAndApplyTransaction(append(updates, chunkUpdate)...)
   368  }
   369  
   370  // chunkHealth returns the health and user health of the chunk which is defined
   371  // as the percent of parity pieces remaining. When calculating the user health
   372  // we assume that an incomplete partial chunk has full health. For the regular
   373  // health we don't assume that.
   374  //
   375  // health = 0 is full redundancy, health <= 1 is recoverable, health > 1 needs
   376  // to be repaired from disk or repair by upload streaming
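//
// Worked example (illustrative, assuming a 10-of-30 erasure code): with
// minPieces = 10, numPieces = 30 and 20 good pieces, the health is
// 1 - (20-10)/(30-10) = 0.5, i.e. the chunk is recoverable but has lost half
// of its parity pieces.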
   377  func (sf *SiaFile) chunkHealth(chunk chunk, offlineMap map[string]bool, goodForRenewMap map[string]bool) (h float64, uh float64, err error) {
   378  	// Handle returning health of complete partial chunk.
   379  	incomplete := sf.isIncompletePartialChunk(uint64(chunk.Index))
   380  	if cci, ok := sf.isIncludedPartialChunk(uint64(chunk.Index)); ok && !incomplete {
   381  		return sf.partialsSiaFile.ChunkHealth(int(cci.Index), offlineMap, goodForRenewMap)
   382  	}
   383  	// The max number of good pieces that a chunk can have is NumPieces()
   384  	numPieces := sf.staticMetadata.staticErasureCode.NumPieces()
   385  	minPieces := sf.staticMetadata.staticErasureCode.MinPieces()
   386  	targetPieces := float64(numPieces - minPieces)
   387  	// Find the good pieces that are good for renew
   388  	goodPieces, _ := sf.goodPieces(chunk, offlineMap, goodForRenewMap)
   389  	chunkHealth := 1 - (float64(int(goodPieces)-minPieces) / targetPieces)
   390  	// Handle health of incomplete partial chunk.
   391  	if sf.isIncompletePartialChunk(uint64(chunk.Index)) {
   392  		return chunkHealth, 0, nil // Partial chunk has full health if not yet included in combined chunk
   393  	}
   394  	// Sanity Check, if something went wrong, default to minimum health
   395  	if int(goodPieces) > numPieces || goodPieces < 0 {
   396  		build.Critical("unexpected number of goodPieces for chunkHealth")
   397  		goodPieces = 0
   398  	}
   399  	return chunkHealth, chunkHealth, nil
   400  }
   401  
   402  // ChunkHealth returns the health of the chunk which is defined as the percent
   403  // of parity pieces remaining.
   404  func (sf *SiaFile) ChunkHealth(index int, offlineMap map[string]bool, goodForRenewMap map[string]bool) (float64, float64, error) {
   405  	sf.mu.Lock()
   406  	defer sf.mu.Unlock()
   407  	chunk, err := sf.chunk(index)
   408  	if err != nil {
   409  		return 0, 0, errors.AddContext(err, "failed to read chunk")
   410  	}
   411  	return sf.chunkHealth(chunk, offlineMap, goodForRenewMap)
   412  }
   413  
   414  // Delete removes the file from disk and marks it as deleted. Once the file is
   415  // deleted, certain methods should return an error.
   416  func (sf *SiaFile) Delete() error {
   417  	sf.mu.Lock()
   418  	defer sf.mu.Unlock()
   419  	// We can't delete a file multiple times.
   420  	if sf.deleted {
   421  		return errors.New("requested file has already been deleted")
   422  	}
   423  	update := sf.createDeleteUpdate()
   424  	err := sf.createAndApplyTransaction(update)
   425  	sf.deleted = true
   426  	return err
   427  }
   428  
   429  // Deleted indicates if this file has been deleted by the user.
   430  func (sf *SiaFile) Deleted() bool {
   431  	sf.mu.RLock()
   432  	defer sf.mu.RUnlock()
   433  	return sf.deleted
   434  }
   435  
   436  // ErasureCode returns the erasure coder used by the file.
   437  func (sf *SiaFile) ErasureCode() modules.ErasureCoder {
   438  	return sf.staticMetadata.staticErasureCode
   439  }
   440  
   441  // SaveWithChunks saves the file's header to disk and appends the provided raw
   442  // chunks at the end of the file.
   443  func (sf *SiaFile) SaveWithChunks(chunks []chunk) error {
   444  	sf.mu.Lock()
   445  	defer sf.mu.Unlock()
   446  	updates, err := sf.saveHeaderUpdates()
   447  	if err != nil {
   448  		return errors.AddContext(err, "failed to create header updates")
   449  	}
   450  	for _, chunk := range chunks {
   451  		updates = append(updates, sf.saveChunkUpdate(chunk))
   452  	}
   453  	return sf.createAndApplyTransaction(updates...)
   454  }
   455  
   456  // SaveHeader saves the file's header to disk.
   457  func (sf *SiaFile) SaveHeader() error {
   458  	sf.mu.Lock()
   459  	defer sf.mu.Unlock()
   460  	updates, err := sf.saveHeaderUpdates()
   461  	if err != nil {
   462  		return err
   463  	}
   464  	return sf.createAndApplyTransaction(updates...)
   465  }
   466  
   467  // SaveMetadata saves the file's metadata to disk.
   468  func (sf *SiaFile) SaveMetadata() error {
   469  	sf.mu.Lock()
   470  	defer sf.mu.Unlock()
   471  	if sf.deleted {
   472  		return errors.New("can't SaveMetadata of deleted file")
   473  	}
   474  	updates, err := sf.saveMetadataUpdates()
   475  	if err != nil {
   476  		return err
   477  	}
   478  	return sf.createAndApplyTransaction(updates...)
   479  }
   480  
   481  // Expiration updates CachedExpiration with the lowest height at which any of
   482  // the file's contracts will expire and returns the new value.
   483  func (sf *SiaFile) Expiration(contracts map[string]modules.RenterContract) types.BlockHeight {
   484  	sf.mu.Lock()
   485  	defer sf.mu.Unlock()
   486  	if len(sf.pubKeyTable) == 0 {
   487  		sf.staticMetadata.CachedExpiration = 0
   488  		return 0
   489  	}
   490  
   491  	// If the file has a combined chunk, also take the pubkeys from that chunk into
   492  	// account.
   493  	lowest := ^types.BlockHeight(0)
   494  	var pieceSets [][]Piece
   495  	for _, pc := range sf.staticMetadata.PartialChunks {
   496  		if pc.Status != CombinedChunkStatusCompleted {
   497  			continue
   498  		}
   499  		ps, err := sf.partialsSiaFile.Pieces(pc.Index)
   500  		if err == nil {
   501  			pieceSets = append(pieceSets, ps...)
   502  		}
   503  	}
   504  	for _, pieceSet := range pieceSets {
   505  		for _, piece := range pieceSet {
   506  			contract, exists := contracts[piece.HostPubKey.String()]
   507  			if !exists {
   508  				continue
   509  			}
   510  			if contract.EndHeight < lowest {
   511  				lowest = contract.EndHeight
   512  			}
   513  		}
   514  	}
   515  
   516  	for _, pk := range sf.pubKeyTable {
   517  		contract, exists := contracts[pk.PublicKey.String()]
   518  		if !exists {
   519  			continue
   520  		}
   521  		if contract.EndHeight < lowest {
   522  			lowest = contract.EndHeight
   523  		}
   524  	}
   525  	sf.staticMetadata.CachedExpiration = lowest
   526  	return lowest
   527  }
   528  
   529  // Health calculates the health of the file to be used in determining repair
   530  // priority. The health of the file is the worst health (highest value) of any of
   531  // its chunks, defined as the percent of parity pieces remaining. The NumStuckChunks will be
   532  // calculated for the SiaFile and returned.
   533  //
   534  // NOTE: The cached values of the health and stuck health will be set but not
   535  // saved to disk as Health() does not write to disk. If the cached values need
   536  // to be updated on disk then a metadata save method should be called in
   537  // conjunction with Health()
   538  //
   539  // health = 0 is full redundancy, health <= 1 is recoverable, health > 1 needs
   540  // to be repaired from disk
   541  func (sf *SiaFile) Health(offline map[string]bool, goodForRenew map[string]bool) (h float64, sh float64, uh float64, ush float64, nsc uint64) {
   542  	numPieces := float64(sf.staticMetadata.staticErasureCode.NumPieces())
   543  	minPieces := float64(sf.staticMetadata.staticErasureCode.MinPieces())
   544  	worstHealth := 1 - ((0 - minPieces) / (numPieces - minPieces))
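	// For illustration (assuming a 10-of-30 erasure code): worstHealth is
	// 1 - ((0 - 10) / (30 - 10)) = 1.5, the health of a chunk with zero good
	// pieces.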
   545  
   546  	sf.mu.Lock()
   547  	defer sf.mu.Unlock()
   548  	// Update the cache.
   549  	defer func() {
   550  		sf.staticMetadata.CachedHealth = h
   551  		sf.staticMetadata.CachedStuckHealth = sh
   552  	}()
   553  
   554  	// Check if siafile is deleted
   555  	if sf.deleted {
   556  		// Don't return health information of a deleted file to prevent
   557  		// misrepresenting the health information of a directory
   558  		return 0, 0, 0, 0, 0
   559  	}
   560  	// Check for Zero byte files
   561  	if sf.staticMetadata.FileSize == 0 {
   562  		// Return default health information for zero byte files to prevent
   563  		// misrepresenting the health information of a directory
   564  		return 0, 0, 0, 0, 0
   565  	}
   566  	var health, stuckHealth, userHealth, userStuckHealth float64
   567  	var numStuckChunks uint64
   568  	err := sf.iterateChunksReadonly(func(c chunk) error {
   569  		chunkHealth, userChunkHealth, err := sf.chunkHealth(c, offline, goodForRenew)
   570  		if err != nil {
   571  			return err
   572  		}
   573  
   574  		// Update the health or stuckHealth of the file according to the health
   575  		// of the chunk. The health of the file is the worst health (highest
   576  		// number) of all the chunks in the file.
   577  		if c.Stuck {
   578  			numStuckChunks++
   579  			if chunkHealth > stuckHealth {
   580  				stuckHealth = chunkHealth
   581  			}
   582  			if userChunkHealth > userStuckHealth {
   583  				userStuckHealth = userChunkHealth
   584  			}
   585  		} else {
   586  			if chunkHealth > health {
   587  				health = chunkHealth
   588  			}
   589  			if userChunkHealth > userHealth {
   590  				userHealth = userChunkHealth
   591  			}
   592  		}
   593  		return nil
   594  	})
   595  	if err != nil {
   596  		build.Critical("failed to iterate over chunks: ", err)
   597  		return 0, 0, 0, 0, 0
   598  	}
   599  
   600  	// Check if all chunks are stuck. If so, set the health to full health (0)
   601  	// to avoid the file being targeted for repair.
   602  	if int(numStuckChunks) == sf.numChunks {
   603  		health = float64(0)
   604  	}
   605  	// Sanity check, verify that the calculated health is not worse (greater)
   606  	// than the worst health.
   607  	if userHealth > worstHealth || health > worstHealth {
   608  		build.Critical("WARN: health out of bounds. Max value, Min value, health found", worstHealth, 0, health, userHealth)
   609  		health = worstHealth
   610  	}
   611  	// Sanity check, verify that the calculated stuck health is not worse
   612  	// (greater) than the worst health.
   613  	if userStuckHealth > worstHealth || stuckHealth > worstHealth {
   614  		build.Critical("WARN: stuckHealth out of bounds. Max value, Min value, stuckHealth found", worstHealth, 0, stuckHealth, userStuckHealth)
   615  		stuckHealth = worstHealth
   616  	}
   617  	// Sanity Check that the number of stuck chunks makes sense
   618  	expectedStuckChunks := sf.numStuckChunks()
   619  	if numStuckChunks != expectedStuckChunks {
   620  		build.Critical("WARN: the number of stuck chunks found does not match metadata", numStuckChunks, expectedStuckChunks)
   621  	}
   622  	return health, stuckHealth, userHealth, userStuckHealth, numStuckChunks
   623  }
   624  
   625  // HostPublicKeys returns all the public keys of hosts the file has ever been
   626  // uploaded to. That means some of those hosts might no longer be in use.
   627  func (sf *SiaFile) HostPublicKeys() (spks []types.SiaPublicKey) {
   628  	sf.mu.RLock()
   629  	defer sf.mu.RUnlock()
   630  	// Only return the keys, not the whole entry.
   631  	keys := make([]types.SiaPublicKey, 0, len(sf.pubKeyTable))
   632  	for _, key := range sf.pubKeyTable {
   633  		keys = append(keys, key.PublicKey)
   634  	}
   635  	return keys
   636  }
   637  
   638  // IsIncludedPartialChunk returns 'true' if the provided index points to a
   639  // partial chunk which has been added to the partials sia file already.
   640  func (sf *SiaFile) IsIncludedPartialChunk(chunkIndex uint64) bool {
   641  	sf.mu.RLock()
   642  	defer sf.mu.RUnlock()
   643  	_, b := sf.isIncludedPartialChunk(chunkIndex)
   644  	return b
   645  }
   646  
   647  // IsIncompletePartialChunk returns 'true' if the provided index points to a
   648  // partial chunk which hasn't been added to a partials siafile yet.
   649  func (sf *SiaFile) IsIncompletePartialChunk(chunkIndex uint64) bool {
   650  	sf.mu.RLock()
   651  	defer sf.mu.RUnlock()
   652  	return sf.isIncompletePartialChunk(chunkIndex)
   653  }
   654  
   655  // NumChunks returns the number of chunks the file consists of. This will
   656  // return the number of chunks the file consists of even if the file is not
   657  // fully uploaded yet.
   658  func (sf *SiaFile) NumChunks() uint64 {
   659  	sf.mu.RLock()
   660  	defer sf.mu.RUnlock()
   661  	return uint64(sf.numChunks)
   662  }
   663  
   664  // Pieces returns all the pieces for a chunk in a slice of slices that contains
   665  // all the pieces for a certain index.
   666  func (sf *SiaFile) Pieces(chunkIndex uint64) ([][]Piece, error) {
   667  	sf.mu.RLock()
   668  	defer sf.mu.RUnlock()
   669  	if chunkIndex >= uint64(sf.numChunks) {
   670  		err := fmt.Errorf("index %v out of bounds (%v)", chunkIndex, sf.numChunks)
   671  		build.Critical(err)
   672  		return [][]Piece{}, err
   673  	}
   674  	// Handle partial chunk.
   675  	if cc, ok := sf.isIncludedPartialChunk(chunkIndex); ok {
   676  		return sf.partialsSiaFile.Pieces(cc.Index) // get pieces from linked siafile
   677  	}
   678  	if sf.isIncompletePartialChunk(chunkIndex) {
   679  		return make([][]Piece, sf.staticMetadata.staticErasureCode.NumPieces()), nil
   680  	}
   681  	chunk, err := sf.chunk(int(chunkIndex))
   682  	if err != nil {
   683  		return nil, err
   684  	}
   685  	// Resolve pieces to Pieces.
   686  	pieces := make([][]Piece, len(chunk.Pieces))
   687  	for pieceIndex := range pieces {
   688  		pieces[pieceIndex] = make([]Piece, len(chunk.Pieces[pieceIndex]))
   689  		for i, piece := range chunk.Pieces[pieceIndex] {
   690  			pieces[pieceIndex][i] = Piece{
   691  				HostPubKey: sf.hostKey(piece.HostTableOffset).PublicKey,
   692  				MerkleRoot: piece.MerkleRoot,
   693  			}
   694  		}
   695  	}
   696  	return pieces, nil
   697  }
   698  
   699  // Redundancy returns the redundancy of the least redundant chunk. A file
   700  // becomes available when this redundancy is >= 1. Assumes that every piece is
   701  // unique within a file contract. -1 is returned if the file has size 0. It
   702  // takes two arguments, a map of offline contracts for this file and a map that
   703  // indicates if a contract is goodForRenew. The first redundancy returned is the
   704  // one that should be used by the repair code and is more accurate. The other
   705  // one is the redundancy presented to users.
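//
// Worked example (illustrative, assuming a 10-of-30 erasure code): a chunk
// with 25 unique goodForRenew pieces has a redundancy of 25/10 = 2.5x, and the
// file's redundancy is the minimum such value over all of its chunks.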
   706  func (sf *SiaFile) Redundancy(offlineMap map[string]bool, goodForRenewMap map[string]bool) (r, ur float64, err error) {
   707  	sf.mu.Lock()
   708  	defer sf.mu.Unlock()
   709  	// Update the cache.
   710  	defer func() {
   711  		sf.staticMetadata.CachedRedundancy = r
   712  		sf.staticMetadata.CachedUserRedundancy = ur
   713  	}()
   714  	if sf.staticMetadata.FileSize == 0 {
   715  		// TODO change this once tiny files are supported.
   716  		if sf.numChunks != 1 {
   717  			// should never happen
   718  			return -1, -1, nil
   719  		}
   720  		ec := sf.staticMetadata.staticErasureCode
   721  		r = float64(ec.NumPieces()) / float64(ec.MinPieces())
   722  		ur = r
   723  		return
   724  	}
   725  
   726  	ec := sf.staticMetadata.staticErasureCode
   727  	minRedundancy := math.MaxFloat64
   728  	minRedundancyUser := minRedundancy
   729  	minRedundancyNoRenewUser := math.MaxFloat64
   730  	minRedundancyNoRenew := math.MaxFloat64
   731  	err = sf.iterateChunksReadonly(func(chunk chunk) error {
   732  		// Loop over chunks and remember how many unique pieces of the chunk
   733  		// were goodForRenew and how many were not.
   734  		numPiecesRenew, numPiecesNoRenew := sf.goodPieces(chunk, offlineMap, goodForRenewMap)
   735  		redundancy := float64(numPiecesRenew) / float64(sf.staticMetadata.staticErasureCode.MinPieces())
   736  		redundancyUser := redundancy
   737  		if incomplete := sf.isIncompletePartialChunk(uint64(chunk.Index)); incomplete {
   738  			// If the partial chunk is incomplete it has full redundancy.
   739  			redundancyUser = float64(ec.NumPieces()) / float64(ec.MinPieces())
   740  		}
   741  		if redundancy < minRedundancy {
   742  			minRedundancy = redundancy
   743  		}
   744  		if redundancyUser < minRedundancyUser {
   745  			minRedundancyUser = redundancyUser
   746  		}
   747  		redundancyNoRenew := float64(numPiecesNoRenew) / float64(ec.MinPieces())
   748  		redundancyNoRenewUser := redundancyNoRenew
   749  		if incomplete := sf.isIncompletePartialChunk(uint64(chunk.Index)); incomplete {
   750  			// If the partial chunk is incomplete it has full redundancy.
   751  			redundancyNoRenewUser = float64(ec.NumPieces()) / float64(ec.MinPieces())
   752  		}
   753  		if redundancyNoRenewUser < minRedundancyNoRenewUser {
   754  			minRedundancyNoRenewUser = redundancyNoRenewUser
   755  		}
   756  		if redundancyNoRenew < minRedundancyNoRenew {
   757  			minRedundancyNoRenew = redundancyNoRenew
   758  		}
   759  		return nil
   760  	})
   761  	if err != nil {
   762  		return 0, 0, err
   763  	}
   764  
   765  	// If the redundancyUser is smaller than 1x we return the redundancy that
   766  	// includes contracts that are not good for renewal. The reason for this is a
   767  	// better user experience. If the renter operates correctly, redundancyUser
   768  	// should never go above numPieces / minPieces and redundancyNoRenewUser should
   769  	// never go below 1.
   770  	if minRedundancyUser < 1 && minRedundancyNoRenewUser >= 1 {
   771  		ur = 1
   772  	} else if minRedundancy < 1 {
   773  		ur = minRedundancyNoRenewUser
   774  	} else {
   775  		ur = minRedundancyUser
   776  	}
   777  	r = minRedundancy
   778  	return
   779  }
   780  
   781  // SetAllStuck sets the Stuck field of all chunks to stuck.
   782  func (sf *SiaFile) SetAllStuck(stuck bool) (err error) {
   783  	sf.mu.Lock()
   784  	defer sf.mu.Unlock()
   785  
   786  	// If the file has been deleted we can't mark a chunk as stuck.
   787  	if sf.deleted {
   788  		return errors.New("can't call SetStuck on deleted file")
   789  	}
   790  	// Update the Stuck field of each chunk.
   791  	updates, errIter := sf.iterateChunks(func(chunk *chunk) (bool, error) {
   792  		if chunk.Stuck != stuck {
   793  			chunk.Stuck = stuck
   794  			return true, nil
   795  		}
   796  		return false, nil
   797  	})
   798  	if errIter != nil {
   799  		return errIter
   800  	}
   801  	// Update NumStuckChunks in siafile metadata
   802  	nsc := sf.staticMetadata.NumStuckChunks
   803  	defer func() {
   804  		if err != nil {
   805  			sf.staticMetadata.NumStuckChunks = nsc
   806  		}
   807  	}()
   808  	if stuck && sf.staticMetadata.HasPartialChunk && len(sf.staticMetadata.PartialChunks) == 0 {
   809  		sf.staticMetadata.NumStuckChunks = uint64(sf.numChunks) - 1 // partial chunk can't be stuck in this state
   810  	} else if stuck {
   811  		sf.staticMetadata.NumStuckChunks = uint64(sf.numChunks)
   812  	} else {
   813  		sf.staticMetadata.NumStuckChunks = 0
   814  	}
   815  	// Create metadata update and apply updates on disk
   816  	metadataUpdates, err := sf.saveMetadataUpdates()
   817  	if err != nil {
   818  		return err
   819  	}
   820  	updates = append(updates, metadataUpdates...)
   821  	return sf.createAndApplyTransaction(updates...)
   822  }
   823  
   824  // SetChunkStatusCompleted sets the Status field of the partial chunk at index
   825  // pci to CombinedChunkStatusCompleted.
   826  func (sf *SiaFile) SetChunkStatusCompleted(pci uint64) error {
   827  	sf.mu.Lock()
   828  	defer sf.mu.Unlock()
   829  	sf.staticMetadata.PartialChunks[pci].Status = CombinedChunkStatusCompleted
   830  	updates, err := sf.saveMetadataUpdates()
   831  	if err != nil {
   832  		return err
   833  	}
   834  	return sf.createAndApplyTransaction(updates...)
   835  }
   836  
   837  // SetStuck sets the Stuck field of the chunk at the given index
   838  func (sf *SiaFile) SetStuck(index uint64, stuck bool) (err error) {
   839  	sf.mu.Lock()
   840  	defer sf.mu.Unlock()
   841  	return sf.setStuck(index, stuck)
   842  }
   843  
   844  // StuckChunkByIndex returns if the chunk at the index is marked as Stuck or not
   845  func (sf *SiaFile) StuckChunkByIndex(index uint64) (bool, error) {
   846  	sf.mu.Lock()
   847  	defer sf.mu.Unlock()
   848  	chunk, err := sf.chunk(int(index))
   849  	if err != nil {
   850  		return false, errors.AddContext(err, "failed to read chunk")
   851  	}
   852  	return chunk.Stuck, nil
   853  }
   854  
   855  // UID returns a unique identifier for this file.
   856  func (sf *SiaFile) UID() SiafileUID {
   857  	sf.mu.RLock()
   858  	defer sf.mu.RUnlock()
   859  	return sf.staticMetadata.UniqueID
   860  }
   861  
   862  // UpdateUsedHosts updates the 'Used' flag for the entries in the pubKeyTable
   863  // of the SiaFile. The keys of all used hosts should be passed to the method
   864  // and the SiaFile will update the flag for hosts it knows of to 'true' and set
   865  // hosts which were not passed in to 'false'.
   866  func (sf *SiaFile) UpdateUsedHosts(used []types.SiaPublicKey) error {
   867  	sf.mu.Lock()
   868  	defer sf.mu.Unlock()
   869  	// Can't update used hosts on deleted file.
   870  	if sf.deleted {
   871  		return errors.New("can't call UpdateUsedHosts on deleted file")
   872  	}
   873  	// Create a map of the used keys for faster lookups.
   874  	usedMap := make(map[string]struct{})
   875  	for _, key := range used {
   876  		usedMap[key.String()] = struct{}{}
   877  	}
   878  	// Mark the entries in the table. If the entry exists 'Used' is true.
   879  	// Otherwise it's 'false'.
   880  	var unusedHosts uint
   881  	for i, entry := range sf.pubKeyTable {
   882  		_, used := usedMap[entry.PublicKey.String()]
   883  		sf.pubKeyTable[i].Used = used
   884  		if !used {
   885  			unusedHosts++
   886  		}
   887  	}
   888  	// Prune the pubKeyTable if necessary. If we have too many unused hosts we
   889  	// want to remove them from the table but only if we have enough used hosts.
   890  	// Otherwise we might be pruning hosts that could become used again since
   891  	// the file might be in flux while it uploads or repairs
   892  	pruned := false
   893  	tooManyUnusedHosts := unusedHosts > pubKeyTablePruneThreshold
   894  	enoughUsedHosts := len(usedMap) > sf.staticMetadata.staticErasureCode.NumPieces()
   895  	if tooManyUnusedHosts && enoughUsedHosts {
   896  		sf.pruneHosts()
   897  		pruned = true
   898  	}
   899  	// Save the header to disk.
   900  	updates, err := sf.saveHeaderUpdates()
   901  	if err != nil {
   902  		return err
   903  	}
   904  	// If we pruned the hosts we also need to save the body.
   905  	if pruned {
   906  		chunkUpdates, err := sf.iterateChunks(func(chunk *chunk) (bool, error) {
   907  			return true, nil
   908  		})
   909  		if err != nil {
   910  			return err
   911  		}
   912  		updates = append(updates, chunkUpdates...)
   913  	}
   914  	err = sf.createAndApplyTransaction(updates...)
   915  	if err != nil {
   916  		return err
   917  	}
   918  	// Also update used hosts for potential partial chunk.
   919  	if sf.partialsSiaFile != nil {
   920  		return sf.partialsSiaFile.UpdateUsedHosts(used)
   921  	}
   922  	return nil
   923  }
   924  
   925  // defragChunk removes pieces which belong to bad hosts and if that wasn't
   926  // enough to reduce the chunkSize below the maximum size, it will remove
   927  // redundant pieces.
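//
// For illustration (hypothetical numbers, not package constants): if the
// marshaled chunk can hold at most 120 pieces and the chunk has 30 piece sets,
// each set keeps at most 120/30 = 4 pieces; pieces from unused hosts are
// dropped and anything beyond that per-set cap is discarded.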
   928  func (sf *SiaFile) defragChunk(chunk *chunk) {
   929  	// Calculate how many pieces every pieceSet can contain.
   930  	maxChunkSize := int64(sf.staticMetadata.StaticPagesPerChunk) * pageSize
   931  	maxPieces := (maxChunkSize - marshaledChunkOverhead) / marshaledPieceSize
   932  	maxPiecesPerSet := maxPieces / int64(len(chunk.Pieces))
   933  
   934  	// Filter out pieces with unused hosts since we don't have contracts with
   935  	// those anymore.
   936  	for i, pieceSet := range chunk.Pieces {
   937  		var newPieceSet []piece
   938  		for _, piece := range pieceSet {
   939  			if int64(len(newPieceSet)) == maxPiecesPerSet {
   940  				break
   941  			}
   942  			if sf.hostKey(piece.HostTableOffset).Used {
   943  				newPieceSet = append(newPieceSet, piece)
   944  			}
   945  		}
   946  		chunk.Pieces[i] = newPieceSet
   947  	}
   948  }
   949  
   950  // hostKey fetches a host's key from the pubKeyTable. It also checks the offset
   951  // against the table to make sure it's not out of bounds. If it is, build.Critical
   952  // is called and, to avoid a crash in production, dummy hosts are added.
   953  func (sf *SiaFile) hostKey(offset uint32) HostPublicKey {
   954  	// Add dummy hostkeys to the table in case of siafile corruption and mark
   955  	// them as unused. The next time the table is pruned, the keys will be
   956  	// removed, which is fine. This doesn't fix heavy corruption and the file may
   957  	// still be lost, but it's better than crashing.
   958  	if offset >= uint32(len(sf.pubKeyTable)) {
   959  		// Causes tests to fail. The following for loop will try to fix the
   960  		// corruption on release builds.
   961  		build.Critical("piece.HostTableOffset", offset, " >= len(sf.pubKeyTable)", len(sf.pubKeyTable))
   962  		for offset >= uint32(len(sf.pubKeyTable)) {
   963  			sf.pubKeyTable = append(sf.pubKeyTable, HostPublicKey{Used: false})
   964  		}
   965  	}
   966  	return sf.pubKeyTable[offset]
   967  }
   968  
   969  // isIncludedPartialChunk returns 'true' if the provided index points to a
   970  // partial chunk which has been added to the partials sia file already.
   971  func (sf *SiaFile) isIncludedPartialChunk(chunkIndex uint64) (PartialChunkInfo, bool) {
   972  	idx := CombinedChunkIndex(uint64(sf.numChunks), chunkIndex, len(sf.staticMetadata.PartialChunks))
   973  	if idx == -1 {
   974  		return PartialChunkInfo{}, false
   975  	}
   976  	cc := sf.staticMetadata.PartialChunks[idx]
   977  	return cc, cc.Status >= CombinedChunkStatusInComplete
   978  }
   979  
   980  // isIncompletePartialChunk returns 'true' if the provided index points to a
   981  // partial chunk which hasn't been added to a partials siafile yet.
   982  func (sf *SiaFile) isIncompletePartialChunk(chunkIndex uint64) bool {
   983  	idx := CombinedChunkIndex(uint64(sf.numChunks), chunkIndex, len(sf.staticMetadata.PartialChunks))
   984  	if idx == -1 {
   985  		return sf.staticMetadata.HasPartialChunk && chunkIndex == uint64(sf.numChunks-1)
   986  	}
   987  	return sf.staticMetadata.PartialChunks[idx].Status < CombinedChunkStatusCompleted
   988  }
   989  
   990  // pruneHosts prunes the unused hostkeys from the file, updates the
   991  // HostTableOffset of the pieces and removes pieces which no longer have a
   992  // host.
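//
// Illustrative example: if the table holds keys [A(used), B(unused), C(used)],
// pruning yields [A, C] with offsetMap {0:0, 2:1}; pieces referencing B are
// dropped and pieces referencing C are rewritten to offset 1.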
   993  func (sf *SiaFile) pruneHosts() ([]writeaheadlog.Update, error) {
   994  	var prunedTable []HostPublicKey
   995  	// Create a map to track how the indices of the hostkeys changed when being
   996  	// pruned.
   997  	offsetMap := make(map[uint32]uint32)
   998  	for i := uint32(0); i < uint32(len(sf.pubKeyTable)); i++ {
   999  		if sf.pubKeyTable[i].Used {
  1000  			prunedTable = append(prunedTable, sf.pubKeyTable[i])
  1001  			offsetMap[i] = uint32(len(prunedTable) - 1)
  1002  		}
  1003  	}
  1004  	sf.pubKeyTable = prunedTable
  1005  	// With this map we loop over all the chunks and pieces and update the ones
  1006  	// who got a new offset and remove the ones that no longer have one.
  1007  	return sf.iterateChunks(func(chunk *chunk) (bool, error) {
  1008  		for pieceIndex, pieceSet := range chunk.Pieces {
  1009  			var newPieceSet []piece
  1010  			for i, piece := range pieceSet {
  1011  				newOffset, exists := offsetMap[piece.HostTableOffset]
  1012  				if exists {
  1013  					pieceSet[i].HostTableOffset = newOffset
  1014  					newPieceSet = append(newPieceSet, pieceSet[i])
  1015  				}
  1016  			}
  1017  			chunk.Pieces[pieceIndex] = newPieceSet
  1018  		}
  1019  		return true, nil
  1020  	})
  1021  }
  1022  
  1023  // GoodPieces loops over the pieces of a chunk and tracks the number of unique
  1024  // pieces that are good for upload, meaning the host is online, and the number
  1025  // of unique pieces that are good for renew, meaning the contract is set to
  1026  // renew.
  1027  func (sf *SiaFile) GoodPieces(chunkIndex int, offlineMap map[string]bool, goodForRenewMap map[string]bool) (uint64, uint64) {
  1028  	sf.mu.RLock()
  1029  	defer sf.mu.RUnlock()
  1030  	chunk, err := sf.chunk(chunkIndex)
  1031  	if err != nil {
  1032  		build.Critical("failed to retrieve chunk for goodPieces: ", err)
  1033  		return 0, 0
  1034  	}
  1035  	return sf.goodPieces(chunk, offlineMap, goodForRenewMap)
  1036  }
  1037  
  1038  // goodPieces loops over the pieces of a chunk and tracks the number of unique
  1039  // pieces that are good for upload, meaning the host is online, and the number
  1040  // of unique pieces that are good for renew, meaning the contract is set to
  1041  // renew.
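//
// Illustrative example: a piece set whose copies live on one offline host and
// one online but !goodForRenew host counts toward goodForUpload but not
// goodForRenew; as soon as any copy sits on an online goodForRenew host, the
// set counts toward both.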
  1042  func (sf *SiaFile) goodPieces(chunk chunk, offlineMap map[string]bool, goodForRenewMap map[string]bool) (uint64, uint64) {
  1043  	numPiecesGoodForRenew := uint64(0)
  1044  	numPiecesGoodForUpload := uint64(0)
  1045  
  1046  	// Handle partial chunk.
  1047  	if cci, ok := sf.isIncludedPartialChunk(uint64(chunk.Index)); ok {
  1048  		return sf.partialsSiaFile.GoodPieces(int(cci.Index), offlineMap, goodForRenewMap)
  1049  	}
  1050  	if sf.isIncompletePartialChunk(uint64(chunk.Index)) {
  1051  		return 0, 0
  1052  	}
  1053  
  1054  	for _, pieceSet := range chunk.Pieces {
  1055  		// Remember if we encountered a goodForRenew piece or a
  1056  		// !goodForRenew piece that was at least online.
  1057  		foundGoodForRenew := false
  1058  		foundOnline := false
  1059  		for _, piece := range pieceSet {
  1060  			offline, exists1 := offlineMap[sf.hostKey(piece.HostTableOffset).PublicKey.String()]
  1061  			goodForRenew, exists2 := goodForRenewMap[sf.hostKey(piece.HostTableOffset).PublicKey.String()]
  1062  			if exists1 != exists2 {
  1063  				build.Critical("contract can't be in one map but not in the other")
  1064  			}
  1065  			if !exists1 || offline {
  1066  				continue
  1067  			}
  1068  			// If we found a goodForRenew piece we can stop.
  1069  			if goodForRenew {
  1070  				foundGoodForRenew = true
  1071  				break
  1072  			}
  1073  			// Otherwise we continue since there might be other hosts with
  1074  			// the same piece that are goodForRenew. We still remember that
  1075  			// we found an online piece though.
  1076  			foundOnline = true
  1077  		}
  1078  		if foundGoodForRenew {
  1079  			numPiecesGoodForRenew++
  1080  			numPiecesGoodForUpload++
  1081  		} else if foundOnline {
  1082  			numPiecesGoodForUpload++
  1083  		}
  1084  	}
  1085  	return numPiecesGoodForRenew, numPiecesGoodForUpload
  1086  }
  1087  
  1088  // UploadProgressAndBytes is the exported wrapper for uploadProgressAndBytes.
  1089  func (sf *SiaFile) UploadProgressAndBytes() (float64, uint64, error) {
  1090  	sf.mu.Lock()
  1091  	defer sf.mu.Unlock()
  1092  	return sf.uploadProgressAndBytes()
  1093  }
  1094  
  1095  // Chunk returns the chunk of a SiaFile at a given index.
  1096  func (sf *SiaFile) Chunk(chunkIndex uint64) (chunk, error) {
  1097  	sf.mu.Lock()
  1098  	defer sf.mu.Unlock()
  1099  	return sf.chunk(int(chunkIndex))
  1100  }
  1101  
  1102  // growNumChunks increases the number of chunks in the SiaFile to numChunks. If
  1103  // the file already contains >= numChunks chunks then growNumChunks is a no-op.
  1104  func (sf *SiaFile) growNumChunks(numChunks uint64) (updates []writeaheadlog.Update, err error) {
  1105  	if sf.deleted {
  1106  		return nil, errors.New("can't grow number of chunks of deleted file")
  1107  	}
  1108  	// Don't allow a SiaFile with a partial chunk to grow.
  1109  	if sf.staticMetadata.HasPartialChunk {
  1110  		return nil, errors.New("can't grow a siafile with a partial chunk")
  1111  	}
  1112  	// Check if we need to grow the file.
  1113  	if uint64(sf.numChunks) >= numChunks {
  1114  		// Handle edge case where file has 1 chunk but has a size of 0. When we grow
  1115  		// such a file to 1 chunk we want to increment the size to >0.
  1116  		sf.staticMetadata.FileSize = int64(sf.staticChunkSize() * uint64(sf.numChunks))
  1117  		return nil, nil
  1118  	}
  1119  	// Remember the number of chunks we have before adding any and restore it in case of an error.
  1120  	ncb := sf.numChunks
  1121  	defer func() {
  1122  		if err != nil {
  1123  			sf.numChunks = ncb
  1124  		}
  1125  	}()
  1126  	// Update the chunks.
  1127  	for uint64(sf.numChunks) < numChunks {
  1128  		newChunk := chunk{
  1129  			Index:  int(sf.numChunks),
  1130  			Pieces: make([][]piece, sf.staticMetadata.staticErasureCode.NumPieces()),
  1131  		}
  1132  		sf.numChunks++
  1133  		updates = append(updates, sf.saveChunkUpdate(newChunk))
  1134  	}
  1135  	// Update the fileSize.
  1136  	sf.staticMetadata.FileSize = int64(sf.staticChunkSize() * uint64(sf.numChunks))
  1137  	mdu, err := sf.saveMetadataUpdates()
  1138  	if err != nil {
  1139  		return nil, err
  1140  	}
  1141  	return append(updates, mdu...), nil
  1142  }
  1143  
  1144  // removeLastChunk removes the last chunk of the SiaFile and truncates the file
  1145  // accordingly. This method might change the metadata but doesn't persist the
  1146  // change itself. Handle this accordingly.
  1147  func (sf *SiaFile) removeLastChunk() error {
  1148  	if sf.deleted {
  1149  		return errors.New("can't remove last chunk of deleted file")
  1150  	}
  1151  	if sf.staticMetadata.HasPartialChunk {
  1152  		return errors.New("can't remove last chunk if it is a partial chunk")
  1153  	}
  1154  	// Remove a chunk. If the removed chunk was stuck, update the metadata.
  1155  	chunk, err := sf.chunk(sf.numChunks - 1)
  1156  	if err != nil {
  1157  		return err
  1158  	}
  1159  	if chunk.Stuck {
  1160  		sf.staticMetadata.NumStuckChunks--
  1161  	}
  1162  	// Truncate the file on disk.
  1163  	fi, err := os.Stat(sf.siaFilePath)
  1164  	if err != nil {
  1165  		return err
  1166  	}
  1167  	err = os.Truncate(sf.siaFilePath, fi.Size()-int64(sf.staticMetadata.StaticPagesPerChunk)*pageSize)
  1168  	if err != nil {
  1169  		return err
  1170  	}
  1171  	return nil
  1172  }
  1173  
  1174  // setStuck sets the Stuck field of the chunk at the given index
  1175  func (sf *SiaFile) setStuck(index uint64, stuck bool) (err error) {
  1176  	// Handle partial chunk.
  1177  	if cci, ok := sf.isIncludedPartialChunk(index); ok {
  1178  		return sf.partialsSiaFile.SetStuck(cci.Index, stuck)
  1179  	}
  1180  	if sf.isIncompletePartialChunk(index) {
  1181  		return nil // do nothing
  1182  	}
  1183  
  1184  	// If the file has been deleted we can't mark a chunk as stuck.
  1185  	if sf.deleted {
  1186  		return errors.New("can't call SetStuck on deleted file")
  1187  	}
  1188  	// Get chunk.
  1189  	chunk, err := sf.chunk(int(index))
  1190  	if err != nil {
  1191  		return err
  1192  	}
  1193  	// Check for change
  1194  	if stuck == chunk.Stuck {
  1195  		return nil
  1196  	}
  1197  	// Remember the current number of stuck chunks in case an error happens.
  1198  	nsc := sf.staticMetadata.NumStuckChunks
  1199  	s := chunk.Stuck
  1200  	defer func() {
  1201  		if err != nil {
  1202  			sf.staticMetadata.NumStuckChunks = nsc
  1203  			chunk.Stuck = s
  1204  		}
  1205  	}()
  1206  	// Update chunk and NumStuckChunks in siafile metadata
  1207  	chunk.Stuck = stuck
  1208  	if stuck {
  1209  		sf.staticMetadata.NumStuckChunks++
  1210  	} else {
  1211  		sf.staticMetadata.NumStuckChunks--
  1212  	}
  1213  	// Update chunk and metadata on disk
  1214  	updates, err := sf.saveMetadataUpdates()
  1215  	if err != nil {
  1216  		return err
  1217  	}
  1218  	update := sf.saveChunkUpdate(chunk)
  1219  	updates = append(updates, update)
  1220  	return sf.createAndApplyTransaction(updates...)
  1221  }
  1222  
  1223  // uploadProgressAndBytes updates the CachedUploadProgress and
  1224  // CachedUploadedBytes fields to indicate what percentage of the file has been
  1225  // uploaded based on the unique pieces that have been uploaded and also how many
  1226  // bytes have been uploaded of that file in total. Note that a file may be
  1227  // Available long before UploadProgress reaches 100%.
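//
// Worked example (illustrative, assuming a 10-of-30 erasure code and 4 MiB
// sectors): a file with 2 chunks has desired = 2 * 4 MiB * 30 = 240 MiB, so
// 60 MiB of uploaded piece data corresponds to 25% upload progress.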
  1228  func (sf *SiaFile) uploadProgressAndBytes() (float64, uint64, error) {
  1229  	_, uploaded, err := sf.uploadedBytes()
  1230  	if err != nil {
  1231  		return 0, 0, err
  1232  	}
  1233  	if sf.staticMetadata.FileSize == 0 {
  1234  		// Update cache.
  1235  		sf.staticMetadata.CachedUploadProgress = 100
  1236  		return 100, uploaded, nil
  1237  	}
  1238  	desired := uint64(sf.numChunks) * modules.SectorSize * uint64(sf.staticMetadata.staticErasureCode.NumPieces())
  1239  	// Update cache.
  1240  	sf.staticMetadata.CachedUploadProgress = math.Min(100*(float64(uploaded)/float64(desired)), 100)
  1241  	return sf.staticMetadata.CachedUploadProgress, uploaded, nil
  1242  }
  1243  
  1244  // uploadedBytes indicates how many bytes of the file have been uploaded via
  1245  // current file contracts in total as well as unique uploaded bytes. Note that
  1246  // this includes padding and redundancy, so uploadedBytes can return a value
  1247  // much larger than the file's original filesize.
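//
// Illustrative example: a chunk with one piece set holding 3 pieces and a
// second set holding 2 pieces contributes 5 * SectorSize to the total and
// 2 * SectorSize to the unique count (one sector per non-empty piece set).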
  1248  func (sf *SiaFile) uploadedBytes() (uint64, uint64, error) {
  1249  	var total, unique uint64
  1250  	err := sf.iterateChunksReadonly(func(chunk chunk) error {
  1251  		for _, pieceSet := range chunk.Pieces {
  1252  			// Move on to the next pieceSet if nothing has been uploaded yet
  1253  			idx := CombinedChunkIndex(uint64(sf.numChunks), uint64(chunk.Index), len(sf.staticMetadata.PartialChunks))
  1254  			if len(pieceSet) == 0 &&
  1255  				(idx == -1 || sf.staticMetadata.PartialChunks[idx].Status != CombinedChunkStatusInComplete) {
  1256  				continue
  1257  			}
  1258  
  1259  			// Note: we need to multiply by SectorSize here instead of
  1260  			// f.pieceSize because the actual bytes uploaded include overhead
  1261  			// from TwoFish encryption
  1262  			//
  1263  			// Sum the total bytes uploaded
  1264  			total += uint64(len(pieceSet)) * modules.SectorSize
  1265  			// Sum the unique bytes uploaded
  1266  			unique += modules.SectorSize
  1267  		}
  1268  		return nil
  1269  	})
  1270  	if err != nil {
  1271  		return 0, 0, errors.AddContext(err, "failed to compute uploaded bytes")
  1272  	}
  1273  	// Update cache.
  1274  	sf.staticMetadata.CachedUploadedBytes = total
  1275  	return total, unique, nil
  1276  }