gitlab.com/SkynetLabs/skyd@v1.6.9/skymodules/renter/filesystem/siafile/persist.go

     1  package siafile
     2  
     3  import (
     4  	"encoding/binary"
     5  	"encoding/json"
     6  	"fmt"
     7  	"io"
     8  	"io/ioutil"
     9  	"os"
    10  	"path/filepath"
    11  
    12  	"gitlab.com/SkynetLabs/skyd/siatest/dependencies"
    13  
    14  	"gitlab.com/NebulousLabs/errors"
    15  	"gitlab.com/NebulousLabs/writeaheadlog"
    16  	"go.sia.tech/siad/modules"
    17  
    18  	"gitlab.com/NebulousLabs/encoding"
    19  	"gitlab.com/SkynetLabs/skyd/build"
    20  )
    21  
    22  var (
    23  	// errUnknownSiaFileUpdate is returned when applyUpdates finds an update
    24  	// that is unknown
    25  	errUnknownSiaFileUpdate = errors.New("unknown siafile update")
    26  )
    27  
    28  // ApplyUpdates is a wrapper for applyUpdates that uses the production
    29  // dependencies.
    30  func ApplyUpdates(updates ...writeaheadlog.Update) error {
    31  	return applyUpdates(modules.ProdDependencies, updates...)
    32  }
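
        // exampleReplayWALUpdates is an illustrative sketch (not part of the original
        // file): on startup, before any SiaFiles are loaded from disk, unapplied
        // transactions recovered from the WAL can be replayed through ApplyUpdates.
        // The txns slice is assumed to come from the WAL recovery performed elsewhere.
        func exampleReplayWALUpdates(txns []*writeaheadlog.Transaction) error {
        	for _, txn := range txns {
        		// Apply the recorded updates using the production dependencies.
        		if err := ApplyUpdates(txn.Updates...); err != nil {
        			return errors.AddContext(err, "failed to replay wal txn")
        		}
        		// Tell the WAL that the transaction was applied so it can be released.
        		if err := txn.SignalUpdatesApplied(); err != nil {
        			return errors.AddContext(err, "failed to signal applied updates")
        		}
        	}
        	return nil
        }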
    33  
    34  // LoadSiaFile is a wrapper for loadSiaFile that uses the production
    35  // dependencies.
    36  func LoadSiaFile(path string, wal *writeaheadlog.WAL) (*SiaFile, error) {
    37  	return loadSiaFile(path, wal, modules.ProdDependencies)
    38  }
    39  
    40  // LoadSiaFileFromReader allows loading a SiaFile from a location other than
    41  // directly from disk, as long as the source satisfies the SiaFileSource
    42  // interface.
    43  func LoadSiaFileFromReader(r io.ReadSeeker, path string, wal *writeaheadlog.WAL) (*SiaFile, error) {
    44  	return loadSiaFileFromReader(r, path, wal, modules.ProdDependencies)
    45  }
    46  
    47  // LoadSiaFileFromReaderWithChunks not only reads the header of the SiaFile
    48  // from the given reader but also the chunks, which it returns separately. This
    49  // is useful if the file is read from an in-memory buffer and the chunks can't
    50  // be read from disk later.
    51  func LoadSiaFileFromReaderWithChunks(r io.ReadSeeker, path string, wal *writeaheadlog.WAL) (*SiaFile, Chunks, error) {
    52  	sf, err := LoadSiaFileFromReader(r, path, wal)
    53  	if err != nil {
    54  		return nil, Chunks{}, err
    55  	}
    56  	// Load chunks from reader.
    57  	var chunks []chunk
    58  	chunkBytes := make([]byte, int(sf.staticMetadata.StaticPagesPerChunk)*pageSize)
    59  	for chunkIndex := 0; chunkIndex < sf.numChunks; chunkIndex++ {
    60  		if _, err := r.Read(chunkBytes); err != nil && !errors.Contains(err, io.EOF) {
    61  			return nil, Chunks{}, errors.AddContext(err, fmt.Sprintf("failed to read chunk %v", chunkIndex))
    62  		}
    63  		chunk, err := unmarshalChunk(uint32(sf.staticMetadata.staticErasureCode.NumPieces()), chunkBytes)
    64  		if err != nil {
    65  			return nil, Chunks{}, errors.AddContext(err, fmt.Sprintf("failed to unmarshal chunk %v", chunkIndex))
    66  		}
    67  		chunk.Index = int(chunkIndex)
    68  		chunks = append(chunks, chunk)
    69  	}
    70  	return sf, Chunks{chunks}, nil
    71  }
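
        // exampleLoadFromBuffer is an illustrative sketch (not part of the original
        // file): it loads a SiaFile together with its chunks from a raw, in-memory
        // copy of a .sia file. It assumes the "bytes" package is imported and that
        // wal is an already-open WAL.
        func exampleLoadFromBuffer(raw []byte, path string, wal *writeaheadlog.WAL) (*SiaFile, Chunks, error) {
        	// bytes.Reader satisfies io.ReadSeeker, so both the header and the chunks
        	// can be parsed straight from memory without touching disk.
        	return LoadSiaFileFromReaderWithChunks(bytes.NewReader(raw), path, wal)
        }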
    72  
    73  // SetSiaFilePath sets the path of the siafile on disk.
    74  func (sf *SiaFile) SetSiaFilePath(path string) {
    75  	sf.mu.Lock()
    76  	defer sf.mu.Unlock()
    77  	sf.siaFilePath = path
    78  }
    79  
    80  // applyUpdates applies a number of writeaheadlog updates to the corresponding
    81  // SiaFile. This method can apply updates from different SiaFiles and should
    82  // only be run before the SiaFiles are loaded from disk right after the startup
    83  // of siad. Otherwise we might run into concurrency issues.
    84  func applyUpdates(deps modules.Dependencies, updates ...writeaheadlog.Update) error {
    85  	for _, u := range updates {
    86  		err := func() error {
    87  			switch u.Name {
    88  			case updateDeleteName:
    89  				return readAndApplyDeleteUpdate(deps, u)
    90  			case updateInsertName:
    91  				return readAndApplyInsertUpdate(deps, u)
    92  			case updateDeletePartialName:
    93  				return readAndApplyDeleteUpdate(deps, u)
    94  			case writeaheadlog.NameDeleteUpdate:
    95  				return writeaheadlog.ApplyDeleteUpdate(u)
    96  			case writeaheadlog.NameTruncateUpdate:
    97  				return writeaheadlog.ApplyTruncateUpdate(u)
    98  			case writeaheadlog.NameWriteAtUpdate:
    99  				return writeaheadlog.ApplyWriteAtUpdate(u)
   100  			default:
   101  				return errUnknownSiaFileUpdate
   102  			}
   103  		}()
   104  		if err != nil {
   105  			return errors.AddContext(err, "failed to apply update")
   106  		}
   107  	}
   108  	return nil
   109  }
   110  
   111  // createDeleteUpdate is a helper that creates a writeaheadlog update for
   112  // deleting a file.
   113  func createDeleteUpdate(path string) writeaheadlog.Update {
   114  	return writeaheadlog.Update{
   115  		Name:         updateDeleteName,
   116  		Instructions: []byte(path),
   117  	}
   118  }
   119  
   120  // loadSiaFile loads a SiaFile from disk.
   121  func loadSiaFile(path string, wal *writeaheadlog.WAL, deps modules.Dependencies) (*SiaFile, error) {
   122  	// Open the file.
   123  	f, err := deps.Open(path)
   124  	if err != nil {
   125  		return nil, err
   126  	}
   127  	sf, err := loadSiaFileFromReader(f, path, wal, deps)
   128  	return sf, errors.Compose(err, f.Close())
   129  }
   130  
   131  // loadSiaFileFromReader allows loading a SiaFile from a location other than
   132  // directly from disk, as long as the source satisfies the SiaFileSource
   133  // interface.
   134  func loadSiaFileFromReader(r io.ReadSeeker, path string, wal *writeaheadlog.WAL, deps modules.Dependencies) (*SiaFile, error) {
   135  	// Create the SiaFile
   136  	sf := &SiaFile{
   137  		deps:        deps,
   138  		siaFilePath: path,
   139  		wal:         wal,
   140  	}
   141  	// Load the metadata.
   142  	decoder := json.NewDecoder(r)
   143  	err := decoder.Decode(&sf.staticMetadata)
   144  	if err != nil {
   145  		return nil, errors.AddContext(err, "failed to decode metadata")
   146  	}
   147  
   148  	// Create the erasure coder.
   149  	sf.staticMetadata.staticErasureCode, err = unmarshalErasureCoder(sf.staticMetadata.StaticErasureCodeType, sf.staticMetadata.StaticErasureCodeParams)
   150  	if err != nil {
   151  		return nil, err
   152  	}
   153  
   154  	// Load the pubKeyTable.
   155  	pubKeyTableLen := sf.staticMetadata.ChunkOffset - sf.staticMetadata.PubKeyTableOffset
   156  	if pubKeyTableLen < 0 {
   157  		return nil, fmt.Errorf("pubKeyTableLen is %v, can't load file", pubKeyTableLen)
   158  	}
   159  	rawPubKeyTable := make([]byte, pubKeyTableLen)
   160  	if _, err := r.Seek(sf.staticMetadata.PubKeyTableOffset, io.SeekStart); err != nil {
   161  		return nil, errors.AddContext(err, "failed to seek to pubKeyTable")
   162  	}
   163  	if _, err := r.Read(rawPubKeyTable); errors.Contains(err, io.EOF) {
   164  		// Empty table.
   165  		sf.pubKeyTable = []HostPublicKey{}
   166  	} else if err != nil {
   167  		// Unexpected error.
   168  		return nil, errors.AddContext(err, "failed to read pubKeyTable from disk")
   169  	} else {
   170  		// Unmarshal table.
   171  		sf.pubKeyTable, err = unmarshalPubKeyTable(rawPubKeyTable)
   172  		if err != nil {
   173  			return nil, errors.AddContext(err, "failed to unmarshal pubKeyTable")
   174  		}
   175  	}
   176  
   177  	// Seek to the start of the chunks.
   178  	off, err := r.Seek(sf.staticMetadata.ChunkOffset, io.SeekStart)
   179  	if err != nil {
   180  		return nil, err
   181  	}
   182  
   183  	// Sanity check that the offset is page aligned.
   184  	if off%pageSize != 0 {
   185  		return nil, errors.New("chunkOff is not page aligned")
   186  	}
   187  
   188  	// Set numChunks field.
   189  	numChunks := sf.staticMetadata.FileSize / int64(sf.staticChunkSize())
   190  	if sf.staticMetadata.FileSize%int64(sf.staticChunkSize()) != 0 || numChunks == 0 {
   191  		numChunks++
   192  	}
   193  	sf.numChunks = int(numChunks)
   194  
   195  	// Compat check for metadata static version
   196  	//
   197  	// NOTE: This needs to be last so that the file is fully loaded and all
   198  	// information is available for the compat checks and saving of the
   199  	// metadata.
   200  	err = sf.metadataCompatCheck()
   201  	if err != nil {
   202  		return nil, err
   203  	}
   204  
   205  	return sf, nil
   206  }
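
        // exampleNumChunks is an illustrative sketch (not part of the original file)
        // of the chunk-count rule used above: the file size is divided by the chunk
        // size, rounding up, and even an empty file gets one chunk. For example, with
        // a 40 MiB chunk size a 100 MiB file has 3 chunks and an empty file has 1.
        func exampleNumChunks(fileSize, chunkSize int64) int64 {
        	numChunks := fileSize / chunkSize
        	if fileSize%chunkSize != 0 || numChunks == 0 {
        		numChunks++
        	}
        	return numChunks
        }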
   207  
   208  // readAndApplyDeleteUpdate reads the delete update and applies it. This helper
   209  // assumes that the file is not open.
   210  func readAndApplyDeleteUpdate(deps modules.Dependencies, update writeaheadlog.Update) error {
   211  	err := deps.RemoveFile(readDeleteUpdate(update))
   212  	if os.IsNotExist(err) {
   213  		return nil
   214  	}
   215  	return err
   216  }
   217  
   218  // readAndApplyInsertUpdate reads the insert update and applies it. This helper
   219  // assumes that the file is not open and so should only be called on startup,
   220  // before any SiaFiles are loaded from disk.
   221  func readAndApplyInsertUpdate(deps modules.Dependencies, update writeaheadlog.Update) (err error) {
   222  	// Decode update.
   223  	path, index, data, err := readInsertUpdate(update)
   224  	if err != nil {
   225  		return err
   226  	}
   227  
   228  	// Open the file.
   229  	f, err := deps.OpenFile(path, os.O_RDWR|os.O_CREATE, 0600)
   230  	if err != nil {
   231  		return err
   232  	}
   233  	defer func() {
   234  		err = errors.Compose(err, f.Close())
   235  	}()
   236  
   237  	// Write data.
   238  	if n, err := f.WriteAt(data, index); err != nil {
   239  		return err
   240  	} else if n < len(data) {
   241  		return fmt.Errorf("update was only applied partially - %v / %v", n, len(data))
   242  	}
   243  	// Sync file.
   244  	return f.Sync()
   245  }
   246  
   247  // readDeleteUpdate unmarshals the update's instructions and returns the
   248  // encoded path.
   249  func readDeleteUpdate(update writeaheadlog.Update) string {
   250  	return string(update.Instructions)
   251  }
   252  
   253  // readInsertUpdate unmarshals the update's instructions and returns the path, index
   254  // and data encoded in the instructions.
   255  func readInsertUpdate(update writeaheadlog.Update) (path string, index int64, data []byte, err error) {
   256  	if !IsSiaFileUpdate(update) {
   257  		err = errors.New("readInsertUpdate can't read non-SiaFile update")
   258  		build.Critical(err)
   259  		return
   260  	}
   261  	err = encoding.UnmarshalAll(update.Instructions, &path, &index, &data)
   262  	return
   263  }
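
        // exampleInsertUpdateRoundTrip is an illustrative sketch (not part of the
        // original file) showing the symmetry between createInsertUpdate and
        // readInsertUpdate: the instructions are just the path, offset and data
        // marshaled together, so decoding returns the original values. The path used
        // here is hypothetical.
        func exampleInsertUpdateRoundTrip() error {
        	update := createInsertUpdate("/renter/example.sia", 4096, []byte("payload"))
        	path, index, data, err := readInsertUpdate(update)
        	if err != nil {
        		return err
        	}
        	// path == "/renter/example.sia", index == 4096, data == []byte("payload")
        	_, _, _ = path, index, data
        	return nil
        }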
   264  
   265  // allocateHeaderPage allocates a new page for the metadata and publicKeyTable.
   266  // It returns an update that moves the chunkData back by one pageSize if
   267  // applied and also updates the ChunkOffset of the metadata.
   268  func (sf *SiaFile) allocateHeaderPage() (_ writeaheadlog.Update, err error) {
   269  	// Sanity check the chunk offset.
   270  	if sf.staticMetadata.ChunkOffset%pageSize != 0 {
   271  		build.Critical("the chunk offset is not page aligned")
   272  	}
   273  	// Open the file.
   274  	f, err := sf.deps.OpenFile(sf.siaFilePath, os.O_RDWR|os.O_CREATE, 0600)
   275  	if err != nil {
   276  		return writeaheadlog.Update{}, errors.AddContext(err, "failed to open siafile")
   277  	}
   278  	defer func() {
   279  		err = errors.Compose(err, f.Close())
   280  	}()
   281  	// Seek the chunk offset.
   282  	_, err = f.Seek(sf.staticMetadata.ChunkOffset, io.SeekStart)
   283  	if err != nil {
   284  		return writeaheadlog.Update{}, err
   285  	}
   286  	// Read all the chunk data.
   287  	chunkData, err := ioutil.ReadAll(f)
   288  	if err != nil {
   289  		return writeaheadlog.Update{}, err
   290  	}
   291  	// Move the chunk offset back by a pageSize to make room for the new header page.
   292  	sf.staticMetadata.ChunkOffset += pageSize
   293  
   294  	// Create and return update.
   295  	return sf.createInsertUpdate(sf.staticMetadata.ChunkOffset, chunkData), nil
   296  }
   297  
   298  // applyUpdates applies updates to the SiaFile. Only updates that belong to the
   299  // SiaFile on which applyUpdates is called can be applied. Everything else is
   300  // considered a developer error and is not applied, to avoid corruption.
   301  // applyUpdates also syncs the SiaFile for convenience since it already has an
   302  // open file handle.
   303  func (sf *SiaFile) applyUpdates(updates ...writeaheadlog.Update) (err error) {
   304  	// Sanity check that file hasn't been deleted.
   305  	if sf.deleted {
   306  		return errors.New("can't call applyUpdates on deleted file")
   307  	}
   308  
   309  	// If the set of updates contains a delete, all updates prior to that delete
   310  	// are irrelevant, so perform the last delete and then process the remaining
   311  	// updates. This also prevents a bug on Windows where we attempt to delete
   312  	// the file while holding an open file handle.
   313  	for i := len(updates) - 1; i >= 0; i-- {
   314  		u := updates[i]
   315  		if u.Name != updateDeleteName {
   316  			continue
   317  		}
   318  		// Read and apply the delete update.
   319  		if err := readAndApplyDeleteUpdate(sf.deps, u); err != nil {
   320  			return err
   321  		}
   322  		// Truncate the updates and break out of the for loop.
   323  		updates = updates[i+1:]
   324  		break
   325  	}
   326  	if len(updates) == 0 {
   327  		return nil
   328  	}
   329  
   330  	// Create the path if it doesn't exist yet.
   331  	if err = os.MkdirAll(filepath.Dir(sf.siaFilePath), 0700); err != nil {
   332  		return err
   333  	}
   334  	// Create and/or open the file.
   335  	f, err := sf.deps.OpenFile(sf.siaFilePath, os.O_RDWR|os.O_CREATE, 0600)
   336  	if err != nil {
   337  		return err
   338  	}
   339  	defer func() {
   340  		if err == nil {
   341  			// If no error occurred we sync and close the file.
   342  			err = errors.Compose(f.Sync(), f.Close())
   343  		} else {
   344  			// Otherwise we still need to close the file.
   345  			err = errors.Compose(err, f.Close())
   346  		}
   347  	}()
   348  
   349  	// Apply updates.
   350  	for _, u := range updates {
   351  		err := func() error {
   352  			switch u.Name {
   353  			case updateDeleteName:
   354  				// Sanity check: all of the updates should be insert updates.
   355  				build.Critical("Unexpected non-insert update", u.Name)
   356  				return nil
   357  			case updateInsertName:
   358  				return sf.readAndApplyInsertUpdate(f, u)
   359  			case updateDeletePartialName:
   360  				return readAndApplyDeleteUpdate(sf.deps, u)
   361  			case writeaheadlog.NameTruncateUpdate:
   362  				return sf.readAndApplyTruncateUpdate(f, u)
   363  			default:
   364  				return errUnknownSiaFileUpdate
   365  			}
   366  		}()
   367  		if err != nil {
   368  			return errors.AddContext(err, "failed to apply update")
   369  		}
   370  	}
   371  	return nil
   372  }
   373  
   374  // chunk reads the chunk with index chunkIndex from disk.
   375  func (sf *SiaFile) chunk(chunkIndex int) (_ chunk, err error) {
   376  	// If the file has been deleted we can't call chunk.
   377  	if sf.deleted {
   378  		return chunk{}, errors.AddContext(ErrDeleted, "can't call chunk on deleted file")
   379  	}
   380  	chunkOffset := sf.chunkOffset(chunkIndex)
   381  	chunkBytes := make([]byte, int(sf.staticMetadata.StaticPagesPerChunk)*pageSize)
   382  	f, err := sf.deps.Open(sf.siaFilePath)
   383  	if err != nil {
   384  		return chunk{}, errors.AddContext(err, "failed to open file to read chunk")
   385  	}
   386  	defer func() {
   387  		err = errors.Compose(err, f.Close())
   388  	}()
   389  	if _, err := f.ReadAt(chunkBytes, chunkOffset); err != nil && !errors.Contains(err, io.EOF) {
   390  		return chunk{}, errors.AddContext(err, "failed to read chunk from disk")
   391  	}
   392  	c, err := unmarshalChunk(uint32(sf.staticMetadata.staticErasureCode.NumPieces()), chunkBytes)
   393  	if err != nil {
   394  		return chunk{}, errors.AddContext(err, "failed to unmarshal chunk")
   395  	}
   396  	c.Index = chunkIndex // Set non-persisted field
   397  	return c, nil
   398  }
   399  
   400  // iterateChunks iterates over all the chunks on disk and creates WAL updates
   401  // for each chunk that was modified.
   402  func (sf *SiaFile) iterateChunks(iterFunc func(chunk *chunk) (bool, error)) ([]writeaheadlog.Update, error) {
   403  	if sf.deleted {
   404  		return nil, errors.AddContext(ErrDeleted, "can't call iterateChunks on deleted file")
   405  	}
   406  	var updates []writeaheadlog.Update
   407  	err := sf.iterateChunksReadonly(func(chunk chunk) error {
   408  		modified, err := iterFunc(&chunk)
   409  		if err != nil {
   410  			return err
   411  		}
   412  		if modified {
   413  			updates = append(updates, sf.saveChunkUpdate(chunk))
   414  		}
   415  		return nil
   416  	})
   417  	return updates, err
   418  }
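
        // exampleRewriteAllChunks is an illustrative sketch (not part of the original
        // file): reporting every chunk as modified makes iterateChunks emit one insert
        // update per chunk, and createAndApplyTransaction then rewrites all of them to
        // disk atomically through the WAL. Locking is omitted for brevity.
        func (sf *SiaFile) exampleRewriteAllChunks() error {
        	updates, err := sf.iterateChunks(func(c *chunk) (bool, error) {
        		// No fields are changed; returning true marks the chunk as modified so
        		// a saveChunkUpdate is created for it.
        		return true, nil
        	})
        	if err != nil {
        		return errors.AddContext(err, "failed to create chunk updates")
        	}
        	return sf.createAndApplyTransaction(updates...)
        }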
   419  
   420  // iterateChunksReadonly iterates over all the chunks on disk and calls iterFunc
   421  // on each one without modifying them.
   422  func (sf *SiaFile) iterateChunksReadonly(iterFunc func(chunk chunk) error) (err error) {
   423  	if sf.deleted {
   424  		return errors.AddContext(ErrDeleted, "can't call iterateChunksReadonly on deleted file")
   425  	}
   426  	// Open the file.
   427  	f, err := os.Open(sf.siaFilePath)
   428  	if err != nil {
   429  		return errors.AddContext(err, "failed to open file")
   430  	}
   431  	defer func() {
   432  		err = errors.Compose(err, f.Close())
   433  	}()
   434  
   435  	// Seek to the first chunk.
   436  	_, err = f.Seek(sf.staticMetadata.ChunkOffset, io.SeekStart)
   437  	if err != nil {
   438  		return errors.AddContext(err, "failed to seek to ChunkOffset")
   439  	}
   440  	// Read the chunks one-by-one.
   441  	chunkBytes := make([]byte, int(sf.staticMetadata.StaticPagesPerChunk)*pageSize)
   442  	for chunkIndex := 0; chunkIndex < sf.numChunks; chunkIndex++ {
   443  		var c chunk
   444  		var err error
   445  		if _, err := f.Read(chunkBytes); err != nil && !errors.Contains(err, io.EOF) {
   446  			return errors.AddContext(err, fmt.Sprintf("failed to read chunk %v", chunkIndex))
   447  		}
   448  		c, err = unmarshalChunk(uint32(sf.staticMetadata.staticErasureCode.NumPieces()), chunkBytes)
   449  		if err != nil {
   450  			return errors.AddContext(err, fmt.Sprintf("failed to unmarshal chunk %v", chunkIndex))
   451  		}
   452  		c.Index = chunkIndex
   453  		if err := iterFunc(c); err != nil {
   454  			return errors.AddContext(err, fmt.Sprintf("failed to iterate over chunk %v", chunkIndex))
   455  		}
   456  	}
   457  	return nil
   458  }
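
        // exampleCountChunksOnDisk is an illustrative sketch (not part of the original
        // file) of the read-only iterator: it walks every marshaled chunk without
        // creating any updates and returns how many chunks were visited. Locking is
        // omitted for brevity.
        func (sf *SiaFile) exampleCountChunksOnDisk() (int, error) {
        	n := 0
        	err := sf.iterateChunksReadonly(func(c chunk) error {
        		// c.Index reports the position of the chunk within the file.
        		n++
        		return nil
        	})
        	return n, err
        }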
   459  
   460  // chunkOffset returns the offset of a marshaled chunk within the file.
   461  func (sf *SiaFile) chunkOffset(chunkIndex int) int64 {
   462  	if chunkIndex < 0 {
   463  		panic("chunk index can't be negative")
   464  	}
   465  	return sf.staticMetadata.ChunkOffset + int64(chunkIndex)*int64(sf.staticMetadata.StaticPagesPerChunk)*pageSize
   466  }
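
        // For illustration (hypothetical values, not taken from a real file): with a
        // pageSize of 4096 bytes, a header occupying a single page (ChunkOffset =
        // 4096) and StaticPagesPerChunk = 2, chunk 3 is stored at offset
        // 4096 + 3*2*4096 = 28672.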
   467  
   468  // createAndApplyTransaction is a helper method that creates a writeaheadlog
   469  // transaction and applies it.
   470  func (sf *SiaFile) createAndApplyTransaction(updates ...writeaheadlog.Update) (err error) {
   471  	// Sanity check that file hasn't been deleted.
   472  	if sf.deleted {
   473  		return errors.New("can't call createAndApplyTransaction on deleted file")
   474  	}
   475  	if len(updates) == 0 {
   476  		return nil
   477  	}
   478  	// Create the writeaheadlog transaction.
   479  	txn, err := sf.wal.NewTransaction(updates)
   480  	if err != nil {
   481  		return errors.AddContext(err, "failed to create wal txn")
   482  	}
   483  	// No extra setup is required. Signal that it is done.
   484  	if err := <-txn.SignalSetupComplete(); err != nil {
   485  		return errors.AddContext(err, "failed to signal setup completion")
   486  	}
   487  	// Starting at this point the changes to be made are written to the WAL.
   488  	// This means we need to panic in case applying the updates fails.
   489  	defer func() {
   490  		if err != nil && !sf.deps.Disrupt(dependencies.DisruptFaultyFile) {
   491  			panic(err)
   492  		}
   493  	}()
   494  	// Apply the updates.
   495  	if err := sf.applyUpdates(updates...); err != nil {
   496  		return errors.AddContext(err, "failed to apply updates")
   497  	}
   498  	// Updates are applied. Let the writeaheadlog know.
   499  	if err := txn.SignalUpdatesApplied(); err != nil {
   500  		return errors.AddContext(err, "failed to signal that updates are applied")
   501  	}
   502  	return nil
   503  }
   504  
   505  // createAndApplyTransaction is a generic version of the
   506  // createAndApplyTransaction method of the SiaFile. This will result in 2 fsyncs
   507  // independent of the number of updates.
   508  func createAndApplyTransaction(wal *writeaheadlog.WAL, updates ...writeaheadlog.Update) (err error) {
   509  	if len(updates) == 0 {
   510  		return nil
   511  	}
   512  	// Create the writeaheadlog transaction.
   513  	txn, err := wal.NewTransaction(updates)
   514  	if err != nil {
   515  		return errors.AddContext(err, "failed to create wal txn")
   516  	}
   517  	// No extra setup is required. Signal that it is done.
   518  	if err := <-txn.SignalSetupComplete(); err != nil {
   519  		return errors.AddContext(err, "failed to signal setup completion")
   520  	}
   521  	// Starting at this point the changes to be made are written to the WAL.
   522  	// This means we need to panic in case applying the updates fails.
   523  	defer func() {
   524  		if err != nil {
   525  			panic(err)
   526  		}
   527  	}()
   528  	// Apply the updates.
   529  	if err := ApplyUpdates(updates...); err != nil {
   530  		return errors.AddContext(err, "failed to apply updates")
   531  	}
   532  	// Updates are applied. Let the writeaheadlog know.
   533  	if err := txn.SignalUpdatesApplied(); err != nil {
   534  		return errors.AddContext(err, "failed to signal that updates are applied")
   535  	}
   536  	return nil
   537  }
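
        // exampleDeleteSiaFileAtomically is an illustrative sketch (not part of the
        // original file): it records a delete update in the WAL and applies it, so a
        // crash in between is recovered by replaying the WAL on the next startup. The
        // path argument is whatever .sia file should be removed.
        func exampleDeleteSiaFileAtomically(wal *writeaheadlog.WAL, path string) error {
        	return createAndApplyTransaction(wal, createDeleteUpdate(path))
        }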
   538  
   539  // createDeleteUpdate is a helper method that creates a writeaheadlog update for
   540  // deleting a file.
   541  func (sf *SiaFile) createDeleteUpdate() writeaheadlog.Update {
   542  	return createDeleteUpdate(sf.siaFilePath)
   543  }
   544  
   545  // createInsertUpdate is a helper function which creates a writeaheadlog update
   546  // for writing the specified data at the provided index. It is usually not
   547  // called directly but wrapped by another helper that creates an update for a
   548  // specific part of the SiaFile, e.g. the metadata.
   549  func createInsertUpdate(path string, index int64, data []byte) writeaheadlog.Update {
   550  	if index < 0 {
   551  		index = 0
   552  		data = []byte{}
   553  		build.Critical("index passed to createUpdate should never be negative")
   554  	}
   555  	// Create update
   556  	return writeaheadlog.Update{
   557  		Name:         updateInsertName,
   558  		Instructions: encoding.MarshalAll(path, index, data),
   559  	}
   560  }
   561  
   562  // createInsertUpdate is a helper method which creates a writeaheadlog update
   563  // for writing the specified data at the provided index. It is usually not
   564  // called directly but wrapped by another helper that creates an update for a
   565  // specific part of the SiaFile, e.g. the metadata.
   566  func (sf *SiaFile) createInsertUpdate(index int64, data []byte) writeaheadlog.Update {
   567  	return createInsertUpdate(sf.siaFilePath, index, data)
   568  }
   569  
   570  // readAndApplyInsertUpdate reads the insert update for a SiaFile and then
   571  // applies it.
   572  func (sf *SiaFile) readAndApplyInsertUpdate(f modules.File, update writeaheadlog.Update) error {
   573  	// Decode update.
   574  	path, index, data, err := readInsertUpdate(update)
   575  	if err != nil {
   576  		return err
   577  	}
   578  
   579  	// Sanity check path. Update should belong to SiaFile.
   580  	if sf.siaFilePath != path {
   581  		build.Critical(fmt.Sprintf("can't apply update for file %s to SiaFile %s", path, sf.siaFilePath))
   582  		return nil
   583  	}
   584  
   585  	// Write data.
   586  	if n, err := f.WriteAt(data, index); err != nil {
   587  		return err
   588  	} else if n < len(data) {
   589  		return fmt.Errorf("update was only applied partially - %v / %v", n, len(data))
   590  	}
   591  	return nil
   592  }
   593  
   594  // readAndApplyTruncateUpdate parses and applies a truncate update.
   595  func (sf *SiaFile) readAndApplyTruncateUpdate(f modules.File, u writeaheadlog.Update) error {
   596  	if u.Name != writeaheadlog.NameTruncateUpdate {
   597  		return fmt.Errorf("applyTruncateUpdate called on update of type %v", u.Name)
   598  	}
   599  	// Decode update.
   600  	if len(u.Instructions) < 8 {
   601  		return errors.New("instructions slice of update is too short to contain the size and path")
   602  	}
   603  	size := int64(binary.LittleEndian.Uint64(u.Instructions[:8]))
   604  	// Truncate file.
   605  	return f.Truncate(size)
   606  }
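
        // exampleTruncateSize is an illustrative sketch (not part of the original
        // file) that mirrors the decoding above: the first 8 bytes of a truncate
        // update's instructions hold the target size as a little-endian uint64.
        func exampleTruncateSize(u writeaheadlog.Update) (int64, error) {
        	if u.Name != writeaheadlog.NameTruncateUpdate || len(u.Instructions) < 8 {
        		return 0, errors.New("not a valid truncate update")
        	}
        	return int64(binary.LittleEndian.Uint64(u.Instructions[:8])), nil
        }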
   607  
   608  // saveFile saves the SiaFile's header and the provided chunks atomically.
   609  func (sf *SiaFile) saveFile(chunks []chunk) (err error) {
   610  	// Sanity check that file hasn't been deleted.
   611  	if sf.deleted {
   612  		return errors.New("can't call saveFile on deleted file")
   613  	}
   614  	// Restore metadata on failure.
   615  	defer func(backup Metadata) {
   616  		if err != nil {
   617  			sf.staticMetadata.restore(backup)
   618  		}
   619  	}(sf.staticMetadata.backup())
   620  	// Update header and chunks.
   621  	headerUpdates, err := sf.saveHeaderUpdates()
   622  	if err != nil {
   623  		return errors.AddContext(err, "failed to create save header updates")
   624  	}
   625  	var chunksUpdates []writeaheadlog.Update
   626  	for _, chunk := range chunks {
   627  		chunksUpdates = append(chunksUpdates, sf.saveChunkUpdate(chunk))
   628  	}
   629  	err = sf.createAndApplyTransaction(append(headerUpdates, chunksUpdates...)...)
   630  	return errors.AddContext(err, "failed to apply saveFile updates")
   631  }
   632  
   633  // saveChunkUpdate creates a writeaheadlog update that saves a single marshaled chunk
   634  // to disk when applied.
   635  // NOTE: For consistency chunk updates always need to be created after the
   636  // header or metadata updates.
   637  func (sf *SiaFile) saveChunkUpdate(chunk chunk) writeaheadlog.Update {
   638  	offset := sf.chunkOffset(chunk.Index)
   639  	chunkBytes := marshalChunk(chunk)
   640  	return sf.createInsertUpdate(offset, chunkBytes)
   641  }
   642  
   643  // saveHeaderUpdates creates writeaheadlog updates to save the metadata and
   644  // pubKeyTable of the SiaFile to disk using the writeaheadlog. If the metadata
   645  // and the pubKeyTable have grown too large and overlap, which would corrupt
   646  // them if they were written to disk, a new page is allocated.
   647  // NOTE: For consistency chunk updates always need to be created after the
   648  // header or metadata updates.
   649  func (sf *SiaFile) saveHeaderUpdates() (_ []writeaheadlog.Update, err error) {
   650  	// Create a list of updates which need to be applied to save the metadata.
   651  	var updates []writeaheadlog.Update
   652  
   653  	// Marshal the pubKeyTable.
   654  	pubKeyTable, err := marshalPubKeyTable(sf.pubKeyTable)
   655  	if err != nil {
   656  		return nil, errors.AddContext(err, "failed to marshal pubkey table")
   657  	}
   658  
   659  	// Update the pubKeyTableOffset. This is not necessarily the final offset
   660  	// but we need to marshal the metadata with this new offset to see if the
   661  	// metadata and the pubKeyTable overlap.
   662  	sf.staticMetadata.PubKeyTableOffset = sf.staticMetadata.ChunkOffset - int64(len(pubKeyTable))
   663  
   664  	// Marshal the metadata.
   665  	metadata, err := marshalMetadata(sf.staticMetadata)
   666  	if err != nil {
   667  		return nil, errors.AddContext(err, "failed to marshal metadata")
   668  	}
   669  
   670  	// If the metadata and the pubKeyTable overlap, we need to allocate a new
   671  	// page for them. Afterwards we need to marshal the metadata again since
   672  	// ChunkOffset and PubKeyTableOffset change when allocating a new page.
   673  	for int64(len(metadata))+int64(len(pubKeyTable)) > sf.staticMetadata.ChunkOffset {
   674  		// Create update to move chunkData back by a page.
   675  		chunkUpdate, err := sf.allocateHeaderPage()
   676  		if err != nil {
   677  			return nil, errors.AddContext(err, "failed to allocate new header page")
   678  		}
   679  		updates = append(updates, chunkUpdate)
   680  		// Update the PubKeyTableOffset.
   681  		sf.staticMetadata.PubKeyTableOffset = sf.staticMetadata.ChunkOffset - int64(len(pubKeyTable))
   682  		// Marshal the metadata again.
   683  		metadata, err = marshalMetadata(sf.staticMetadata)
   684  		if err != nil {
   685  			return nil, errors.AddContext(err, "failed to marshal metadata again")
   686  		}
   687  	}
   688  
   689  	// Create updates for the metadata and pubKeyTable.
   690  	updates = append(updates, sf.createInsertUpdate(0, metadata))
   691  	updates = append(updates, sf.createInsertUpdate(sf.staticMetadata.PubKeyTableOffset, pubKeyTable))
   692  	return updates, nil
   693  }
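
        // The header layout maintained above, for illustration:
        //
        //	offset 0:          metadata
        //	PubKeyTableOffset: pubKeyTable (ends exactly at ChunkOffset)
        //	ChunkOffset:       chunk 0, chunk 1, ... (page aligned)
        //
        // exampleHeaderOverlaps is a minimal sketch (not part of the original file) of
        // the overlap check that triggers allocating another header page.
        func exampleHeaderOverlaps(metadataLen, pubKeyTableLen, chunkOffset int64) bool {
        	// If the marshaled metadata and pubKeyTable together no longer fit in
        	// front of the chunks, the header needs another page.
        	return metadataLen+pubKeyTableLen > chunkOffset
        }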
   694  
   695  // saveMetadataUpdates creates updates that save the metadata of the SiaFile
   696  // but not the publicKeyTable. Most of the time updates are only made to the
   697  // metadata and not to the publicKeyTable, and the metadata fits within a
   698  // single disk sector on the hard drive. This means that using
   699  // saveMetadataUpdates instead of saveHeaderUpdates is potentially faster for
   700  // SiaFiles with a header that can not be marshaled within a single page.
   701  // NOTE: For consistency chunk updates always need to be created after the
   702  // header or metadata updates.
   703  func (sf *SiaFile) saveMetadataUpdates() ([]writeaheadlog.Update, error) {
   704  	// Marshal the pubKeyTable.
   705  	pubKeyTable, err := marshalPubKeyTable(sf.pubKeyTable)
   706  	if err != nil {
   707  		return nil, err
   708  	}
   709  	// Sanity check the length of the pubKeyTable to find out if the length of
   710  	// the table changed. We should never just save the metadata if the table
   711  	// changed as well, since that might lead to corruption.
   712  	if sf.staticMetadata.PubKeyTableOffset+int64(len(pubKeyTable)) != sf.staticMetadata.ChunkOffset {
   713  		build.Critical("never call saveMetadata if the pubKeyTable changed, call saveHeader instead")
   714  		return sf.saveHeaderUpdates()
   715  	}
   716  	// Marshal the metadata.
   717  	metadata, err := marshalMetadata(sf.staticMetadata)
   718  	if err != nil {
   719  		return nil, err
   720  	}
   721  	// If the header doesn't fit in the space between the beginning of the file
   722  	// and the pubKeyTable, we need to call saveHeader since the pubKeyTable
   723  	// needs to be moved as well and saveHeader is already handling that
   724  	// edgecase.
   725  	if int64(len(metadata)) > sf.staticMetadata.PubKeyTableOffset {
   726  		return sf.saveHeaderUpdates()
   727  	}
   728  	// Otherwise we can create and return the updates.
   729  	return []writeaheadlog.Update{sf.createInsertUpdate(0, metadata)}, nil
   730  }