gitlab.com/SiaPrime/SiaPrime@v1.4.1/modules/renter/siafile/persist_compat.go (about)

     1  package siafile
     2  
     3  import (
     4  	"os"
     5  	"time"
     6  
     7  	"gitlab.com/NebulousLabs/errors"
     8  
     9  	"gitlab.com/SiaPrime/SiaPrime/crypto"
    10  	"gitlab.com/SiaPrime/SiaPrime/modules"
    11  )
    12  
type (
	// FileData is a helper struct that contains all the relevant information
	// of a file. It simplifies passing the necessary data between modules and
	// keeps the interface clean.
	FileData struct {
		Name        string                   // siapath of the file (parsed with modules.NewSiaPath)
		FileSize    uint64                   // size of the file in bytes
		MasterKey   [crypto.EntropySize]byte // raw key material; legacy keys are always twofish
		ErasureCode modules.ErasureCoder     // erasure coder used for the file's chunks
		RepairPath  string                   // local path the file can be repaired from
		PieceSize   uint64                   // size of an individual piece in bytes
		Mode        os.FileMode              // permission bits of the file
		Deleted     bool                     // whether the file was marked as deleted
		UID         SiafileUID               // unique identifier of the siafile
		Chunks      []FileChunk              // per-chunk piece information
	}
	// FileChunk is a helper struct that contains data about a chunk.
	FileChunk struct {
		// Pieces[i] holds every uploaded instance of the piece with erasure-code
		// index i, one entry per host that stores it.
		Pieces [][]Piece
	}
)
    34  
    35  // NewFromLegacyData creates a new SiaFile from data that was previously loaded
    36  // from a legacy file.
    37  func (sfs *SiaFileSet) NewFromLegacyData(fd FileData) (*SiaFileSetEntry, error) {
    38  	sfs.mu.Lock()
    39  	defer sfs.mu.Unlock()
    40  
    41  	// Legacy master keys are always twofish keys.
    42  	mk, err := crypto.NewSiaKey(crypto.TypeTwofish, fd.MasterKey[:])
    43  	if err != nil {
    44  		return nil, errors.AddContext(err, "failed to restore master key")
    45  	}
    46  	currentTime := time.Now()
    47  	ecType, ecParams := marshalErasureCoder(fd.ErasureCode)
    48  	siaPath, err := modules.NewSiaPath(fd.Name)
    49  	if err != nil {
    50  		return &SiaFileSetEntry{}, err
    51  	}
    52  	zeroHealth := float64(1 + fd.ErasureCode.MinPieces()/(fd.ErasureCode.NumPieces()-fd.ErasureCode.MinPieces()))
    53  	partialsSiaFile, err := sfs.openPartialsSiaFile(fd.ErasureCode, true)
    54  	if err != nil {
    55  		return nil, err
    56  	}
    57  	file := &SiaFile{
    58  		staticMetadata: Metadata{
    59  			AccessTime:              currentTime,
    60  			ChunkOffset:             defaultReservedMDPages * pageSize,
    61  			ChangeTime:              currentTime,
    62  			HasPartialChunk:         false,
    63  			CreateTime:              currentTime,
    64  			CachedHealth:            zeroHealth,
    65  			CachedStuckHealth:       0,
    66  			CachedRedundancy:        0,
    67  			CachedUserRedundancy:    0,
    68  			CachedUploadProgress:    0,
    69  			FileSize:                int64(fd.FileSize),
    70  			LocalPath:               fd.RepairPath,
    71  			StaticMasterKey:         mk.Key(),
    72  			StaticMasterKeyType:     mk.Type(),
    73  			Mode:                    fd.Mode,
    74  			ModTime:                 currentTime,
    75  			staticErasureCode:       fd.ErasureCode,
    76  			StaticErasureCodeType:   ecType,
    77  			StaticErasureCodeParams: ecParams,
    78  			StaticPagesPerChunk:     numChunkPagesRequired(fd.ErasureCode.NumPieces()),
    79  			StaticPieceSize:         fd.PieceSize,
    80  			UniqueID:                SiafileUID(fd.UID),
    81  		},
    82  		deps:            modules.ProdDependencies,
    83  		deleted:         fd.Deleted,
    84  		partialsSiaFile: partialsSiaFile,
    85  		siaFilePath:     siaPath.SiaFileSysPath(sfs.staticSiaFileDir),
    86  		wal:             sfs.wal,
    87  	}
    88  	// Update cached fields for 0-Byte files.
    89  	if file.staticMetadata.FileSize == 0 {
    90  		file.staticMetadata.CachedHealth = 0
    91  		file.staticMetadata.CachedStuckHealth = 0
    92  		file.staticMetadata.CachedRedundancy = float64(fd.ErasureCode.NumPieces()) / float64(fd.ErasureCode.MinPieces())
    93  		file.staticMetadata.CachedUserRedundancy = file.staticMetadata.CachedRedundancy
    94  		file.staticMetadata.CachedUploadProgress = 100
    95  	}
    96  
    97  	// Create the chunks.
    98  	chunks := make([]chunk, len(fd.Chunks))
    99  	for i := range chunks {
   100  		chunks[i].Pieces = make([][]piece, file.staticMetadata.staticErasureCode.NumPieces())
   101  		chunks[i].Index = i
   102  	}
   103  
   104  	// Populate the pubKeyTable of the file and add the pieces.
   105  	pubKeyMap := make(map[string]uint32)
   106  	for chunkIndex, chunk := range fd.Chunks {
   107  		for pieceIndex, pieceSet := range chunk.Pieces {
   108  			for _, p := range pieceSet {
   109  				// Check if we already added that public key.
   110  				tableOffset, exists := pubKeyMap[string(p.HostPubKey.Key)]
   111  				if !exists {
   112  					tableOffset = uint32(len(file.pubKeyTable))
   113  					pubKeyMap[string(p.HostPubKey.Key)] = tableOffset
   114  					file.pubKeyTable = append(file.pubKeyTable, HostPublicKey{
   115  						PublicKey: p.HostPubKey,
   116  						Used:      true,
   117  					})
   118  				}
   119  				// Add the piece to the SiaFile.
   120  				chunks[chunkIndex].Pieces[pieceIndex] = append(chunks[chunkIndex].Pieces[pieceIndex], piece{
   121  					HostTableOffset: tableOffset,
   122  					MerkleRoot:      p.MerkleRoot,
   123  				})
   124  			}
   125  		}
   126  	}
   127  	entry, err := sfs.newSiaFileSetEntry(file)
   128  	if err != nil {
   129  		return nil, err
   130  	}
   131  	threadUID := randomThreadUID()
   132  	entry.threadMap[threadUID] = newThreadInfo()
   133  	sfse := &SiaFileSetEntry{
   134  		siaFileSetEntry: entry,
   135  		threadUID:       threadUID,
   136  	}
   137  
   138  	// Save file to disk.
   139  	if err := file.saveFile(chunks); err != nil {
   140  		return nil, errors.AddContext(err, "unable to save file")
   141  	}
   142  
   143  	// Update the cached fields for progress and uploaded bytes.
   144  	_, _, err = file.UploadProgressAndBytes()
   145  	return sfse, err
   146  }