gitlab.com/SiaPrime/SiaPrime@v1.4.1/modules/renter/persist_compat.go (about)

     1  package renter
     2  
     3  import (
     4  	"compress/gzip"
     5  	"io"
     6  	"io/ioutil"
     7  	"os"
     8  	"path/filepath"
     9  	"strconv"
    10  	"sync"
    11  
    12  	"gitlab.com/NebulousLabs/errors"
    13  
    14  	"gitlab.com/SiaPrime/SiaPrime/build"
    15  	"gitlab.com/SiaPrime/SiaPrime/crypto"
    16  	"gitlab.com/SiaPrime/SiaPrime/encoding"
    17  	"gitlab.com/SiaPrime/SiaPrime/modules"
    18  	"gitlab.com/SiaPrime/SiaPrime/modules/renter/siadir"
    19  	"gitlab.com/SiaPrime/SiaPrime/modules/renter/siafile"
    20  	"gitlab.com/SiaPrime/SiaPrime/persist"
    21  	"gitlab.com/SiaPrime/SiaPrime/types"
    22  )
    23  
// v137Persistence is the persistence struct of a renter that doesn't use the
// new SiaFile format yet. It mirrors the JSON layout written to disk by
// v1.3.7 and earlier, and is only read here to upgrade that data.
type v137Persistence struct {
	MaxDownloadSpeed int64  // bandwidth cap on downloads; presumably bytes/sec — TODO confirm against settings code
	MaxUploadSpeed   int64  // bandwidth cap on uploads; presumably bytes/sec — TODO confirm against settings code
	StreamCacheSize  uint64 // size of the streaming cache in the legacy renter
	// Tracking maps a file's name to the on-disk path used for repairing it;
	// consumed by compatV137ConvertSiaFiles when converting legacy siafiles.
	Tracking map[string]v137TrackedFile
}
    32  
// v137TrackedFile is the tracking information stored about a file on a legacy
// renter.
type v137TrackedFile struct {
	// RepairPath is the local path from which the file can be re-uploaded
	// (used as the SiaFile's repair path during conversion).
	RepairPath string
}
    38  
// The v1.3.7 in-memory file format.
//
// A file is a single file that has been uploaded to the network. Files are
// split into equal-length chunks, which are then erasure-coded into pieces.
// Each piece is separately encrypted, using a key derived from the file's
// master key. The pieces are uploaded to hosts in groups, such that one file
// contract covers many pieces.
//
// NOTE: the field order of the "easy fields" (name, size, masterKey,
// pieceSize, mode) is the legacy on-disk encoding order used by
// MarshalSia/UnmarshalSia — do not reorder.
type file struct {
	name        string // the file's sia name/nickname, used as a map key into the tracking map
	size        uint64 // Static - can be accessed without lock.
	contracts   map[types.FileContractID]fileContract // keyed by contract ID (see UnmarshalSia)
	masterKey   [crypto.EntropySize]byte // Static - can be accessed without lock.
	erasureCode modules.ErasureCoder     // Static - can be accessed without lock.
	pieceSize   uint64                   // Static - can be accessed without lock.
	mode        uint32                   // actually an os.FileMode
	deleted     bool                     // indicates if the file has been deleted.

	staticUID string // A UID assigned to the file when it gets created.

	mu sync.RWMutex
}
    60  
// The v1.3.7 in-memory format for a contract used by the v1.3.7 file format.
//
// A fileContract is a contract covering an arbitrary number of file pieces.
// Chunk/Piece metadata is used to split the raw contract data appropriately.
//
// NOTE: instances are encoded/decoded field-by-field via the encoding
// package (see MarshalSia/UnmarshalSia), so the field order is part of the
// legacy on-disk format — do not reorder.
type fileContract struct {
	ID     types.FileContractID // id of the contract storing these pieces
	IP     modules.NetAddress   // network address of the host
	Pieces []pieceData          // metadata for every piece stored under this contract

	WindowStart types.BlockHeight // block height at which the contract's window starts
}
    72  
// The v1.3.7 in-memory format for a piece used by the v1.3.7 file format.
//
// pieceData contains the metadata necessary to request a piece from a
// fetcher.
//
// NOTE(review): Chunk and Piece come straight from legacy share data and are
// later used as slice indices in v137FileToSiaFile — they appear to be
// trusted as in-range; verify before feeding untrusted shares through.
//
// TODO: Add an 'Unavailable' flag that can be set if the host loses the piece.
// Some TODOs exist in 'repair.go' related to this field.
type pieceData struct {
	Chunk      uint64      // which chunk the piece belongs to
	Piece      uint64      // the index of the piece in the chunk
	MerkleRoot crypto.Hash // the Merkle root of the piece
}
    85  
    86  // numChunks returns the number of chunks that f was split into.
    87  func (f *file) numChunks() uint64 {
    88  	// empty files still need at least one chunk
    89  	if f.size == 0 {
    90  		return 1
    91  	}
    92  	n := f.size / f.staticChunkSize()
    93  	// last chunk will be padded, unless chunkSize divides file evenly.
    94  	if f.size%f.staticChunkSize() != 0 {
    95  		n++
    96  	}
    97  	return n
    98  }
    99  
   100  // staticChunkSize returns the size of one chunk.
   101  func (f *file) staticChunkSize() uint64 {
   102  	return f.pieceSize * uint64(f.erasureCode.MinPieces())
   103  }
   104  
   105  // MarshalSia implements the encoding.SiaMarshaller interface, writing the
   106  // file data to w.
   107  func (f *file) MarshalSia(w io.Writer) error {
   108  	enc := encoding.NewEncoder(w)
   109  
   110  	// encode easy fields
   111  	err := enc.EncodeAll(
   112  		f.name,
   113  		f.size,
   114  		f.masterKey,
   115  		f.pieceSize,
   116  		f.mode,
   117  	)
   118  	if err != nil {
   119  		return err
   120  	}
   121  	// COMPATv0.4.3 - encode the bytesUploaded and chunksUploaded fields
   122  	// TODO: the resulting .sia file may confuse old clients.
   123  	err = enc.EncodeAll(f.pieceSize*f.numChunks()*uint64(f.erasureCode.NumPieces()), f.numChunks())
   124  	if err != nil {
   125  		return err
   126  	}
   127  
   128  	// encode erasureCode
   129  	switch code := f.erasureCode.(type) {
   130  	case *siafile.RSCode:
   131  		err = enc.EncodeAll(
   132  			"Reed-Solomon",
   133  			uint64(code.MinPieces()),
   134  			uint64(code.NumPieces()-code.MinPieces()),
   135  		)
   136  		if err != nil {
   137  			return err
   138  		}
   139  	default:
   140  		if build.DEBUG {
   141  			panic("unknown erasure code")
   142  		}
   143  		return errors.New("unknown erasure code")
   144  	}
   145  	// encode contracts
   146  	if err := enc.Encode(uint64(len(f.contracts))); err != nil {
   147  		return err
   148  	}
   149  	for _, c := range f.contracts {
   150  		if err := enc.Encode(c); err != nil {
   151  			return err
   152  		}
   153  	}
   154  	return nil
   155  }
   156  
   157  // UnmarshalSia implements the encoding.SiaUnmarshaler interface,
   158  // reconstructing a file from the encoded bytes read from r.
   159  func (f *file) UnmarshalSia(r io.Reader) error {
   160  	dec := encoding.NewDecoder(r, 100e6)
   161  
   162  	// COMPATv0.4.3 - decode bytesUploaded and chunksUploaded into dummy vars.
   163  	var bytesUploaded, chunksUploaded uint64
   164  
   165  	// Decode easy fields.
   166  	err := dec.DecodeAll(
   167  		&f.name,
   168  		&f.size,
   169  		&f.masterKey,
   170  		&f.pieceSize,
   171  		&f.mode,
   172  		&bytesUploaded,
   173  		&chunksUploaded,
   174  	)
   175  	if err != nil {
   176  		return err
   177  	}
   178  	f.staticUID = persist.RandomSuffix()
   179  
   180  	// Decode erasure coder.
   181  	var codeType string
   182  	if err := dec.Decode(&codeType); err != nil {
   183  		return err
   184  	}
   185  	switch codeType {
   186  	case "Reed-Solomon":
   187  		var nData, nParity uint64
   188  		err = dec.DecodeAll(
   189  			&nData,
   190  			&nParity,
   191  		)
   192  		if err != nil {
   193  			return err
   194  		}
   195  		rsc, err := siafile.NewRSCode(int(nData), int(nParity))
   196  		if err != nil {
   197  			return err
   198  		}
   199  		f.erasureCode = rsc
   200  	default:
   201  		return errors.New("unrecognized erasure code type: " + codeType)
   202  	}
   203  
   204  	// Decode contracts.
   205  	var nContracts uint64
   206  	if err := dec.Decode(&nContracts); err != nil {
   207  		return err
   208  	}
   209  	f.contracts = make(map[types.FileContractID]fileContract)
   210  	var contract fileContract
   211  	for i := uint64(0); i < nContracts; i++ {
   212  		if err := dec.Decode(&contract); err != nil {
   213  			return err
   214  		}
   215  		f.contracts[contract.ID] = contract
   216  	}
   217  	return nil
   218  }
   219  
   220  // loadSiaFiles walks through the directory searching for siafiles and loading
   221  // them into memory.
   222  func (r *Renter) compatV137ConvertSiaFiles(tracking map[string]v137TrackedFile, oldContracts []modules.RenterContract) error {
   223  	// Recursively convert all files found in renter directory.
   224  	err := filepath.Walk(r.persistDir, func(path string, info os.FileInfo, err error) error {
   225  		// This error is non-nil if filepath.Walk couldn't stat a file or
   226  		// folder.
   227  		if err != nil {
   228  			r.log.Println("WARN: could not stat file or folder during walk:", err)
   229  			return nil
   230  		}
   231  
   232  		// Skip folders and non-sia files.
   233  		if info.IsDir() || filepath.Ext(path) != modules.SiaFileExtension {
   234  			return nil
   235  		}
   236  
   237  		// Check if file was already converted.
   238  		_, err = siafile.LoadSiaFile(path, r.wal)
   239  		if err == nil {
   240  			return nil
   241  		}
   242  
   243  		// Open the file.
   244  		file, err := os.Open(path)
   245  		if err != nil {
   246  			return errors.AddContext(err, "unable to open file for conversion"+path)
   247  		}
   248  
   249  		// Load the file contents into the renter.
   250  		_, err = r.compatV137loadSiaFilesFromReader(file, tracking, oldContracts)
   251  		if err != nil {
   252  			err = errors.AddContext(err, "unable to load v137 siafiles from reader")
   253  			return errors.Compose(err, file.Close())
   254  		}
   255  
   256  		// Close the file and delete it since it was converted.
   257  		if err := file.Close(); err != nil {
   258  			return err
   259  		}
   260  		return os.Remove(path)
   261  	})
   262  	if err != nil {
   263  		return err
   264  	}
   265  	// Cleanup folders in the renter subdir.
   266  	fis, err := ioutil.ReadDir(r.persistDir)
   267  	if err != nil {
   268  		return err
   269  	}
   270  	for _, fi := range fis {
   271  		// Ignore files.
   272  		if !fi.IsDir() {
   273  			continue
   274  		}
   275  		// Skip siafiles and contracts folders.
   276  		if fi.Name() == modules.SiapathRoot || fi.Name() == "contracts" {
   277  			continue
   278  		}
   279  		// Delete the folder.
   280  		if err := os.RemoveAll(filepath.Join(r.persistDir, fi.Name())); err != nil {
   281  			return err
   282  		}
   283  	}
   284  	return nil
   285  }
   286  
   287  // v137FileToSiaFile converts a legacy file to a SiaFile. Fields that can't be
   288  // populated using the legacy file remain blank.
   289  func (r *Renter) v137FileToSiaFile(f *file, repairPath string, oldContracts []modules.RenterContract) (*siafile.SiaFileSetEntry, error) {
   290  	// Create a mapping of contract ids to host keys.
   291  	contracts := r.hostContractor.Contracts()
   292  	idToPk := make(map[types.FileContractID]types.SiaPublicKey)
   293  	for _, c := range contracts {
   294  		idToPk[c.ID] = c.HostPublicKey
   295  	}
   296  	// Add old contracts to the mapping too.
   297  	for _, c := range oldContracts {
   298  		idToPk[c.ID] = c.HostPublicKey
   299  	}
   300  
   301  	fileData := siafile.FileData{
   302  		Name:        f.name,
   303  		FileSize:    f.size,
   304  		MasterKey:   f.masterKey,
   305  		ErasureCode: f.erasureCode,
   306  		RepairPath:  repairPath,
   307  		PieceSize:   f.pieceSize,
   308  		Mode:        os.FileMode(f.mode),
   309  		Deleted:     f.deleted,
   310  		UID:         siafile.SiafileUID(f.staticUID),
   311  	}
   312  	chunks := make([]siafile.FileChunk, f.numChunks())
   313  	for i := 0; i < len(chunks); i++ {
   314  		chunks[i].Pieces = make([][]siafile.Piece, f.erasureCode.NumPieces())
   315  	}
   316  	for _, contract := range f.contracts {
   317  		pk, exists := idToPk[contract.ID]
   318  		if !exists {
   319  			r.log.Printf("Couldn't find pubKey for contract %v with WindowStart %v",
   320  				contract.ID, contract.WindowStart)
   321  			continue
   322  		}
   323  
   324  		for _, piece := range contract.Pieces {
   325  			// Make sure we don't add the same piece on the same host multiple
   326  			// times.
   327  			duplicate := false
   328  			for _, p := range chunks[piece.Chunk].Pieces[piece.Piece] {
   329  				if p.HostPubKey.String() == pk.String() {
   330  					duplicate = true
   331  					break
   332  				}
   333  			}
   334  			if duplicate {
   335  				continue
   336  			}
   337  			chunks[piece.Chunk].Pieces[piece.Piece] = append(chunks[piece.Chunk].Pieces[piece.Piece], siafile.Piece{
   338  				HostPubKey: pk,
   339  				MerkleRoot: piece.MerkleRoot,
   340  			})
   341  		}
   342  	}
   343  	fileData.Chunks = chunks
   344  	return r.staticFileSet.NewFromLegacyData(fileData)
   345  }
   346  
   347  // compatV137LoadSiaFilesFromReader reads .sia data from reader and registers
   348  // the contained files in the renter. It returns the nicknames of the loaded
   349  // files.
   350  func (r *Renter) compatV137loadSiaFilesFromReader(reader io.Reader, tracking map[string]v137TrackedFile, oldContracts []modules.RenterContract) ([]string, error) {
   351  	// read header
   352  	var header [15]byte
   353  	var version string
   354  	var numFiles uint64
   355  	err := encoding.NewDecoder(reader, encoding.DefaultAllocLimit).DecodeAll(
   356  		&header,
   357  		&version,
   358  		&numFiles,
   359  	)
   360  	if err != nil {
   361  		return nil, errors.AddContext(err, "unable to read header")
   362  	} else if header != shareHeader {
   363  		return nil, ErrBadFile
   364  	} else if version != shareVersion {
   365  		return nil, ErrIncompatible
   366  	}
   367  
   368  	// Create decompressor.
   369  	unzip, err := gzip.NewReader(reader)
   370  	if err != nil {
   371  		return nil, errors.AddContext(err, "unable to create gzip decompressor")
   372  	}
   373  	dec := encoding.NewDecoder(unzip, 100e6)
   374  
   375  	// Read each file.
   376  	files := make([]*file, numFiles)
   377  	for i := range files {
   378  		files[i] = new(file)
   379  		err := dec.Decode(files[i])
   380  		if err != nil {
   381  			return nil, errors.AddContext(err, "unable to decode file")
   382  		}
   383  
   384  		// Make sure the file's name does not conflict with existing files.
   385  		dupCount := 0
   386  		origName := files[i].name
   387  		for {
   388  			siaPath, err := modules.NewSiaPath(files[i].name)
   389  			if err != nil {
   390  				return nil, err
   391  			}
   392  			exists := r.staticFileSet.Exists(siaPath)
   393  			if !exists {
   394  				break
   395  			}
   396  			dupCount++
   397  			files[i].name = origName + "_" + strconv.Itoa(dupCount)
   398  		}
   399  	}
   400  
   401  	// Add files to renter.
   402  	names := make([]string, numFiles)
   403  	for i, f := range files {
   404  		// Figure out the repair path.
   405  		var repairPath string
   406  		tf, ok := tracking[f.name]
   407  		if ok {
   408  			repairPath = tf.RepairPath
   409  		}
   410  		// Create and add a siadir to the SiaDirSet if one has not been created
   411  		siaPath, err := modules.NewSiaPath(f.name)
   412  		if err != nil {
   413  			return nil, err
   414  		}
   415  		dirSiaPath, err := siaPath.Dir()
   416  		if err != nil {
   417  			return nil, err
   418  		}
   419  		sd, errDir := r.staticDirSet.NewSiaDir(dirSiaPath)
   420  		if errDir != nil && errDir != siadir.ErrPathOverload {
   421  			errDir = errors.AddContext(errDir, "unable to create new sia dir")
   422  			return nil, errors.Compose(err, errDir)
   423  		}
   424  		if errDir != siadir.ErrPathOverload {
   425  			err = errors.Compose(err, sd.Close())
   426  		}
   427  		// v137FileToSiaFile adds siafile to the SiaFileSet so it does not need to
   428  		// be returned here
   429  		entry, err := r.v137FileToSiaFile(f, repairPath, oldContracts)
   430  		if err != nil {
   431  			return nil, errors.AddContext(err, "unable to transform old file to new file")
   432  		}
   433  		names[i] = f.name
   434  		err = errors.Compose(err, entry.Close())
   435  	}
   436  	return names, err
   437  }
   438  
   439  // convertPersistVersionFrom133To140 upgrades a legacy persist file to the next
   440  // version, converting legacy SiaFiles in the process.
   441  func (r *Renter) convertPersistVersionFrom133To140(path string, oldContracts []modules.RenterContract) error {
   442  	metadata := persist.Metadata{
   443  		Header:  settingsMetadata.Header,
   444  		Version: persistVersion133,
   445  	}
   446  	p := v137Persistence{
   447  		Tracking: make(map[string]v137TrackedFile),
   448  	}
   449  
   450  	err := persist.LoadJSON(metadata, &p, path)
   451  	if err != nil {
   452  		return errors.AddContext(err, "could not load json")
   453  	}
   454  	metadata.Version = persistVersion140
   455  	// Load potential legacy SiaFiles.
   456  	if err := r.compatV137ConvertSiaFiles(p.Tracking, oldContracts); err != nil {
   457  		return errors.AddContext(err, "conversion from v137 failed")
   458  	}
   459  	err = persist.SaveJSON(metadata, p, path)
   460  	if err != nil {
   461  		return errors.AddContext(err, "could not save json")
   462  	}
   463  	return nil
   464  }
   465  
   466  // convertPersistVersionFrom040to133 upgrades a legacy persist file to the next
   467  // version, adding new fields with their default values.
   468  func convertPersistVersionFrom040To133(path string) error {
   469  	metadata := persist.Metadata{
   470  		Header:  settingsMetadata.Header,
   471  		Version: persistVersion040,
   472  	}
   473  	p := persistence{}
   474  
   475  	err := persist.LoadJSON(metadata, &p, path)
   476  	if err != nil {
   477  		return err
   478  	}
   479  	metadata.Version = persistVersion133
   480  	p.MaxDownloadSpeed = DefaultMaxDownloadSpeed
   481  	p.MaxUploadSpeed = DefaultMaxUploadSpeed
   482  	return persist.SaveJSON(metadata, p, path)
   483  }