gitlab.com/SiaPrime/SiaPrime@v1.4.1/modules/host/persist_compat_1.2.0.go (about)

     1  package host
     2  
     3  import (
     4  	"encoding/hex"
     5  	"encoding/json"
     6  	"errors"
     7  	"io/ioutil"
     8  	"os"
     9  	"path/filepath"
    10  	"sync"
    11  
    12  	bolt "github.com/coreos/bbolt"
    13  
    14  	"gitlab.com/SiaPrime/SiaPrime/build"
    15  	"gitlab.com/SiaPrime/SiaPrime/crypto"
    16  	"gitlab.com/SiaPrime/SiaPrime/modules"
    17  	"gitlab.com/SiaPrime/SiaPrime/persist"
    18  	"gitlab.com/SiaPrime/SiaPrime/types"
    19  )
    20  
const (
	// contractManagerStorageFolderGranularity is a mirror of the storage
	// folder granularity constant in the contract manager. The two values need
	// to remain equal, however it is unlikely that it will ever change from
	// 64. The v1.2.0 upgrade grows storage folders and batch-reads legacy
	// sectors in multiples of this many sectors.
	contractManagerStorageFolderGranularity = 64

	// The directory names and filenames of legacy storage manager files.
	// All of them live under the host's persist directory.
	v112StorageManagerDBFilename      = "storagemanager.db"
	v112StorageManagerDir             = "storagemanager"
	v112StorageManagerPersistFilename = "storagemanager.json"
)
    33  
var (
	// minimumStorageFolderSize specifies the minimum storage folder size
	// accepted by the new contract manager, expressed in bytes
	// (contractManagerStorageFolderGranularity sectors).
	//
	// NOTE: This number needs to be kept in sync with the actual minimum
	// storage folder size of the contract manager, but it is unlikely that
	// synchronization would be lost.
	minimumStorageFolderSize = contractManagerStorageFolderGranularity * modules.SectorSize

	// v112PersistMetadata is the header of the v112 host persist file.
	v112PersistMetadata = persist.Metadata{
		Header:  "Sia Host",
		Version: "0.5",
	}

	// v112StorageManagerBucketSectorUsage is the name of the bucket that
	// contains all of the sector usage information in the v1.0.0 storage
	// manager.
	v112StorageManagerBucketSectorUsage = []byte("BucketSectorUsage")

	// v112StorageManagerDBMetadata contains the legacy metadata for the v1.0.0
	// storage manager database. The version is v0.6.0, as that is the last
	// time that compatibility was broken with the storage manager persist.
	v112StorageManagerDBMetadata = persist.Metadata{
		Header:  "Sia Storage Manager DB",
		Version: "0.6.0",
	}

	// v112StorageManagerMetadata contains the legacy metadata for the v1.0.0
	// storage manager persistence. The version is v0.6.0, as that is the last time
	// that compatibility was broken with the storage manager persist.
	v112StorageManagerMetadata = persist.Metadata{
		Header:  "Sia Storage Manager",
		Version: "0.6.0",
	}
)
    70  
type (
	// v112StorageManagerPersist contains the legacy fields necessary to load the
	// v1.0.0 storage manager persistence.
	v112StorageManagerPersist struct {
		SectorSalt     crypto.Hash
		StorageFolders []*v112StorageManagerStorageFolder
	}

	// v112StorageManagerSector defines a sector held by the v1.0.0 storage
	// manager, which includes the data itself as well as all of the associated
	// metadata.
	v112StorageManagerSector struct {
		Count int         // reference count; set to len(usage.Expiry) when read
		Data  []byte      // raw sector data; may be nil if the disk read failed
		Key   []byte      // key of the sector in the legacy sector-usage bucket
		Root  crypto.Hash // Merkle root of Data
	}

	// v112StorageManagerSectorUsage defines the sectorUsage struct for the
	// v1.0.0 storage manager, the data loaded from the sector database.
	v112StorageManagerSectorUsage struct {
		Corrupted     bool                // not consulted by the upgrade code in this file
		Expiry        []types.BlockHeight // one entry per reference to the sector
		StorageFolder []byte              // identifier of the folder holding the sector; hex-encoded to form the on-disk dir name
	}

	// v112StorageManagerStorageFolder contains the legacy fields necessary to load
	// the v1.0.0 storage manager persistence.
	v112StorageManagerStorageFolder struct {
		Path          string // location of the storage folder on disk
		Size          uint64 // total capacity of the folder in bytes
		SizeRemaining uint64 // not consulted by the upgrade code in this file
		UID           []byte // unique id; hex-encoded it names the folder's data file under the persist dir
	}
)
   106  
   107  // loadCompatV100 loads fields that have changed names or otherwise broken
   108  // compatibility with previous versions, enabling users to upgrade without
   109  // unexpected loss of data.
   110  //
   111  // COMPAT v1.0.0
   112  //
   113  // A spelling error in pre-1.0 versions means that, if this is the first time
   114  // running after an upgrade, the misspelled field needs to be transferred over.
   115  func (h *Host) loadCompatV100(p *persistence) error {
   116  	var compatPersistence struct {
   117  		FinancialMetrics struct {
   118  			PotentialStorageRevenue types.Currency `json:"potentialerevenue"`
   119  		}
   120  		Settings struct {
   121  			MinContractPrice          types.Currency `json:"contractprice"`
   122  			MinDownloadBandwidthPrice types.Currency `json:"minimumdownloadbandwidthprice"`
   123  			MinStoragePrice           types.Currency `json:"storageprice"`
   124  			MinUploadBandwidthPrice   types.Currency `json:"minimumuploadbandwidthprice"`
   125  		}
   126  	}
   127  	err := h.dependencies.LoadFile(v112PersistMetadata, &compatPersistence, filepath.Join(h.persistDir, settingsFile))
   128  	if err != nil {
   129  		return err
   130  	}
   131  	// Load the compat values, but only if the compat values are non-zero and
   132  	// the real values are zero.
   133  	if !compatPersistence.FinancialMetrics.PotentialStorageRevenue.IsZero() && p.FinancialMetrics.PotentialStorageRevenue.IsZero() {
   134  		h.financialMetrics.PotentialStorageRevenue = compatPersistence.FinancialMetrics.PotentialStorageRevenue
   135  	}
   136  	if !compatPersistence.Settings.MinContractPrice.IsZero() && p.Settings.MinContractPrice.IsZero() {
   137  		h.settings.MinContractPrice = compatPersistence.Settings.MinContractPrice
   138  	}
   139  	if !compatPersistence.Settings.MinDownloadBandwidthPrice.IsZero() && p.Settings.MinDownloadBandwidthPrice.IsZero() {
   140  		h.settings.MinDownloadBandwidthPrice = compatPersistence.Settings.MinDownloadBandwidthPrice
   141  	}
   142  	if !compatPersistence.Settings.MinStoragePrice.IsZero() && p.Settings.MinStoragePrice.IsZero() {
   143  		h.settings.MinStoragePrice = compatPersistence.Settings.MinStoragePrice
   144  	}
   145  	if !compatPersistence.Settings.MinUploadBandwidthPrice.IsZero() && p.Settings.MinUploadBandwidthPrice.IsZero() {
   146  		h.settings.MinUploadBandwidthPrice = compatPersistence.Settings.MinUploadBandwidthPrice
   147  	}
   148  	return nil
   149  }
   150  
// readAndDeleteV112Sectors reads some sectors from the v1.0.0 storage
// manager, deleting them from disk and returning. This clears up disk space
// for the new contract manager, though puts the data at risk of loss in the
// event of a power interruption. Risk window is small, amount of data at risk
// is small, so this is acceptable.
//
// At most numToFetch sectors are returned in total, and at most
// contractManagerStorageFolderGranularity sectors are taken from any single
// legacy storage folder per call. A returned sector's Data field may be nil
// when the disk read failed; read and remove failures are logged, not
// returned. The usage entries of all returned sectors are deleted from the
// legacy database before the transaction commits.
//
// NOTE: oldPersist is not read by this function; it is kept in the signature
// for symmetry with the upgrade call sites.
func (h *Host) readAndDeleteV112Sectors(oldPersist *v112StorageManagerPersist, oldDB *persist.BoltDatabase, numToFetch int) (sectors []v112StorageManagerSector, err error) {
	err = oldDB.Update(func(tx *bolt.Tx) error {
		// Read at most contractManagerStorageFolderGranularity sectors per
		// storage folder.
		sectorsPerStorageFolder := make(map[string]int)

		bucket := tx.Bucket(v112StorageManagerBucketSectorUsage)
		i := 0
		c := bucket.Cursor()
		for sectorKey, sectorUsageBytes := c.First(); sectorUsageBytes != nil && i < numToFetch; sectorKey, sectorUsageBytes = c.Next() {
			var usage v112StorageManagerSectorUsage
			err := json.Unmarshal(sectorUsageBytes, &usage)
			if err != nil {
				// Skip entries whose usage metadata cannot be decoded.
				continue
			}

			// Don't read more than contractManagerStorageFolderGranularity
			// sectors per storage folder.
			readSoFar := sectorsPerStorageFolder[string(usage.StorageFolder)]
			if readSoFar >= contractManagerStorageFolderGranularity {
				continue
			}
			sectorsPerStorageFolder[string(usage.StorageFolder)]++

			// Read the sector from disk. On failure, sectorData is nil and
			// the sector is still collected below (callers skip nil data).
			sectorFilename := filepath.Join(h.persistDir, v112StorageManagerDir, hex.EncodeToString(usage.StorageFolder), string(sectorKey))
			sectorData, err := ioutil.ReadFile(sectorFilename)
			if err != nil {
				h.log.Println("Unable to read a sector from the legacy storage manager during host upgrade:", err)
			}

			// Delete the sector from disk.
			err = os.Remove(sectorFilename)
			if err != nil {
				h.log.Println("unable to remove sector from the legacy storage manager, be sure to remove manually:", err)
			}

			// NOTE(review): sectorKey points into bolt-managed memory that is
			// only valid for the life of the transaction; it is only used
			// again inside this transaction (the Delete loop below), but the
			// returned sectors retain it — confirm callers never dereference
			// Key after this function returns.
			sector := v112StorageManagerSector{
				Count: len(usage.Expiry),
				Data:  sectorData,
				Key:   sectorKey,
				Root:  crypto.MerkleRoot(sectorData),
			}
			sectors = append(sectors, sector)
			i++
		}

		// Delete the usage data from the storage manager db for each of the
		// sectors.
		for _, sector := range sectors {
			err := bucket.Delete(sector.Key)
			if err != nil {
				h.log.Println("Unable to delete a sector from the bucket, the sector could not be found:", err)
			}
		}
		return nil
	})
	return sectors, err
}
   215  
   216  // upgradeFromV112toV120 is an upgrade layer that migrates the host from
   217  // the old storage manager to the new contract manager. This particular upgrade
   218  // only handles migrating the sectors.
   219  func (h *Host) upgradeFromV112ToV120() error {
   220  	h.log.Println("Attempting an upgrade for the host from v1.0.0 to v1.2.0")
   221  
   222  	// Sanity check - the upgrade will not work if the contract manager has not
   223  	// been loaded yet.
   224  	if h.StorageManager == nil {
   225  		return errors.New("cannot perform host upgrade - the contract manager must not be nil")
   226  	}
   227  
   228  	// Fetch the old set of storage folders, and create analogous storage
   229  	// folders in the contract manager. But create them to have sizes of zero,
   230  	// and grow them 112 sectors at a time. This is to make sure the user does
   231  	// not run out of disk space during the upgrade.
   232  	oldPersist := new(v112StorageManagerPersist)
   233  	err := persist.LoadJSON(v112StorageManagerMetadata, oldPersist, filepath.Join(h.persistDir, v112StorageManagerDir, v112StorageManagerPersistFilename))
   234  	if err != nil {
   235  		return build.ExtendErr("unable to load the legacy storage manager persist", err)
   236  	}
   237  
   238  	// Open the old storagemanager database.
   239  	oldDB, err := persist.OpenDatabase(v112StorageManagerDBMetadata, filepath.Join(h.persistDir, v112StorageManagerDir, v112StorageManagerDBFilename))
   240  	if err != nil {
   241  		return build.ExtendErr("unable to open the legacy storage manager database", err)
   242  	}
   243  
   244  	// Create a map from old storage folders to their capacity.
   245  	smFolderCapacities := make(map[string]uint64)
   246  	for _, smFolder := range oldPersist.StorageFolders {
   247  		smFolderCapacities[smFolder.Path] = smFolder.Size
   248  	}
   249  
   250  	// Fetch the set of storage folders already in the current contract
   251  	// manager. When replacing existing storage folders in the storage manager,
   252  	// duplicates will be avoided. Duplicates would otherwise be likely in the
   253  	// event of a power outage during the upgrade.
   254  	currentPaths := make(map[string]struct{})
   255  	currentStorageFolders := h.StorageFolders()
   256  	for _, sf := range currentStorageFolders {
   257  		currentPaths[sf.Path] = struct{}{}
   258  	}
   259  
   260  	// Count the number of storage folders that need to be created in the
   261  	// contract manager.
   262  	var newFoldersNeeded int
   263  	for _, sf := range oldPersist.StorageFolders {
   264  		_, exists := currentPaths[sf.Path]
   265  		if !exists {
   266  			newFoldersNeeded++
   267  		}
   268  	}
   269  
   270  	// Pre-emptively read some sectors from the storage manager. This will
   271  	// clear up space on disk to make room for the contract manager folders.
   272  	//
   273  	// NOTE: The sectorData returned for the sectors may be 'nil' if there
   274  	// were disk I/O errors.
   275  	sectors, err := h.readAndDeleteV112Sectors(oldPersist, oldDB, contractManagerStorageFolderGranularity*newFoldersNeeded)
   276  	if err != nil {
   277  		h.log.Println("Error reading sectors from legacy storage manager:", err)
   278  	}
   279  
   280  	// Iterate through each storage folder and create analogous storage folders
   281  	// in the new contract manager. These storage folders may already exist in
   282  	// the new contract manager.
   283  	for _, sf := range oldPersist.StorageFolders {
   284  		// Nothing to do if the contract manager already has this storage
   285  		// folder (unusually situation though).
   286  		_, exists := currentPaths[sf.Path]
   287  		if exists {
   288  			continue
   289  		}
   290  
   291  		// Create a storage folder in the contract manager for the
   292  		// corresponding storage folder in the storage manager.
   293  		err := h.AddStorageFolder(sf.Path, minimumStorageFolderSize)
   294  		if err != nil {
   295  			h.log.Println("Unable to create a storage folder in the contract manager:", err)
   296  			continue
   297  		}
   298  	}
   299  
   300  	// Add all of the preloaded sectors to the contract manager.
   301  	var wg sync.WaitGroup
   302  	for _, sector := range sectors {
   303  		for i := 0; i < sector.Count; i++ {
   304  			if uint64(len(sector.Data)) == modules.SectorSize {
   305  				wg.Add(1)
   306  				go func(sector v112StorageManagerSector) {
   307  					err := h.AddSector(sector.Root, sector.Data)
   308  					if err != nil {
   309  						err = build.ExtendErr("Unable to add legacy sector to the upgraded contract manager:", err)
   310  						h.log.Println(err)
   311  					}
   312  					wg.Done()
   313  				}(sector)
   314  			}
   315  		}
   316  	}
   317  	wg.Wait()
   318  
   319  	// Read sectors from the storage manager database until all of the sectors
   320  	// have been read.
   321  	for {
   322  		// Determine whether any of the storage folders need to be grown.
   323  		var canGrow int
   324  		cmFolders := h.StorageFolders()
   325  		for _, cmFolder := range cmFolders {
   326  			finalCapacity := smFolderCapacities[cmFolder.Path]
   327  			if cmFolder.Capacity < finalCapacity-(modules.SectorSize*contractManagerStorageFolderGranularity) {
   328  				canGrow++
   329  			}
   330  		}
   331  
   332  		// Read some sectors from the storage manager.
   333  		//
   334  		// NOTE: The sectorData returned for the sectors may be 'nil' if there
   335  		// were disk I/O errors.
   336  		sectors, err := h.readAndDeleteV112Sectors(oldPersist, oldDB, contractManagerStorageFolderGranularity*canGrow)
   337  		if err != nil {
   338  			h.log.Println("Error reading sectors from legacy storage manager:", err)
   339  			continue
   340  		}
   341  		// Break condition - if no sectors were read, the migration is
   342  		// complete.
   343  		if len(sectors) == 0 {
   344  			break
   345  		}
   346  
   347  		// Grow the storage folders that are able to be grown.
   348  		for _, cmFolder := range cmFolders {
   349  			finalCapacity := smFolderCapacities[cmFolder.Path]
   350  			if cmFolder.Capacity < finalCapacity-(modules.SectorSize*contractManagerStorageFolderGranularity) {
   351  				err := h.ResizeStorageFolder(cmFolder.Index, cmFolder.Capacity+(modules.SectorSize*contractManagerStorageFolderGranularity), false)
   352  				if err != nil {
   353  					err = build.ExtendErr("unable to resize storage folder during host upgrade:", err)
   354  					h.log.Println(err)
   355  					continue
   356  				}
   357  			}
   358  		}
   359  
   360  		// Add the sectors to the contract manager.
   361  		var wg sync.WaitGroup
   362  		for _, sector := range sectors {
   363  			for i := 0; i < sector.Count; i++ {
   364  				if uint64(len(sector.Data)) == modules.SectorSize {
   365  					wg.Add(1)
   366  					go func(sector v112StorageManagerSector) {
   367  						err := h.AddSector(sector.Root, sector.Data)
   368  						if err != nil {
   369  							err = build.ExtendErr("Unable to add legacy sector to the upgraded contract manager:", err)
   370  							h.log.Println(err)
   371  						}
   372  						wg.Done()
   373  					}(sector)
   374  				}
   375  			}
   376  		}
   377  		wg.Wait()
   378  	}
   379  
   380  	// Save the desired storage folder sizes before closing out the old persist.
   381  	cmFolders := h.StorageFolders()
   382  
   383  	// Clean up up the old storage manager before growing the storage folders.
   384  	// An interruption during the growing phase should result in storage folders
   385  	// that are whatever size they were left off at.
   386  	err = oldDB.Close()
   387  	if err != nil {
   388  		h.log.Println("Unable to close old database during v1.2.0 compat upgrade", err)
   389  	}
   390  	// Try loading the persist again.
   391  	p := new(persistence)
   392  	err = h.dependencies.LoadFile(v112PersistMetadata, p, filepath.Join(h.persistDir, settingsFile))
   393  	if err != nil {
   394  		return build.ExtendErr("upgrade appears complete, but having difficulties reloading host after upgrade", err)
   395  	}
   396  	h.loadPersistObject(p)
   397  	// Apply the v100 compat upgrade in case the host is loading from a
   398  	// version between v1.0.0 and v1.1.2.
   399  	err = h.loadCompatV100(p)
   400  	if err != nil {
   401  		return build.ExtendErr("upgrade appears complete, but having trouble reloading:", err)
   402  	}
   403  	// Save the updated persist so that the upgrade is not triggered again.
   404  	err = h.saveSync()
   405  	if err != nil {
   406  		return build.ExtendErr("upgrade appears complete, but final save has failed (upgrade likely successful", err)
   407  	}
   408  	// Delete the storage manager files. Note that this must happen after the
   409  	// complete upgrade, including a finishing call to saveSync().
   410  	for _, sf := range oldPersist.StorageFolders {
   411  		err = os.Remove(filepath.Join(h.persistDir, v112StorageManagerDir, hex.EncodeToString(sf.UID)))
   412  		if err != nil {
   413  			h.log.Println("Unable to remove legacy contract manager files:", err)
   414  		}
   415  	}
   416  	err = os.Remove(filepath.Join(h.persistDir, v112StorageManagerDir, v112StorageManagerPersistFilename))
   417  	if err != nil {
   418  		h.log.Println("Unable to remove legacy persist files:", err)
   419  	}
   420  	oldDB.Close()
   421  	err = os.Remove(filepath.Join(h.persistDir, v112StorageManagerDir, v112StorageManagerDBFilename))
   422  	if err != nil {
   423  		h.log.Println("Unable to close legacy database:", err)
   424  	}
   425  
   426  	// Resize any remaining folders to their full size.
   427  	for _, cmFolder := range cmFolders {
   428  		finalCapacity := smFolderCapacities[cmFolder.Path]
   429  		finalCapacity -= finalCapacity % (modules.SectorSize * contractManagerStorageFolderGranularity)
   430  		if cmFolder.Capacity < finalCapacity {
   431  			err := h.ResizeStorageFolder(cmFolder.Index, finalCapacity, false)
   432  			if err != nil {
   433  				err = build.ExtendErr("unable to resize storage folder during host upgrade", err)
   434  				h.log.Println(err)
   435  				continue
   436  			}
   437  		}
   438  	}
   439  	return nil
   440  }