gitlab.com/SiaPrime/SiaPrime@v1.4.1/modules/host/contractmanager/storagefolder.go (about)

     1  package contractmanager
     2  
     3  import (
     4  	"errors"
     5  	"fmt"
     6  	"math"
     7  	"os"
     8  	"path/filepath"
     9  	"sync/atomic"
    10  	"time"
    11  
    12  	"gitlab.com/NebulousLabs/fastrand"
    13  	"gitlab.com/SiaPrime/SiaPrime/modules"
    14  	"gitlab.com/SiaPrime/SiaPrime/sync"
    15  )
    16  
var (
	// errBadStorageFolderIndex is returned if a storage folder is requested
	// that does not have the correct index.
	errBadStorageFolderIndex = errors.New("no storage folder exists at that index")

	// errIncompleteOffload is returned when the host is tasked with offloading
	// sectors from a storage folder but is unable to offload the requested
	// number - but is able to offload some of them.
	errIncompleteOffload = errors.New("could not successfully offload specified number of sectors from storage folder")

	// errInsufficientRemainingStorageForRemoval is returned if the remaining
	// storage folders do not have enough space remaining to support being
	// removed.
	errInsufficientRemainingStorageForRemoval = errors.New("not enough storage remaining to support removal of disk")

	// errInsufficientRemainingStorageForShrink is returned if the remaining
	// storage folders do not have enough space remaining to support being
	// reduced in size.
	errInsufficientRemainingStorageForShrink = errors.New("not enough storage remaining to support shrinking of disk")

	// ErrLargeStorageFolder is returned if a new storage folder or a resized
	// storage folder would exceed the maximum allowed size.
	ErrLargeStorageFolder = fmt.Errorf("maximum allowed size for a storage folder is %v bytes", MaximumSectorsPerStorageFolder*modules.SectorSize)

	// errMaxStorageFolders indicates that the limit on the number of allowed
	// storage folders has been reached.
	errMaxStorageFolders = fmt.Errorf("host can only accept up to %v storage folders", maximumStorageFolders)

	// errNoFreeSectors is returned if there are no free sectors in the usage
	// array fed to randFreeSector. This error should never be returned in
	// practice, as the contract manager should have sufficient internal
	// consistency to know in advance that there are no free sectors.
	errNoFreeSectors = errors.New("could not find a free sector in the usage array")

	// ErrNoResize is returned if a new size is provided for a storage folder
	// that is the same as the current size of the storage folder.
	ErrNoResize = errors.New("storage folder selected for resize, but new size is same as current size")

	// errRelativePath is returned if a path must be absolute.
	errRelativePath = errors.New("storage folder paths must be absolute")

	// ErrRepeatFolder is returned if a storage folder is added which links to
	// a path that is already in use by another storage folder. Only exact path
	// matches will trigger the error.
	ErrRepeatFolder = errors.New("selected path is already in use as a storage folder, please use 'resize'")

	// ErrSmallStorageFolder is returned if a new storage folder is not large
	// enough to meet the requirements for the minimum storage folder size.
	ErrSmallStorageFolder = fmt.Errorf("minimum allowed size for a storage folder is %v bytes", MinimumSectorsPerStorageFolder*modules.SectorSize)

	// errStorageFolderGranularity is returned if a call to AddStorageFolder
	// tries to use a storage folder size that does not evenly fit into a
	// factor of 8 sectors.
	errStorageFolderGranularity = fmt.Errorf("storage folder must be a factor of %v sectors", storageFolderGranularity)

	// errStorageFolderNotFolder is returned if a storage folder gets added
	// that is not a folder.
	errStorageFolderNotFolder = errors.New("must use an existing folder")

	// errStorageFolderNotFound is returned if a storage folder cannot be
	// found.
	errStorageFolderNotFound = errors.New("could not find storage folder with that id")
)
    80  
// storageFolder contains the metadata for a storage folder, including where
// sectors are being stored in the folder. What sectors are being stored is
// managed by the contract manager's sectorLocations map.
type storageFolder struct {
	// mu needs to be RLocked to safely write new sectors into the storage
	// folder. mu needs to be Locked when the folder is being added, removed,
	// or resized.
	//
	// NOTE: this field must come first in the struct to ensure proper
	// alignment — the atomic uint64 fields below are accessed with
	// sync/atomic, which requires 64-bit alignment on 32-bit platforms.
	mu sync.TryRWMutex

	// Progress statistics that can be reported to the user. Typically for long
	// running actions like adding or resizing a storage folder. Accessed
	// atomically.
	atomicProgressNumerator   uint64
	atomicProgressDenominator uint64

	// Disk statistics for this boot cycle. Accessed atomically; reset via
	// ResetStorageFolderHealth.
	atomicFailedReads      uint64
	atomicFailedWrites     uint64
	atomicSuccessfulReads  uint64
	atomicSuccessfulWrites uint64

	// Atomic bool indicating whether or not the storage folder is available. If
	// the storage folder is not available, it will still be loaded but return
	// an error if it is queried.
	atomicUnavailable uint64 // uint64 for alignment; 1 means unavailable

	// The index, path, and usage are all saved directly to disk.
	index uint16
	path  string
	usage []uint64 // bitfield; one bit per sector, storageFolderGranularity bits per element

	// availableSectors indicates sectors which are marked as consumed in the
	// usage field but are actually available. They cannot be marked as free in
	// the usage until the action which freed them has synced to disk, but the
	// settings should mark them as free during syncing.
	//
	// sectors is a count of the number of sectors in use according to the
	// usage field.
	availableSectors map[sectorID]uint32
	sectors          uint64

	// An open file handle is kept so that writes can easily be made to the
	// storage folder without needing to grab a new file handle. This also
	// makes it easy to do delayed-syncing.
	metadataFile modules.File
	sectorFile   modules.File
}
   130  
   131  // mostSignificantBit returns the index of the most significant bit of an input
   132  // value.
   133  func mostSignificantBit(i uint64) uint64 {
   134  	if i == 0 {
   135  		panic("no bits set in input")
   136  	}
   137  
   138  	bval := []uint64{0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}
   139  	r := uint64(0)
   140  	if i&0xffffffff00000000 != 0 {
   141  		r += 32
   142  		i = i >> 32
   143  	}
   144  	if i&0x00000000ffff0000 != 0 {
   145  		r += 16
   146  		i = i >> 16
   147  	}
   148  	if i&0x000000000000ff00 != 0 {
   149  		r += 8
   150  		i = i >> 8
   151  	}
   152  	if i&0x00000000000000f0 != 0 {
   153  		r += 4
   154  		i = i >> 4
   155  	}
   156  	return r + bval[i]
   157  }
   158  
   159  // randFreeSector will take a usage array and find a random free sector within
   160  // the usage array. The uint32 indicates the index of the sector within the
   161  // usage array.
   162  func randFreeSector(usage []uint64) (uint32, error) {
   163  	// Pick a random starting location. Scanning the sector in a short amount
   164  	// of time requires starting from a random place.
   165  	start := fastrand.Intn(len(usage))
   166  
   167  	// Find the first element of the array that is not completely full.
   168  	var i int
   169  	for i = start; i < len(usage); i++ {
   170  		if usage[i] != math.MaxUint64 {
   171  			break
   172  		}
   173  	}
   174  	// If nothing was found by the end of the array, a wraparound is needed.
   175  	if i == len(usage) {
   176  		for i = 0; i < start; i++ {
   177  			if usage[i] != math.MaxUint64 {
   178  				break
   179  			}
   180  		}
   181  		// Return an error if no empty sectors were found.
   182  		if i == start {
   183  			return 0, errNoFreeSectors
   184  		}
   185  	}
   186  
   187  	// Get the most significant zero. This is achieved by performing a 'most
   188  	// significant bit' on the XOR of the actual value. Return the index of the
   189  	// sector that has been selected.
   190  	msz := mostSignificantBit(^usage[i])
   191  	return uint32((uint64(i) * 64) + msz), nil
   192  }
   193  
   194  // usageSectors takes a storage folder usage array and returns a list of active
   195  // sectors in that usage array by their index.
   196  func usageSectors(usage []uint64) (usageSectors []uint32) {
   197  	// Iterate through the usage elements.
   198  	for i, u := range usage {
   199  		// Each usage element corresponds to storageFolderGranularity sectors.
   200  		// Iterate through them and append the ones that are present.
   201  		for j := uint64(0); j < storageFolderGranularity; j++ {
   202  			uMask := uint64(1) << j
   203  			if u&uMask == uMask {
   204  				usageSectors = append(usageSectors, uint32(i)*storageFolderGranularity+uint32(j))
   205  			}
   206  		}
   207  	}
   208  	return usageSectors
   209  }
   210  
   211  // vacancyStorageFolder takes a set of storage folders and returns a storage
   212  // folder with vacancy for a sector along with its index. 'nil' and '-1' are
   213  // returned if none of the storage folders are available to accept a sector.
   214  // The returned storage folder will be holding an RLock on its mutex.
   215  func vacancyStorageFolder(sfs []*storageFolder) (*storageFolder, int) {
   216  	enoughRoom := false
   217  	var winningIndex int
   218  
   219  	// Go through the folders in random order.
   220  	for _, index := range fastrand.Perm(len(sfs)) {
   221  		sf := sfs[index]
   222  
   223  		// Skip past this storage folder if there is not enough room for at
   224  		// least one sector.
   225  		if sf.sectors >= uint64(len(sf.usage))*storageFolderGranularity {
   226  			continue
   227  		}
   228  
   229  		// Skip past this storage folder if it's not available to receive new
   230  		// data.
   231  		if !sf.mu.TryRLock() {
   232  			continue
   233  		}
   234  
   235  		// Select this storage folder.
   236  		enoughRoom = true
   237  		winningIndex = index
   238  		break
   239  	}
   240  	if !enoughRoom {
   241  		return nil, -1
   242  	}
   243  	return sfs[winningIndex], winningIndex
   244  }
   245  
   246  // clearUsage will unset the usage bit at the provided sector index for this
   247  // storage folder.
   248  func (sf *storageFolder) clearUsage(sectorIndex uint32) {
   249  	usageElement := sf.usage[sectorIndex/storageFolderGranularity]
   250  	bitIndex := sectorIndex % storageFolderGranularity
   251  	usageElementUpdated := usageElement & (^(1 << bitIndex))
   252  	if usageElementUpdated != usageElement {
   253  		sf.sectors--
   254  		sf.usage[sectorIndex/storageFolderGranularity] = usageElementUpdated
   255  	}
   256  }
   257  
   258  // setUsage will set the usage bit at the provided sector index for this
   259  // storage folder.
   260  func (sf *storageFolder) setUsage(sectorIndex uint32) {
   261  	usageElement := sf.usage[sectorIndex/storageFolderGranularity]
   262  	bitIndex := sectorIndex % storageFolderGranularity
   263  	usageElementUpdated := usageElement | (1 << bitIndex)
   264  	if usageElementUpdated != usageElement {
   265  		sf.sectors++
   266  		sf.usage[sectorIndex/storageFolderGranularity] = usageElementUpdated
   267  	}
   268  }
   269  
   270  // availableStorageFolders returns the contract manager's storage folders as a
   271  // slice, excluding any unavailable storeage folders.
   272  func (cm *ContractManager) availableStorageFolders() []*storageFolder {
   273  	sfs := make([]*storageFolder, 0)
   274  	for _, sf := range cm.storageFolders {
   275  		// Skip unavailable storage folders.
   276  		if atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
   277  			continue
   278  		}
   279  		sfs = append(sfs, sf)
   280  	}
   281  	return sfs
   282  }
   283  
// threadedFolderRecheck checks the unavailable storage folders and looks to see
// if they have been mounted or restored by the user. It loops until the thread
// group signals shutdown, polling with an exponentially increasing interval.
func (cm *ContractManager) threadedFolderRecheck() {
	// Don't spawn the loop if 'noRecheck' disruption is set. (Disrupt is a
	// dependency-injection hook, presumably used to disable this loop in
	// tests.)
	if cm.dependencies.Disrupt("noRecheck") {
		return
	}

	sleepTime := folderRecheckInitialInterval
	for {
		// Check for shutdown.
		select {
		case <-cm.tg.StopChan():
			return
		case <-time.After(sleepTime):
		}

		// Check all of the storage folders and recover any that have been added
		// to the contract manager. The WAL lock is held for the whole scan so
		// the folder set and file handles are not mutated concurrently.
		cm.wal.mu.Lock()
		for _, sf := range cm.storageFolders {
			if atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
				// Try to reopen both backing files for the folder.
				var err1, err2 error
				sf.metadataFile, err1 = cm.dependencies.OpenFile(filepath.Join(sf.path, metadataFile), os.O_RDWR, 0700)
				sf.sectorFile, err2 = cm.dependencies.OpenFile(filepath.Join(sf.path, sectorFile), os.O_RDWR, 0700)
				if err1 == nil && err2 == nil {
					// The storage folder has been found, and loading can be
					// completed.
					cm.loadSectorLocations(sf)
				} else {
					// One of the opens failed, close the file handle for the
					// opens that did not fail so no descriptor is leaked.
					if err1 == nil {
						sf.metadataFile.Close()
					}
					if err2 == nil {
						sf.sectorFile.Close()
					}
				}
			}
		}
		cm.wal.mu.Unlock()

		// Increase the sleep time, doubling until another doubling would
		// reach or exceed maxFolderRecheckInterval.
		if sleepTime*2 < maxFolderRecheckInterval {
			sleepTime *= 2
		}
	}
}
   333  
   334  // ResetStorageFolderHealth will reset the read and write statistics for the
   335  // input storage folder.
   336  func (cm *ContractManager) ResetStorageFolderHealth(index uint16) error {
   337  	err := cm.tg.Add()
   338  	if err != nil {
   339  		return err
   340  	}
   341  	defer cm.tg.Done()
   342  	cm.wal.mu.Lock()
   343  	defer cm.wal.mu.Unlock()
   344  
   345  	sf, exists := cm.storageFolders[index]
   346  	if !exists {
   347  		return errStorageFolderNotFound
   348  	}
   349  	atomic.StoreUint64(&sf.atomicFailedReads, 0)
   350  	atomic.StoreUint64(&sf.atomicFailedWrites, 0)
   351  	atomic.StoreUint64(&sf.atomicSuccessfulReads, 0)
   352  	atomic.StoreUint64(&sf.atomicSuccessfulWrites, 0)
   353  	return nil
   354  }
   355  
   356  // ResizeStorageFolder will resize a storage folder, moving sectors as
   357  // necessary. The resize operation will stop and return an error if any of the
   358  // sector move operations fail. If the force flag is set to true, the resize
   359  // operation will continue through failures, meaning that data will be lost.
   360  func (cm *ContractManager) ResizeStorageFolder(index uint16, newSize uint64, force bool) error {
   361  	err := cm.tg.Add()
   362  	if err != nil {
   363  		return err
   364  	}
   365  	defer cm.tg.Done()
   366  
   367  	cm.wal.mu.Lock()
   368  	sf, exists := cm.storageFolders[index]
   369  	cm.wal.mu.Unlock()
   370  	if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
   371  		return errStorageFolderNotFound
   372  	}
   373  
   374  	if newSize/modules.SectorSize < MinimumSectorsPerStorageFolder {
   375  		return ErrSmallStorageFolder
   376  	}
   377  	if newSize/modules.SectorSize > MaximumSectorsPerStorageFolder {
   378  		return ErrLargeStorageFolder
   379  	}
   380  
   381  	oldSize := uint64(len(sf.usage)) * storageFolderGranularity * modules.SectorSize
   382  	if oldSize == newSize {
   383  		return ErrNoResize
   384  	}
   385  	newSectorCount := uint32(newSize / modules.SectorSize)
   386  	if oldSize > newSize {
   387  		return cm.wal.shrinkStorageFolder(index, newSectorCount, force)
   388  	}
   389  	return cm.wal.growStorageFolder(index, newSectorCount)
   390  }
   391  
   392  // StorageFolders will return a list of storage folders in the host, each
   393  // containing information about the storage folder and any operations currently
   394  // being executed on the storage folder.
   395  func (cm *ContractManager) StorageFolders() []modules.StorageFolderMetadata {
   396  	err := cm.tg.Add()
   397  	if err != nil {
   398  		return nil
   399  	}
   400  	defer cm.tg.Done()
   401  	cm.wal.mu.Lock()
   402  	defer cm.wal.mu.Unlock()
   403  
   404  	// Iterate over the storage folders that are in memory first, and then
   405  	// suppliment them with the storage folders that are not in memory.
   406  	var smfs []modules.StorageFolderMetadata
   407  	for _, sf := range cm.storageFolders {
   408  		// Grab the non-computational data.
   409  		sfm := modules.StorageFolderMetadata{
   410  			ProgressNumerator:   atomic.LoadUint64(&sf.atomicProgressNumerator),
   411  			ProgressDenominator: atomic.LoadUint64(&sf.atomicProgressDenominator),
   412  
   413  			FailedReads:      atomic.LoadUint64(&sf.atomicFailedReads),
   414  			FailedWrites:     atomic.LoadUint64(&sf.atomicFailedWrites),
   415  			SuccessfulReads:  atomic.LoadUint64(&sf.atomicSuccessfulReads),
   416  			SuccessfulWrites: atomic.LoadUint64(&sf.atomicSuccessfulWrites),
   417  
   418  			Capacity:          modules.SectorSize * 64 * uint64(len(sf.usage)),
   419  			CapacityRemaining: ((64 * uint64(len(sf.usage))) - sf.sectors) * modules.SectorSize,
   420  			Index:             sf.index,
   421  			Path:              sf.path,
   422  		}
   423  
   424  		// Set some of the values to extreme numbers if the storage folder is
   425  		// unavailable, to flag the user's attention.
   426  		if atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
   427  			sfm.FailedReads = 9999999999
   428  			sfm.FailedWrites = 9999999999
   429  		}
   430  
   431  		// Add this storage folder to the list of storage folders.
   432  		smfs = append(smfs, sfm)
   433  	}
   434  	return smfs
   435  }