gitlab.com/jokerrs1/Sia@v1.3.2/modules/host/contractmanager/storagefolder.go

package contractmanager

import (
	"errors"
	"fmt"
	"math"
	"os"
	"path/filepath"
	"sync/atomic"
	"time"

	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/sync"
	"github.com/NebulousLabs/fastrand"
)

var (
	// errBadStorageFolderIndex is returned if a storage folder is requested
	// that does not have the correct index.
	errBadStorageFolderIndex = errors.New("no storage folder exists at that index")

	// errIncompleteOffload is returned when the host is tasked with offloading
	// sectors from a storage folder but is only able to offload some, not all,
	// of the requested number of sectors.
	errIncompleteOffload = errors.New("could not successfully offload specified number of sectors from storage folder")

	// errInsufficientRemainingStorageForRemoval is returned if the remaining
	// storage folders do not have enough space remaining to support being
	// removed.
	errInsufficientRemainingStorageForRemoval = errors.New("not enough storage remaining to support removal of disk")

	// errInsufficientRemainingStorageForShrink is returned if the remaining
	// storage folders do not have enough space remaining to support being
	// reduced in size.
	errInsufficientRemainingStorageForShrink = errors.New("not enough storage remaining to support shrinking of disk")

	// ErrLargeStorageFolder is returned if a new storage folder or a resized
	// storage folder would exceed the maximum allowed size.
	ErrLargeStorageFolder = fmt.Errorf("maximum allowed size for a storage folder is %v bytes", MaximumSectorsPerStorageFolder*modules.SectorSize)

	// errMaxStorageFolders indicates that the limit on the number of allowed
	// storage folders has been reached.
	errMaxStorageFolders = fmt.Errorf("host can only accept up to %v storage folders", maximumStorageFolders)

	// errNoFreeSectors is returned if there are no free sectors in the usage
	// array fed to randFreeSector. This error should never be returned, as the
	// contract manager should have sufficient internal consistency to know in
	// advance that there are no free sectors.
	errNoFreeSectors = errors.New("could not find a free sector in the usage array")

	// ErrNoResize is returned if a new size is provided for a storage folder
	// that is the same as the current size of the storage folder.
	ErrNoResize = errors.New("storage folder selected for resize, but new size is same as current size")

	// errRelativePath is returned if a relative path is provided where an
	// absolute path is required.
	errRelativePath = errors.New("storage folder paths must be absolute")

	// ErrRepeatFolder is returned if a storage folder is added which links to
	// a path that is already in use by another storage folder. Only exact path
	// matches will trigger the error.
	ErrRepeatFolder = errors.New("selected path is already in use as a storage folder, please use 'resize'")

	// ErrSmallStorageFolder is returned if a new storage folder is not large
	// enough to meet the requirements for the minimum storage folder size.
	ErrSmallStorageFolder = fmt.Errorf("minimum allowed size for a storage folder is %v bytes", MinimumSectorsPerStorageFolder*modules.SectorSize)

	// errStorageFolderGranularity is returned if a call to AddStorageFolder
	// tries to use a storage folder size that is not an even multiple of
	// storageFolderGranularity sectors.
	errStorageFolderGranularity = fmt.Errorf("storage folder must be a factor of %v sectors", storageFolderGranularity)

	// errStorageFolderNotFolder is returned if a storage folder gets added
	// that is not a folder.
	errStorageFolderNotFolder = errors.New("must use an existing folder")

	// errStorageFolderNotFound is returned if a storage folder cannot be
	// found.
	errStorageFolderNotFound = errors.New("could not find storage folder with that id")
)

// storageFolder contains the metadata for a storage folder, including where
// sectors are being stored in the folder. Which sectors are being stored is
// tracked by the contract manager's sectorLocations map.
type storageFolder struct {
	// Progress statistics that can be reported to the user. Typically for long
	// running actions like adding or resizing a storage folder.
	atomicProgressNumerator   uint64
	atomicProgressDenominator uint64

	// Disk statistics for this boot cycle.
	atomicFailedReads      uint64
	atomicFailedWrites     uint64
	atomicSuccessfulReads  uint64
	atomicSuccessfulWrites uint64

	// Atomic bool indicating whether or not the storage folder is available.
	// If the storage folder is not available, it will still be loaded but will
	// return an error when queried.
	atomicUnavailable uint64 // uint64 for alignment

	// The index, path, and usage are all saved directly to disk.
	index uint16
	path  string
	usage []uint64

	// availableSectors indicates sectors which are marked as consumed in the
	// usage field but are actually available. They cannot be marked as free in
	// the usage field until the action which freed them has synced to disk, but
	// the settings should mark them as free during syncing.
	//
	// sectors is a count of the number of sectors in use according to the
	// usage field.
	availableSectors map[sectorID]uint32
	sectors          uint64

	// mu needs to be RLocked to safely write new sectors into the storage
	// folder. mu needs to be Locked when the folder is being added, removed,
	// or resized.
	mu sync.TryRWMutex

	// An open file handle is kept so that writes can easily be made to the
	// storage folder without needing to grab a new file handle. This also
	// makes it easy to do delayed-syncing.
	metadataFile modules.File
	sectorFile   modules.File
}
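
// Illustrative note (not part of the original source): each element of the
// usage slice is a 64-bit bitfield covering storageFolderGranularity sectors,
// so, assuming a granularity of 64 (consistent with the capacity arithmetic
// in StorageFolders below), sector index s maps to bit s%64 of usage[s/64]:
//
//	s := uint32(70)
//	inUse := sf.usage[s/64]&(1<<(s%64)) != 0 // element 1, bit 6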

// mostSignificantBit returns the index of the most significant set bit of an
// input value.
func mostSignificantBit(i uint64) uint64 {
	if i == 0 {
		panic("no bits set in input")
	}

	// Lookup table holding the most significant bit of every 4-bit value.
	bval := []uint64{0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3}
	r := uint64(0)
	if i&0xffffffff00000000 != 0 {
		r += 32
		i = i >> 32
	}
	if i&0x00000000ffff0000 != 0 {
		r += 16
		i = i >> 16
	}
	if i&0x000000000000ff00 != 0 {
		r += 8
		i = i >> 8
	}
	if i&0x00000000000000f0 != 0 {
		r += 4
		i = i >> 4
	}
	return r + bval[i]
}
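
// A worked example (illustrative, not part of the original source): for
// i = 22 (binary 10110), the 32-, 16-, and 8-bit branches are all skipped,
// the 4-bit branch fires (r = 4, i becomes 22>>4 = 1), and bval[1] = 0, so
// the function returns 4, the index of the highest set bit of 10110.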

// randFreeSector will take a usage array and find a random free sector within
// the usage array. The uint32 indicates the index of the sector within the
// usage array.
func randFreeSector(usage []uint64) (uint32, error) {
	// Pick a random starting location. Starting the scan from a random place
	// keeps the expected scan time short.
	start := fastrand.Intn(len(usage))

	// Find the first element of the array that is not completely full.
	var i int
	for i = start; i < len(usage); i++ {
		if usage[i] != math.MaxUint64 {
			break
		}
	}
	// If nothing was found by the end of the array, a wraparound is needed.
	if i == len(usage) {
		for i = 0; i < start; i++ {
			if usage[i] != math.MaxUint64 {
				break
			}
		}
		// Return an error if no empty sectors were found.
		if i == start {
			return 0, errNoFreeSectors
		}
	}

	// Get the most significant zero. This is achieved by taking the 'most
	// significant bit' of the complement of the element's value. Return the
	// index of the sector that has been selected.
	msz := mostSignificantBit(^usage[i])
	return uint32((uint64(i) * 64) + msz), nil
}
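
// A worked example (illustrative, not part of the original source): if the
// scan stops at usage[i] = 0x00000000000000ff (the low 8 slots in use), then
// ^usage[i] = 0xffffffffffffff00, mostSignificantBit returns 63, and the
// selected sector index is i*64 + 63, the highest free slot in that element.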

// usageSectors takes a storage folder usage array and returns a list of active
// sectors in that usage array by their index.
func usageSectors(usage []uint64) (usageSectors []uint32) {
	// Iterate through the usage elements.
	for i, u := range usage {
		// Each usage element corresponds to storageFolderGranularity sectors.
		// Iterate through them and append the ones that are present.
		for j := uint64(0); j < storageFolderGranularity; j++ {
			uMask := uint64(1) << j
			if u&uMask == uMask {
				usageSectors = append(usageSectors, uint32(i)*storageFolderGranularity+uint32(j))
			}
		}
	}
	return usageSectors
}
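
// Worked examples (illustrative, not part of the original source), assuming
// storageFolderGranularity is 64:
//
//	usageSectors([]uint64{0x5})      // [0 2]: bits 0 and 2 of element 0
//	usageSectors([]uint64{0x0, 0x3}) // [64 65]: bits 0 and 1 of element 1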

// vacancyStorageFolder takes a set of storage folders and returns a storage
// folder with vacancy for a sector along with its index. 'nil' and '-1' are
// returned if none of the storage folders are available to accept a sector.
// The returned storage folder will be holding an RLock on its mutex.
func vacancyStorageFolder(sfs []*storageFolder) (*storageFolder, int) {
	enoughRoom := false
	var winningIndex int

	// Go through the folders in random order.
	for _, index := range fastrand.Perm(len(sfs)) {
		sf := sfs[index]

		// Skip past this storage folder if there is not enough room for at
		// least one sector.
		if sf.sectors >= uint64(len(sf.usage))*storageFolderGranularity {
			continue
		}

		// Skip past this storage folder if it's not available to receive new
		// data.
		if !sf.mu.TryRLock() {
			continue
		}

		// Select this storage folder.
		enoughRoom = true
		winningIndex = index
		break
	}
	if !enoughRoom {
		return nil, -1
	}
	return sfs[winningIndex], winningIndex
}
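
// A minimal caller sketch (illustrative, not part of the original source).
// The returned folder is already RLocked, so the caller must release the
// lock once the write completes:
//
//	sf, idx := vacancyStorageFolder(cm.availableStorageFolders())
//	if sf == nil {
//		return errNoVacancy // hypothetical error, named for illustration
//	}
//	defer sf.mu.RUnlock()
//	_ = idx // the index into the slice that was passed in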

// clearUsage will unset the usage bit at the provided sector index for this
// storage folder.
func (sf *storageFolder) clearUsage(sectorIndex uint32) {
	usageElement := sf.usage[sectorIndex/storageFolderGranularity]
	bitIndex := sectorIndex % storageFolderGranularity
	usageElementUpdated := usageElement & (^(1 << bitIndex))
	if usageElementUpdated != usageElement {
		sf.sectors--
		sf.usage[sectorIndex/storageFolderGranularity] = usageElementUpdated
	}
}

// setUsage will set the usage bit at the provided sector index for this
// storage folder.
func (sf *storageFolder) setUsage(sectorIndex uint32) {
	usageElement := sf.usage[sectorIndex/storageFolderGranularity]
	bitIndex := sectorIndex % storageFolderGranularity
	usageElementUpdated := usageElement | (1 << bitIndex)
	if usageElementUpdated != usageElement {
		sf.sectors++
		sf.usage[sectorIndex/storageFolderGranularity] = usageElementUpdated
	}
}
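
// A note on the guard in both methods (illustrative, not part of the original
// source): comparing the element before and after the bit operation makes the
// methods idempotent with respect to the sectors counter. For sectorIndex 70
// (element 1, bit 6), calling setUsage twice flips the bit only once, so
// sf.sectors is incremented exactly once.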

// availableStorageFolders returns the contract manager's storage folders as a
// slice, excluding any unavailable storage folders.
func (cm *ContractManager) availableStorageFolders() []*storageFolder {
	sfs := make([]*storageFolder, 0)
	for _, sf := range cm.storageFolders {
		// Skip unavailable storage folders.
		if atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
			continue
		}
		sfs = append(sfs, sf)
	}
	return sfs
}

// threadedFolderRecheck checks the unavailable storage folders and looks to
// see if they have been mounted or restored by the user.
func (cm *ContractManager) threadedFolderRecheck() {
	// Don't spawn the loop if the 'noRecheck' disruption is set.
	if cm.dependencies.Disrupt("noRecheck") {
		return
	}

	sleepTime := folderRecheckInitialInterval
	for {
		// Check for shutdown.
		select {
		case <-cm.tg.StopChan():
			return
		case <-time.After(sleepTime):
		}

		// Check all of the storage folders and recover any that have become
		// accessible again.
		cm.wal.mu.Lock()
		for _, sf := range cm.storageFolders {
			if atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
				var err1, err2 error
				sf.metadataFile, err1 = cm.dependencies.OpenFile(filepath.Join(sf.path, metadataFile), os.O_RDWR, 0700)
				sf.sectorFile, err2 = cm.dependencies.OpenFile(filepath.Join(sf.path, sectorFile), os.O_RDWR, 0700)
				if err1 == nil && err2 == nil {
					// The storage folder has been found, and loading can be
					// completed.
					cm.loadSectorLocations(sf)
				} else {
					// One of the opens failed; close the file handle for the
					// open that did not fail.
					if err1 == nil {
						sf.metadataFile.Close()
					}
					if err2 == nil {
						sf.sectorFile.Close()
					}
				}
			}
		}
		cm.wal.mu.Unlock()

		// Double the sleep time, capped at maxFolderRecheckInterval.
		if sleepTime*2 < maxFolderRecheckInterval {
			sleepTime *= 2
		}
	}
}
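
// Launch sketch (an assumption, not shown in this file): the recheck loop is
// a long-running background task, so it would be started on its own goroutine
// during contract manager initialization, e.g.:
//
//	go cm.threadedFolderRecheck()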

// ResetStorageFolderHealth will reset the read and write statistics for the
// input storage folder.
func (cm *ContractManager) ResetStorageFolderHealth(index uint16) error {
	err := cm.tg.Add()
	if err != nil {
		return err
	}
	defer cm.tg.Done()
	cm.wal.mu.Lock()
	defer cm.wal.mu.Unlock()

	sf, exists := cm.storageFolders[index]
	if !exists {
		return errStorageFolderNotFound
	}
	atomic.StoreUint64(&sf.atomicFailedReads, 0)
	atomic.StoreUint64(&sf.atomicFailedWrites, 0)
	atomic.StoreUint64(&sf.atomicSuccessfulReads, 0)
	atomic.StoreUint64(&sf.atomicSuccessfulWrites, 0)
	return nil
}

// ResizeStorageFolder will resize a storage folder, moving sectors as
// necessary. The resize operation will stop and return an error if any of the
// sector move operations fail. If the force flag is set to true, the resize
// operation will continue through failures, meaning that data will be lost.
func (cm *ContractManager) ResizeStorageFolder(index uint16, newSize uint64, force bool) error {
	err := cm.tg.Add()
	if err != nil {
		return err
	}
	defer cm.tg.Done()

	cm.wal.mu.Lock()
	sf, exists := cm.storageFolders[index]
	cm.wal.mu.Unlock()
	if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
		return errStorageFolderNotFound
	}

	if newSize/modules.SectorSize < MinimumSectorsPerStorageFolder {
		return ErrSmallStorageFolder
	}
	if newSize/modules.SectorSize > MaximumSectorsPerStorageFolder {
		return ErrLargeStorageFolder
	}

	oldSize := uint64(len(sf.usage)) * storageFolderGranularity * modules.SectorSize
	if oldSize == newSize {
		return ErrNoResize
	}
	newSectorCount := uint32(newSize / modules.SectorSize)
	if oldSize > newSize {
		return cm.wal.shrinkStorageFolder(index, newSectorCount, force)
	}
	return cm.wal.growStorageFolder(index, newSectorCount)
}
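
// A worked example (illustrative; assumes modules.SectorSize is 4 MiB, the
// value used in standard Sia builds): resizing folder 2 to 32 GiB gives
// newSectorCount = (32 << 30) / (4 << 20) = 8192 sectors. If the folder was
// previously larger, the shrink path moves sectors off the folder first;
// otherwise the folder is grown in place:
//
//	err := cm.ResizeStorageFolder(2, 32<<30, false)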

// StorageFolders will return a list of storage folders in the host, each
// containing information about the storage folder and any operations currently
// being executed on the storage folder.
func (cm *ContractManager) StorageFolders() []modules.StorageFolderMetadata {
	err := cm.tg.Add()
	if err != nil {
		return nil
	}
	defer cm.tg.Done()
	cm.wal.mu.Lock()
	defer cm.wal.mu.Unlock()

	// Iterate over the storage folders that are in memory first, and then
	// supplement them with the storage folders that are not in memory.
	var smfs []modules.StorageFolderMetadata
	for _, sf := range cm.storageFolders {
		// Grab the non-computational data.
		sfm := modules.StorageFolderMetadata{
			ProgressNumerator:   atomic.LoadUint64(&sf.atomicProgressNumerator),
			ProgressDenominator: atomic.LoadUint64(&sf.atomicProgressDenominator),

			FailedReads:      atomic.LoadUint64(&sf.atomicFailedReads),
			FailedWrites:     atomic.LoadUint64(&sf.atomicFailedWrites),
			SuccessfulReads:  atomic.LoadUint64(&sf.atomicSuccessfulReads),
			SuccessfulWrites: atomic.LoadUint64(&sf.atomicSuccessfulWrites),

			Capacity:          modules.SectorSize * 64 * uint64(len(sf.usage)),
			CapacityRemaining: ((64 * uint64(len(sf.usage))) - sf.sectors) * modules.SectorSize,
			Index:             sf.index,
			Path:              sf.path,
		}

		// Set some of the values to extreme numbers if the storage folder is
		// unavailable, to draw the user's attention.
		if atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
			sfm.FailedReads = 9999999999
			sfm.FailedWrites = 9999999999
		}

		// Add this storage folder to the list of storage folders.
		smfs = append(smfs, sfm)
	}
	return smfs
}
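
// A minimal consumer sketch (illustrative, not part of the original source),
// printing each folder's usage:
//
//	for _, sfm := range cm.StorageFolders() {
//		used := sfm.Capacity - sfm.CapacityRemaining
//		fmt.Printf("folder %v at %v: %v/%v bytes used\n", sfm.Index, sfm.Path, used, sfm.Capacity)
//	}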