github.com/johnathanhowell/sia@v0.5.1-beta.0.20160524050156-83dcc3d37c94/modules/host/storagemanager/storagefolders.go

     1  package storagemanager
     2  
     3  // storagefolders.go is responsible for managing the storage folders within the
     4  // host. Storage folders can be added, resized, or removed. There are several
     5  // features in place to make sure that the host is always using a reasonable
     6  // amount of resources. Sectors in the host are currently always 4MiB, though
     7  // support for different sizes is planned. Because of the reliance on the
     8  // cached Merkle trees, sector sizes are likely to always be a power of 2.
     9  //
    10  // Though storage folders each contain a bunch of sectors, there is no mapping
    11  // from a storage folder to the sectors that it contains. Instead, one must
    12  // either look at the filesystem or go through the sector usage database.
    13  // There is a mapping from a sector to the storage folder that it is in, so a
    14  // list of sectors for each storage folder can be obtained, though the
    15  // operation is expensive. It is not recommended to inspect the filesystem
    16  // directly to see all of the sectors in a storage folder, because the Go
    17  // functions available for listing a directory load the whole directory into
    18  // memory at once, and these directories may contain millions of sectors.
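        //
        // As a rough sketch (not part of the original implementation; 'sm' stands
        // for the storage manager and 'sf' for the folder of interest), the list of
        // sectors in one folder could be recovered by scanning the sector usage
        // bucket and filtering on the folder's UID:
        //
        //	var ids [][]byte
        //	_ = sm.db.View(func(tx *bolt.Tx) error {
        //		return tx.Bucket(bucketSectorUsage).ForEach(func(k, v []byte) error {
        //			var usage sectorUsage
        //			if err := json.Unmarshal(v, &usage); err != nil {
        //				return err
        //			}
        //			if bytes.Equal(usage.StorageFolder, sf.UID) {
        //				ids = append(ids, append([]byte(nil), k...))
        //			}
        //			return nil
        //		})
        //	})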
    19  //
    20  // Strict resource limits are maintained to make sure that any user behavior
    21  // which would strain the host returns an error instead of causing problems
    22  // for the user. The number of storage folders is capped, the allowed size for a
    23  // storage folder has a range, and anything else that might have a linear or
    24  // nonconstant effect on resource consumption is capped.
    25  //
    26  // Sectors are meant to be spread out across the storage folders as evenly as
    27  // possible, but this is done in a very passive way. When a storage folder is
    28  // added, sectors are not moved from the other storage folders, which keeps the
    29  // operation quick. When a storage folder is reduced in size, sectors are only
    30  // moved if there is not enough room in the shrunken storage folder to
    31  // hold all of the sectors.
    32  //
    33  // Storage folders are identified by an ID. This ID is short (4 bytes) and is
    34  // randomly generated but is guaranteed not to conflict with any other storage
    35  // folder IDs (if a randomly generated ID conflicts, a new random ID is
    36  // chosen). A counter was rejected because storage folders can be removed and
    37  // added arbitrarily, and there should be a firm difference between accessing a
    38  // storage folder by index vs. accessing a storage folder by id.
    39  //
    40  // Storage folders statically track how much of their storage is unused.
    41  // Because there is no mapping from a storage folder to the sectors that it
    42  // contains, this count must be manually maintained. While it would be
    43  // possible to track which sectors are in each storage folder by using nested
    44  // buckets in the sector usage database, the implementation cost is high, and
    45  // is perceived to be higher than the implementation cost of statically
    46  // tracking the amount of storage remaining. The introduction of nested
    47  // buckets would also rely on fancier, less commonly used features of the
    48  // boltdb dependency, which carries a higher risk of error.
    49  
    50  // TODO: Need to add some command to 'siad' that will correctly repoint a
    51  // storage folder to a new mountpoint. As best I can tell, this needs to happen
    52  // while siad is not running. Either that, or 'siac' needs to do the whole
    53  // shutdown thing itself? Still unclear.
    54  
    55  import (
    56  	"bytes"
    57  	"encoding/hex"
    58  	"encoding/json"
    59  	"errors"
    60  	"fmt"
    61  	"os"
    62  	"path/filepath"
    63  
    64  	"github.com/NebulousLabs/Sia/build"
    65  	"github.com/NebulousLabs/Sia/modules"
    66  
    67  	"github.com/NebulousLabs/bolt"
    68  )
    69  
    70  var (
    71  	// errBadStorageFolderIndex is returned if an operation targets a storage
    72  	// folder index that does not exist.
    73  	errBadStorageFolderIndex = errors.New("no storage folder exists at that index")
    74  
    75  	// errIncompleteOffload is returned when the host is tasked with offloading
    76  	// sectors from a storage folder and is able to offload some, but not all,
    77  	// of the requested number.
    78  	errIncompleteOffload = errors.New("could not successfully offload specified number of sectors from storage folder")
    79  
    80  	// errInsufficientRemainingStorageForRemoval is returned if the remaining
    81  	// storage folders do not have enough space to hold the sectors from the
    82  	// storage folder being removed.
    83  	errInsufficientRemainingStorageForRemoval = errors.New("not enough storage remaining to support removal of disk")
    84  
    85  	// errInsufficientRemainingStorageForShrink is returned if the remaining
    86  	// storage folders do not have enough space to hold the sectors that must
    87  	// be offloaded from the storage folder being shrunk.
    88  	errInsufficientRemainingStorageForShrink = errors.New("not enough storage remaining to support shrinking of disk")
    89  
    90  	// errLargeStorageFolder is returned if a new storage folder or a resized
    91  	// storage folder would exceed the maximum allowed size.
    92  	errLargeStorageFolder = fmt.Errorf("maximum allowed size for a storage folder is %v bytes", maximumStorageFolderSize)
    93  
    94  	// errMaxStorageFolders indicates that the limit on the number of allowed
    95  	// storage folders has been reached.
    96  	errMaxStorageFolders = fmt.Errorf("host can only accept up to %v storage folders", maximumStorageFolders)
    97  
    98  	// errNoResize is returned if a new size is provided for a storage folder
    99  	// that is the same as the current size of the storage folder.
   100  	errNoResize = errors.New("storage folder selected for resize, but new size is same as current size")
   101  
   102  	// errRepeatFolder is returned if a storage folder is added which links to
   103  	// a path that is already in use by another storage folder. Only exact path
   104  	// matches will trigger the error.
   105  	errRepeatFolder = errors.New("selected path is already in use as a storage folder, please use 'resize'")
   106  
   107  	// errSmallStorageFolder is returned if a new storage folder is not large
   108  	// enough to meet the requirements for the minimum storage folder size.
   109  	errSmallStorageFolder = fmt.Errorf("minimum allowed size for a storage folder is %v bytes", minimumStorageFolderSize)
   110  
   111  	// errStorageFolderNotFolder is returned if a storage folder gets added
   112  	// that is not a folder.
   113  	errStorageFolderNotFolder = errors.New("must use an existing folder")
   114  )
   115  
   116  // storageFolder tracks a folder that is being used to store sectors. There is
   117  // a corresponding symlink in the host directory that points to whatever folder
   118  // the user has chosen for storing data (usually, a separate drive will be
   119  // mounted at that point).
   120  //
   121  // 'Size' is set by the user, indicating how much data can be placed into that
   122  // folder before the host should consider it full. Size is measured in bytes,
   123  // but only accounts for the actual raw data. Sia also places a nontrivial
   124  // amount of load on the filesystem, potentially to the tune of millions of
   125  // files. These files have long, cryptographic names and may take up as much as
   126  // a gigabyte of space in filesystem overhead, depending on how the filesystem
   127  // is architected. The host is programmed to gracefully handle full disks, so
   128  // while it might surprise the user that the host can't break past 99%
   129  // utilization, there should not be any issues if the user overestimates how
   130  // much storage is available in the folder they have offered to Sia. The host
   131  // will put the drive at 100% utilization, which may cause performance
   132  // slowdowns or other errors if non-Sia programs are also trying to use the
   133  // filesystem. If users are experiencing problems, having them set the storage
   134  // folder size to 98% of the actual drive size is probably going to fix most of
   135  // the issues.
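        // For example, following the 98% guideline above, a user offering a 4 TB
        // drive would size the storage folder at roughly 0.98 * 4e12 bytes, or about
        // 3.92 TB, leaving the remaining 2% for filesystem overhead.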
   136  //
   137  // 'SizeRemaining' is a variable that remembers how much storage is remaining
   138  // in the storage folder. It is managed manually, and is updated every time a
   139  // sector is added to or removed from the storage folder. Because there is no
   140  // property that inherently guarantees the correctness of 'SizeRemaining',
   141  // the implementation must be careful to maintain consistency.
   142  //
   143  // The UID of the storage folder is a small number of bytes that uniquely
   144  // identify the storage folder. The UID is generated randomly, but in such a
   145  // way as to guarantee that it will not collide with the ids of other storage
   146  // folders. The UID is used (via the uidString function) to determine the name
   147  // of the symlink which points to the folder holding the data for this storage
   148  // folder.
   149  //
   150  // Statistics are kept on the integrity of reads and writes. Ideally, the
   151  // filesystem is never returning errors, but if errors are being returned they
   152  // will be tracked and can be reported to the user.
   153  type storageFolder struct {
   154  	Path string
   155  	UID  []byte
   156  
   157  	Size          uint64
   158  	SizeRemaining uint64
   159  
   160  	FailedReads      uint64
   161  	FailedWrites     uint64
   162  	SuccessfulReads  uint64
   163  	SuccessfulWrites uint64
   164  }
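
        // The manual bookkeeping described above amounts to adjusting SizeRemaining
        // around every sector addition and removal, preserving the invariant that
        // Size - SizeRemaining equals the number of bytes of sector data attributed
        // to the folder. A minimal sketch (not part of the original code):
        //
        //	sf.SizeRemaining -= modules.SectorSize // after writing a sector into sf
        //	sf.SizeRemaining += modules.SectorSize // after removing a sector from sf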
   165  
   166  // emptiestStorageFolder takes a set of storage folders and returns the storage
   167  // folder with the lowest utilization by percentage. 'nil' is returned if there
   168  // are no storage folders provided with sufficient free space for a sector.
   169  //
   170  // Refusing to return a storage folder that does not have enough space prevents
   171  // the host from overfilling a storage folder.
   172  func emptiestStorageFolder(sfs []*storageFolder) (*storageFolder, int) {
   173  	mostFree := float64(-1) // Set lower than the min amount available to protect from floating point imprecision.
   174  	winningIndex := -1      // Set to impossible value to prevent unintentionally returning the wrong storage folder.
   175  	winner := false
   176  	for i, sf := range sfs {
   177  		// Check that this storage folder has at least enough space to hold a
   178  		// new sector. Also perform a sanity check that the storage folder has
   179  		// a sane amount of storage remaining.
   180  		if sf.SizeRemaining < modules.SectorSize || sf.Size < sf.SizeRemaining {
   181  			continue
   182  		}
   183  		winner = true // at least one storage folder has enough space for a new sector.
   184  
   185  		// Check this storage folder against the current winning storage folder's utilization.
   186  		sfFree := float64(sf.SizeRemaining) / float64(sf.Size)
   187  		if mostFree < sfFree {
   188  			mostFree = sfFree
   189  			winningIndex = i
   190  		}
   191  	}
   192  	// Do not return any storage folder if none of them have enough room for a
   193  	// new sector.
   194  	if !winner {
   195  		return nil, -1
   196  	}
   197  	return sfs[winningIndex], winningIndex
   198  }
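
        // As a usage sketch (hypothetical function, not part of the original file):
        // given two folders that can each hold another sector, the folder with the
        // higher free-space percentage wins, and (nil, -1) is returned when nothing
        // has room for a sector.
        func exampleEmptiestStorageFolder() {
        	sfs := []*storageFolder{
        		{Size: 100 * modules.SectorSize, SizeRemaining: 10 * modules.SectorSize}, // 10% free
        		{Size: 100 * modules.SectorSize, SizeRemaining: 40 * modules.SectorSize}, // 40% free
        	}
        	winner, index := emptiestStorageFolder(sfs)
        	if winner != nil {
        		// Prints: selected folder 1 with 40% free
        		fmt.Printf("selected folder %d with %.0f%% free\n", index,
        			100*float64(winner.SizeRemaining)/float64(winner.Size))
        	}
        }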
   199  
   200  // offloadStorageFolder takes sectors in a storage folder and moves them to
   201  // another storage folder.
   202  func (sm *StorageManager) offloadStorageFolder(offloadFolder *storageFolder, dataToOffload uint64) error {
   203  	// The host is going to check every sector, using a different database tx
   204  	// for each sector. To be able to track progress, a starting point needs to
   205  	// be grabbed. This read grabs the starting point.
   206  	//
   207  	// It is expected that the host is under lock for the whole operation -
   208  	// this function should be the only function with access to the database.
   209  	var currentSectorID []byte
   210  	var currentSectorBytes []byte
   211  	err := sm.db.View(func(tx *bolt.Tx) error {
   212  		currentSectorID, currentSectorBytes = tx.Bucket(bucketSectorUsage).Cursor().First()
   213  		return nil
   214  	})
   215  	if err != nil {
   216  		return err
   217  	}
   218  
   219  	// Create a list of available folders. As folders are filled up, this list
   220  	// will be pruned. Once all folders are full, the offload loop will quit
   221  	// and return with errIncompleteOffload.
   222  	availableFolders := make([]*storageFolder, 0)
   223  	for _, sf := range sm.storageFolders {
   224  		if sf == offloadFolder {
   225  			// The offload folder is not an available folder.
   226  			continue
   227  		}
   228  		if sf.SizeRemaining < modules.SectorSize {
   229  			// Folders that don't have enough room for a new sector are not
   230  			// available.
   231  			continue
   232  		}
   233  		availableFolders = append(availableFolders, sf)
   234  	}
   235  
   236  	// Go through the sectors one at a time. Sectors that are not a part of the
   237  	// provided storage folder are ignored. Sectors that are a part of the
   238  	// storage folder will be moved to a new storage folder. The loop will quit
   239  	// after 'dataToOffload' data has been moved from the storage folder.
   240  	dataOffloaded := uint64(0)
   241  	for currentSectorID != nil && dataOffloaded < dataToOffload && len(availableFolders) > 0 {
   242  		err = sm.db.Update(func(tx *bolt.Tx) error {
   243  			// Defer seeking to the next sector.
   244  			defer func() {
   245  				bsuc := tx.Bucket(bucketSectorUsage).Cursor()
   246  				bsuc.Seek(currentSectorID)
   247  				currentSectorID, currentSectorBytes = bsuc.Next()
   248  			}()
   249  
   250  			// Determine whether the sector needs to be moved.
   251  			var usage sectorUsage
   252  			err = json.Unmarshal(currentSectorBytes, &usage)
   253  			if err != nil {
   254  				return err
   255  			}
   256  			if !bytes.Equal(usage.StorageFolder, offloadFolder.UID) {
   257  				// The current sector is not in the offloading storage folder,
   258  				// try the next sector. Returning nil will advance to the next
   259  				// iteration of the loop.
   260  				return nil
   261  			}
   262  
   263  			// This sector is in the folder being offloaded, and therefore needs
   264  			// to be moved to another folder.
   265  			success := false
   266  			emptiestFolder, emptiestIndex := emptiestStorageFolder(availableFolders)
   267  			for emptiestFolder != nil {
   268  				oldSectorPath := filepath.Join(sm.persistDir, offloadFolder.uidString(), string(currentSectorID))
   269  				// Try reading the sector from disk.
   270  				sectorData, err := sm.dependencies.readFile(oldSectorPath)
   271  				if err != nil {
   272  					// Indicate that the storage folder is having read
   273  					// troubles.
   274  					offloadFolder.FailedReads++
   275  
   276  					// Returning nil will move to the next sector. Though the
   277  					// current sector has failed to read, the host will keep
   278  					// trying future sectors in hopes of finishing the task.
   279  					return nil
   280  				}
   281  				// Indicate that the storage folder did a successful read.
   282  				offloadFolder.SuccessfulReads++
   283  
   284  				// Try writing the sector to the emptiest storage folder.
   285  				newSectorPath := filepath.Join(sm.persistDir, emptiestFolder.uidString(), string(currentSectorID))
   286  				err = sm.dependencies.writeFile(newSectorPath, sectorData, 0700)
   287  				if err != nil {
   288  					// Indicate that the storage folder is having write
   289  					// troubles.
   290  					emptiestFolder.FailedWrites++
   291  
   292  					// After the failed write, try removing any garbage that
   293  					// may have gotten left behind. The error is not checked,
   294  					// as it is known that the disk is having write troubles.
   295  					_ = sm.dependencies.removeFile(newSectorPath)
   296  
   297  					// Because the write failed, we should move on to the next
   298  					// storage folder, and remove the current storage folder
   299  					// from the list of available folders.
   300  					availableFolders = append(availableFolders[0:emptiestIndex], availableFolders[emptiestIndex+1:]...)
   301  
   302  					// Try the next folder.
   303  					emptiestFolder, emptiestIndex = emptiestStorageFolder(availableFolders)
   304  					continue
   305  				}
   306  				// Indicate that the storage folder is doing successful writes.
   307  				emptiestFolder.SuccessfulWrites++
   308  				err = sm.dependencies.removeFile(oldSectorPath)
   309  				if err != nil {
   310  					// Indicate that the storage folder is having write
   311  					// troubles.
   312  					offloadFolder.FailedWrites++
   313  				} else {
   314  					offloadFolder.SuccessfulWrites++
   315  				}
   316  
   317  				success = true
   318  				break
   319  			}
   320  			if !success {
   321  				// The sector failed to be written successfully, try moving to
   322  				// the next sector.
   323  				return nil
   324  			}
   325  
   326  			offloadFolder.SizeRemaining += modules.SectorSize
   327  			emptiestFolder.SizeRemaining -= modules.SectorSize
   328  			dataOffloaded += modules.SectorSize
   329  
   330  			// Update the sector usage database to reflect the file movement.
   331  			// Because this cannot be done atomically, recovery tools are
   332  			// required to deal with outlier cases where the file move succeeds
   333  			// but the database update does not.
   334  			usage.StorageFolder = emptiestFolder.UID
   335  			newUsageBytes, err := json.Marshal(usage)
   336  			if err != nil {
   337  				return err
   338  			}
   339  			err = tx.Bucket(bucketSectorUsage).Put(currentSectorID, newUsageBytes)
   340  			if err != nil {
   341  				return err
   342  			}
   343  
   344  			// The deferred function above advances the cursor to the next sector.
   345  			return nil
   346  		})
   347  		if err != nil {
   348  			return err
   349  		}
   350  	}
   351  	if dataOffloaded < dataToOffload {
   352  		return errIncompleteOffload
   353  	}
   354  	return nil
   355  }
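
        // The iteration pattern used above - one bolt transaction per sector, with
        // the cursor re-seeked to the saved key at the start of each transaction -
        // reduces to the following hypothetical sketch (not part of the original
        // file), which simply walks every key in the sector usage bucket:
        func exampleWalkSectorUsage(sm *StorageManager) error {
        	var key []byte
        	err := sm.db.View(func(tx *bolt.Tx) error {
        		key, _ = tx.Bucket(bucketSectorUsage).Cursor().First()
        		return nil
        	})
        	if err != nil {
        		return err
        	}
        	for key != nil {
        		err = sm.db.Update(func(tx *bolt.Tx) error {
        			c := tx.Bucket(bucketSectorUsage).Cursor()
        			c.Seek(key) // return to the saved position
        			// Per-sector work would happen here in the real function.
        			key, _ = c.Next() // remember where to resume on the next iteration
        			return nil
        		})
        		if err != nil {
        			return err
        		}
        	}
        	return nil
        }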
   356  
   357  // storageFolder returns the storage folder in the host with the input uid. If
   358  // the storage folder is not found, nil is returned.
   359  func (sm *StorageManager) storageFolder(uid []byte) *storageFolder {
   360  	for _, sf := range sm.storageFolders {
   361  		if bytes.Equal(uid, sf.UID) {
   362  			return sf
   363  		}
   364  	}
   365  	return nil
   366  }
   367  
   368  // uidString returns the string value of the storage folder's UID. This string
   369  // maps to the filename of the symlink that is used to point to the folder that
   370  // holds all of the sector data contained by the storage folder.
   371  func (sf *storageFolder) uidString() string {
   372  	if len(sf.UID) != storageFolderUIDSize {
   373  		build.Critical("storage folder UID length is incorrect - perhaps the wrong version of Sia is being run?")
   374  	}
   375  	return hex.EncodeToString(sf.UID)
   376  }
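
        // For illustration (hypothetical values, 'sm' being the storage manager): a
        // 4-byte UID maps to an 8-character hex string, which becomes the symlink's
        // filename.
        //
        //	sf := &storageFolder{UID: []byte{0xde, 0xad, 0xbe, 0xef}}
        //	_ = filepath.Join(sm.persistDir, sf.uidString()) // ".../deadbeef"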
   377  
   378  // AddStorageFolder adds a storage folder to the host.
   379  func (sm *StorageManager) AddStorageFolder(path string, size uint64) error {
   380  	// Lock the host for the duration of the add operation - it is important
   381  	// that the host not be manipulated while sectors are being moved around.
   382  	sm.mu.Lock()
   383  	defer sm.mu.Unlock()
   384  	// The resource lock is required as the sector movements require access to
   385  	// the logger.
   386  	sm.resourceLock.RLock()
   387  	defer sm.resourceLock.RUnlock()
   388  	if sm.closed {
   389  		return errStorageManagerClosed
   390  	}
   391  
   392  	// Check that the maximum number of allowed storage folders has not been
   393  	// exceeded.
   394  	if len(sm.storageFolders) >= maximumStorageFolders {
   395  		return errMaxStorageFolders
   396  	}
   397  	// Check that the storage folder being added meets the size requirements.
   398  	if size > maximumStorageFolderSize {
   399  		return errLargeStorageFolder
   400  	}
   401  	if size < minimumStorageFolderSize {
   402  		return errSmallStorageFolder
   403  	}
   404  	// Check that the folder being linked to is not already in use.
   405  	for _, sf := range sm.storageFolders {
   406  		if sf.Path == path {
   407  			return errRepeatFolder
   408  		}
   409  	}
   410  
   411  	// Check that the folder being linked to both exists and is a folder.
   412  	pathInfo, err := os.Stat(path)
   413  	if err != nil {
   414  		return err
   415  	}
   416  	if !pathInfo.Mode().IsDir() {
   417  		return errStorageFolderNotFolder
   418  	}
   419  
   420  	// Create a storage folder object.
   421  	newSF := &storageFolder{
   422  		Path: path,
   423  
   424  		Size:          size,
   425  		SizeRemaining: size,
   426  	}
   427  	// Give the storage folder a new UID, while enforcing that the storage
   428  	// folder can't have a collision with any of the other storage folders.
   429  	newSF.UID = make([]byte, storageFolderUIDSize)
   430  	for {
   431  		// Generate a candidate UID for the storage folder.
   432  		_, err = sm.dependencies.randRead(newSF.UID)
   433  		if err != nil {
   434  			return err
   435  		}
   436  
   437  		// Check for collisions. The check should be relatively inexpensive at all
   438  		// times, because the total number of storage folders is limited to
   439  		// 256.
   440  		safe := true
   441  		for _, sf := range sm.storageFolders {
   442  			if bytes.Equal(newSF.UID, sf.UID) {
   443  				safe = false
   444  				break
   445  			}
   446  		}
   447  		if safe {
   448  			break
   449  		}
   450  	}
   451  
   452  	// Symlink the user-provided data path into the host's persist directory under the UID's name.
   453  	symPath := filepath.Join(sm.persistDir, newSF.uidString())
   454  	err = sm.dependencies.symlink(path, symPath)
   455  	if err != nil {
   456  		return err
   457  	}
   458  
   459  	// Add the storage folder to the list of folders for the host.
   460  	sm.storageFolders = append(sm.storageFolders, newSF)
   461  	return sm.saveSync()
   462  }
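
        // Hypothetical usage sketch (path and size are examples only):
        //
        //	err := sm.AddStorageFolder("/mnt/siadrive", minimumStorageFolderSize)
        //	// err is errRepeatFolder if the path is already a storage folder,
        //	// errSmallStorageFolder or errLargeStorageFolder if the size is out of
        //	// range, and nil once the symlink has been created and the host saved.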
   463  
   464  // ResetStorageFolderHealth will reset the read and write statistics for the
   465  // storage folder.
   466  func (sm *StorageManager) ResetStorageFolderHealth(index int) error {
   467  	sm.mu.Lock()
   468  	defer sm.mu.Unlock()
   469  	sm.resourceLock.RLock()
   470  	defer sm.resourceLock.RUnlock()
   471  	if sm.closed {
   472  		return errStorageManagerClosed
   473  	}
   474  
   475  	// Check that the input is valid.
   476  	if index >= len(sm.storageFolders) || index < 0 {
   477  		return errBadStorageFolderIndex
   478  	}
   479  
   480  	// Reset the storage statistics and save the host.
   481  	sm.storageFolders[index].FailedReads = 0
   482  	sm.storageFolders[index].FailedWrites = 0
   483  	sm.storageFolders[index].SuccessfulReads = 0
   484  	sm.storageFolders[index].SuccessfulWrites = 0
   485  	return sm.saveSync()
   486  }
   487  
   488  // RemoveStorageFolder removes a storage folder from the host.
   489  func (sm *StorageManager) RemoveStorageFolder(removalIndex int, force bool) error {
   490  	sm.mu.Lock()
   491  	defer sm.mu.Unlock()
   492  	sm.resourceLock.RLock()
   493  	defer sm.resourceLock.RUnlock()
   494  	if sm.closed {
   495  		return errStorageManagerClosed
   496  	}
   497  
   498  	// Check that the removal folder exists, and create a shortcut to it.
   499  	if removalIndex >= len(sm.storageFolders) || removalIndex < 0 {
   500  		return errBadStorageFolderIndex
   501  	}
   502  	removalFolder := sm.storageFolders[removalIndex]
   503  
   504  	// Move all of the sectors in the storage folder to other storage folders.
   505  	usedSize := removalFolder.Size - removalFolder.SizeRemaining
   506  	offloadErr := sm.offloadStorageFolder(removalFolder, usedSize)
   507  	// If 'force' is set, we want to ignore 'errIncompleteOffload' and try to
   508  	// remove the storage folder anyway. For any other error, we want to halt
   509  	// and return the error.
   510  	if force && offloadErr == errIncompleteOffload {
   511  		offloadErr = nil
   512  	}
   513  	if offloadErr != nil {
   514  		return offloadErr
   515  	}
   516  
   517  	// Remove the storage folder from the host and then save the host.
   518  	sm.storageFolders = append(sm.storageFolders[0:removalIndex], sm.storageFolders[removalIndex+1:]...)
   519  	removeErr := sm.dependencies.removeFile(filepath.Join(sm.persistDir, removalFolder.uidString()))
   520  	saveErr := sm.saveSync()
   521  	return composeErrors(saveErr, removeErr)
   522  }
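
        // Hypothetical usage sketch: with force set to true, an incomplete offload is
        // tolerated and the folder is removed anyway, effectively abandoning any
        // sectors that could not be relocated; with force set to false the same
        // situation returns errIncompleteOffload and the folder is kept.
        //
        //	err := sm.RemoveStorageFolder(0, true)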
   523  
   524  // ResizeStorageFolder changes the amount of disk space that is going to be
   525  // allocated to a storage folder.
   526  func (sm *StorageManager) ResizeStorageFolder(storageFolderIndex int, newSize uint64) error {
   527  	// Lock the host for the duration of the resize operation - it is important
   528  	// that the host not be manipulated while sectors are being moved around.
   529  	sm.mu.Lock()
   530  	defer sm.mu.Unlock()
   531  	// The resource lock is required as the sector movements require access to
   532  	// the logger.
   533  	sm.resourceLock.RLock()
   534  	defer sm.resourceLock.RUnlock()
   535  	if sm.closed {
   536  		return errStorageManagerClosed
   537  	}
   538  
   539  	// Check that the inputs are valid.
   540  	if storageFolderIndex >= len(sm.storageFolders) || storageFolderIndex < 0 {
   541  		return errBadStorageFolderIndex
   542  	}
   543  	resizeFolder := sm.storageFolders[storageFolderIndex]
   544  	if newSize > maximumStorageFolderSize {
   545  		return errLargeStorageFolder
   546  	}
   547  	if newSize < minimumStorageFolderSize {
   548  		return errSmallStorageFolder
   549  	}
   550  	if resizeFolder.Size == newSize {
   551  		return errNoResize
   552  	}
   553  
   554  	// Sectors do not need to be moved onto or away from the resize folder if
   555  	// the folder is growing, or if after being shrunk the folder still has
   556  	// enough storage to house all of the sectors it currently tracks.
   557  	resizeFolderSizeConsumed := resizeFolder.Size - resizeFolder.SizeRemaining
   558  	if resizeFolderSizeConsumed <= newSize {
   559  		resizeFolder.SizeRemaining = newSize - resizeFolderSizeConsumed
   560  		resizeFolder.Size = newSize
   561  		return sm.saveSync()
   562  	}
   563  
   564  	// Calculate the number of sectors that need to be offloaded from the
   565  	// storage folder.
   566  	offloadSize := resizeFolderSizeConsumed - newSize
   567  	offloadErr := sm.offloadStorageFolder(resizeFolder, offloadSize)
   568  	if offloadErr == errIncompleteOffload {
   569  		// Offloading has not fully succeeded, but may have partially
   570  		// succeeded. To prevent new sectors from being added to the storage
   571  		// folder, clamp the size of the storage folder to the current amount
   572  		// of storage in use.
   573  		resizeFolder.Size -= resizeFolder.SizeRemaining
   574  		resizeFolder.SizeRemaining = 0
   575  		return offloadErr
   576  	} else if offloadErr != nil {
   577  		return offloadErr
   578  	}
   579  	resizeFolder.Size = newSize
   580  	resizeFolder.SizeRemaining = 0
   581  	return sm.saveSync()
   582  }
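
        // A worked sketch of the shrink arithmetic above (hypothetical numbers): a
        // folder with Size equal to 100 sectors and SizeRemaining equal to 30
        // sectors has resizeFolderSizeConsumed equal to 70 sectors. Shrinking to a
        // newSize of 80 sectors needs no offload (70 <= 80) and leaves SizeRemaining
        // at 10 sectors. Shrinking to a newSize of 50 sectors requires offloading
        // 70 - 50 = 20 sectors worth of data before the folder is marked full at its
        // new size.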
   583  
   584  // StorageFolders provides information about all of the storage folders in the
   585  // host.
   586  func (sm *StorageManager) StorageFolders() (sfms []modules.StorageFolderMetadata) {
   587  	sm.mu.RLock()
   588  	defer sm.mu.RUnlock()
   589  
   590  	for _, sf := range sm.storageFolders {
   591  		sfms = append(sfms, modules.StorageFolderMetadata{
   592  			Capacity:          sf.Size,
   593  			CapacityRemaining: sf.SizeRemaining,
   594  			Path:              sf.Path,
   595  
   596  			FailedReads:      sf.FailedReads,
   597  			FailedWrites:     sf.FailedWrites,
   598  			SuccessfulReads:  sf.SuccessfulReads,
   599  			SuccessfulWrites: sf.SuccessfulWrites,
   600  		})
   601  	}
   602  	return sfms
   603  }
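
        // Hypothetical usage sketch: reporting per-folder utilization from the
        // returned metadata.
        //
        //	for _, sfm := range sm.StorageFolders() {
        //		used := sfm.Capacity - sfm.CapacityRemaining
        //		fmt.Printf("%s: %v of %v bytes used\n", sfm.Path, used, sfm.Capacity)
        //	}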