gitlab.com/SiaPrime/SiaPrime@v1.4.1/modules/host/contractmanager/storagefolderadd.go

package contractmanager

import (
	"os"
	"path/filepath"
	"sync"
	"sync/atomic"

	"gitlab.com/NebulousLabs/fastrand"
	"gitlab.com/SiaPrime/SiaPrime/build"
	"gitlab.com/SiaPrime/SiaPrime/modules"
)

// findUnfinishedStorageFolderAdditions will scan through a set of state
// changes and figure out which of the unfinished storage folder additions are
// still unfinished.
func findUnfinishedStorageFolderAdditions(scs []stateChange) []savedStorageFolder {
	// Use a map to figure out what unfinished storage folders exist and use it
	// to remove the ones that have terminated.
	usfMap := make(map[uint16]savedStorageFolder)
	for _, sc := range scs {
		for _, sf := range sc.UnfinishedStorageFolderAdditions {
			usfMap[sf.Index] = sf
		}
		for _, sf := range sc.StorageFolderAdditions {
			delete(usfMap, sf.Index)
		}
		for _, index := range sc.ErroredStorageFolderAdditions {
			delete(usfMap, index)
		}
		for _, sfr := range sc.StorageFolderRemovals {
			delete(usfMap, sfr.Index)
		}
	}
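	// Replay illustration (hypothetical sequence): an unfinished addition for
	// index 4 that is later followed by a StorageFolderAddition for index 4
	// drops out of usfMap, while an unfinished addition with no terminating
	// change survives and is returned to the caller for cleanup.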

	// Return the active unfinished storage folders as a slice.
	var sfs []savedStorageFolder
	for _, sf := range usfMap {
		sfs = append(sfs, sf)
	}
	return sfs
}

// cleanupUnfinishedStorageFolderAdditions will purge any unfinished storage
// folder additions from the previous run.
func (wal *writeAheadLog) cleanupUnfinishedStorageFolderAdditions(scs []stateChange) {
	usfs := findUnfinishedStorageFolderAdditions(scs)
	for _, usf := range usfs {
		sf, exists := wal.cm.storageFolders[usf.Index]
		if exists && atomic.LoadUint64(&sf.atomicUnavailable) == 0 {
			// Close the storage folder file handles.
			err := sf.metadataFile.Close()
			if err != nil {
				wal.cm.log.Println("Unable to close metadata file for storage folder", sf.path)
			}
			err = sf.sectorFile.Close()
			if err != nil {
				wal.cm.log.Println("Unable to close sector file for storage folder", sf.path)
			}

			// Delete the storage folder from the storage folders map.
			delete(wal.cm.storageFolders, sf.index)
		}

		// Remove any leftover files.
		sectorLookupName := filepath.Join(usf.Path, metadataFile)
		sectorHousingName := filepath.Join(usf.Path, sectorFile)
		err := wal.cm.dependencies.RemoveFile(sectorLookupName)
		if err != nil {
			wal.cm.log.Println("Unable to remove documented sector metadata lookup:", sectorLookupName, err)
		}
		err = wal.cm.dependencies.RemoveFile(sectorHousingName)
		if err != nil {
			wal.cm.log.Println("Unable to remove documented sector housing:", sectorHousingName, err)
		}

		// Append an error call to the changeset, indicating that the storage
		// folder add was not completed successfully.
		wal.appendChange(stateChange{
			ErroredStorageFolderAdditions: []uint16{usf.Index},
		})
	}
}

// managedAddStorageFolder will add a storage folder to the contract manager.
// The parent function, contractmanager.AddStorageFolder, has already performed
// any error checking that can be performed without accessing the contract
// manager state.
//
// managedAddStorageFolder can take a long time, as it writes a giant, zeroed
// out file to disk covering the entire range of the storage folder, and
// failure can occur late in the operation. The WAL is notified that a long
// running operation is in progress, so that any changes to disk can be
// reverted in the event of unclean shutdown.
func (wal *writeAheadLog) managedAddStorageFolder(sf *storageFolder) error {
	// Lock the storage folder for the duration of the function.
	sf.mu.Lock()
	defer sf.mu.Unlock()

	numSectors := uint64(len(sf.usage)) * 64
	sectorLookupSize := numSectors * sectorMetadataDiskSize
	sectorHousingSize := numSectors * modules.SectorSize
	totalSize := sectorLookupSize + sectorHousingSize
	sectorLookupName := filepath.Join(sf.path, metadataFile)
	sectorHousingName := filepath.Join(sf.path, sectorFile)
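
	// Sizing note: each element of sf.usage is a uint64 bitfield covering 64
	// sectors, so the folder holds len(sf.usage)*64 sectors in total. The
	// metadata file stores sectorMetadataDiskSize bytes per sector, and the
	// sector housing file stores modules.SectorSize bytes per sector.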

	// Update the uncommitted state to include the storage folder, returning an
	// error if any checks fail.
	var syncChan chan struct{}
	err := func() error {
		wal.mu.Lock()
		defer wal.mu.Unlock()

		// Check that the storage folder is not a duplicate. That requires
		// first checking the contract manager and then checking the WAL. The
		// number of storage folders is also counted, to make sure that the
		// maximum number of storage folders allowed is not exceeded.
		for _, csf := range wal.cm.storageFolders {
			// The conflicting storage folder may be in the process of being
			// removed, however we refuse to add a replacement storage folder
			// until the existing one has been removed entirely.
			if sf.path == csf.path {
				return ErrRepeatFolder
			}
		}

		// Check that there is room for another storage folder.
		if uint64(len(wal.cm.storageFolders)) >= maximumStorageFolders {
			return errMaxStorageFolders
		}

		// Determine the index of the storage folder by scanning for an empty
		// spot in the folderLocations map. A random starting place is chosen
		// to keep good average and worst-case runtime.
		var iterator int
		index := uint16(fastrand.Intn(65536))
		for iterator = 0; iterator < 65536; iterator++ {
			// Check whether this index is already in use. Because index is a
			// uint16, the increment below wraps around to zero on overflow,
			// so every slot gets visited regardless of the starting point.
			_, exists := wal.cm.storageFolders[index]
			if !exists {
				break
			}
			index++
		}
		if iterator == 65536 {
			wal.cm.log.Critical("Previous check indicated that there was room to add another storage folder, but folderLocations set is full.")
			return errMaxStorageFolders
		}
		// Assign the empty index to the storage folder.
		sf.index = index

		// Create the files that get used with the storage folder.
		var err error
		sf.metadataFile, err = wal.cm.dependencies.CreateFile(sectorLookupName)
		if err != nil {
			return build.ExtendErr("could not create storage folder metadata file", err)
		}
		sf.sectorFile, err = wal.cm.dependencies.CreateFile(sectorHousingName)
		if err != nil {
			err = build.ComposeErrors(err, sf.metadataFile.Close())
			err = build.ComposeErrors(err, wal.cm.dependencies.RemoveFile(sectorLookupName))
			return build.ExtendErr("could not create storage folder sector file", err)
		}
		// Establish the progress fields for the add operation in the storage
		// folder.
		atomic.StoreUint64(&sf.atomicProgressDenominator, totalSize)

		// Add the storage folder to the list of storage folders.
		wal.cm.storageFolders[index] = sf

		// Add the storage folder to the list of unfinished storage folder
		// additions. There should be no chance of error between this append
		// operation and the completed commitment to the unfinished storage
		// folder addition (signaled by `<-syncChan` a few lines down).
		wal.appendChange(stateChange{
			UnfinishedStorageFolderAdditions: []savedStorageFolder{sf.savedStorageFolder()},
		})
		// Grab the sync channel so we know when the unfinished storage folder
		// addition has been committed to on disk.
		syncChan = wal.syncChan
		return nil
	}()
	if err != nil {
		return err
	}
	// Block until the commitment to the unfinished storage folder addition is
	// complete.
	<-syncChan
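	// The receive above returns once the WAL has synced the change to disk:
	// the WAL closes its current syncChan after a batch of state changes has
	// been fsynced, releasing all waiters.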

	// Simulate a disk failure at this point.
	if wal.cm.dependencies.Disrupt("storageFolderAddFinish") {
		return nil
	}

	// If there's an error in the rest of the function, the storage folder
	// needs to be removed from the list of unfinished storage folder
	// additions. Because the WAL is append-only, a stateChange needs to be
	// appended which indicates that the storage folder was unable to be added
	// successfully.
	defer func(sf *storageFolder) {
		if err != nil {
			wal.mu.Lock()
			defer wal.mu.Unlock()

			// Delete the storage folder from the storage folders map.
			delete(wal.cm.storageFolders, sf.index)

			// Remove the leftover files from the failed operation.
			err = build.ComposeErrors(err, sf.sectorFile.Close())
			err = build.ComposeErrors(err, sf.metadataFile.Close())
			err = build.ComposeErrors(err, wal.cm.dependencies.RemoveFile(sectorLookupName))
			err = build.ComposeErrors(err, wal.cm.dependencies.RemoveFile(sectorHousingName))

			// Signal in the WAL that the unfinished storage folder addition
			// has failed.
			wal.appendChange(stateChange{
				ErroredStorageFolderAdditions: []uint16{sf.index},
			})
		}
	}(sf)
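	// Note: the return value of managedAddStorageFolder is unnamed, so the
	// close/remove errors composed into err by the deferred cleanup above are
	// not propagated to the caller; the cleanup is best-effort and the
	// original error is what gets returned.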

	// Allocate the files on disk for the storage folder.
	stepCount := sectorHousingSize / folderAllocationStepSize
	for i := uint64(0); i < stepCount; i++ {
		err = sf.sectorFile.Truncate(int64(folderAllocationStepSize * (i + 1)))
		if err != nil {
			return build.ExtendErr("could not allocate storage folder", err)
		}
		// After each iteration, update the progress numerator.
		atomic.AddUint64(&sf.atomicProgressNumerator, folderAllocationStepSize)
	}
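	// Truncate only extends the file with zeroes (typically as a sparse file
	// on filesystems that support it); growing in folderAllocationStepSize
	// increments chiefly keeps the progress numerator meaningful for callers
	// polling the status of the add operation.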
	err = sf.sectorFile.Truncate(int64(sectorHousingSize))
	if err != nil {
		return build.ExtendErr("could not allocate sector data file", err)
	}

	// Write the metadata file.
	err = sf.metadataFile.Truncate(int64(sectorLookupSize))
	if err != nil {
		return build.ExtendErr("could not allocate sector metadata file", err)
	}

	// The file creation process is essentially complete at this point; report
	// complete progress.
	atomic.StoreUint64(&sf.atomicProgressNumerator, totalSize)

	// Sync the files.
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		err := sf.metadataFile.Sync()
		if err != nil {
			wal.cm.log.Println("could not synchronize allocated sector metadata file:", err)
		}
	}()
	go func() {
		defer wg.Done()
		err := sf.sectorFile.Sync()
		if err != nil {
			wal.cm.log.Println("could not synchronize allocated sector data file:", err)
		}
	}()
	wg.Wait()

	// TODO: Sync the directory as well (directory data changed as new files
	// were added)
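	//
	// A sketch of what that could look like (not part of the original code;
	// on POSIX systems a directory can be fsynced through an open *os.File):
	//
	//	if dir, err := os.Open(sf.path); err == nil {
	//		dir.Sync()
	//		dir.Close()
	//	}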

	// Simulate power failure at this point for some testing scenarios.
	if wal.cm.dependencies.Disrupt("incompleteAddStorageFolder") {
		return nil
	}

	// Storage folder addition has completed successfully; commit the addition
	// through the WAL.
	wal.mu.Lock()
	wal.cm.storageFolders[sf.index] = sf
	wal.appendChange(stateChange{
		StorageFolderAdditions: []savedStorageFolder{sf.savedStorageFolder()},
	})
	syncChan = wal.syncChan
	wal.mu.Unlock()

	// Wait until the WAL entry has synced before confirming that the storage
	// folder addition has completed.
	<-syncChan

	// Set the progress back to '0'.
	atomic.StoreUint64(&sf.atomicProgressNumerator, 0)
	atomic.StoreUint64(&sf.atomicProgressDenominator, 0)
	return nil
}

// commitAddStorageFolder integrates a pending AddStorageFolder call into the
// state. commitAddStorageFolder should only be called during WAL recovery.
func (wal *writeAheadLog) commitAddStorageFolder(ssf savedStorageFolder) {
	sf, exists := wal.cm.storageFolders[ssf.Index]
	if exists {
		if sf.metadataFile != nil {
			sf.metadataFile.Close()
		}
		if sf.sectorFile != nil {
			sf.sectorFile.Close()
		}
	}

	sf = &storageFolder{
		index: ssf.Index,
		path:  ssf.Path,
		usage: ssf.Usage,

		availableSectors: make(map[sectorID]uint32),
	}

	var err error
	sf.metadataFile, err = wal.cm.dependencies.OpenFile(filepath.Join(sf.path, metadataFile), os.O_RDWR, 0700)
	if err != nil {
		wal.cm.log.Println("Difficulties opening sector metadata file for", sf.path, ":", err)
		return
	}
	sf.sectorFile, err = wal.cm.dependencies.OpenFile(filepath.Join(sf.path, sectorFile), os.O_RDWR, 0700)
	if err != nil {
		wal.cm.log.Println("Difficulties opening sector file for", sf.path, ":", err)
		sf.metadataFile.Close()
		return
	}
	wal.cm.storageFolders[sf.index] = sf
}

// AddStorageFolder adds a storage folder to the contract manager.
func (cm *ContractManager) AddStorageFolder(path string, size uint64) error {
	err := cm.tg.Add()
	if err != nil {
		return err
	}
	defer cm.tg.Done()

	// Check that the storage folder being added meets the size requirements.
	sectors := size / modules.SectorSize
	if sectors > MaximumSectorsPerStorageFolder {
		return ErrLargeStorageFolder
	}
	if sectors < MinimumSectorsPerStorageFolder {
		return ErrSmallStorageFolder
	}
	if sectors%storageFolderGranularity != 0 {
		return errStorageFolderGranularity
	}
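	// For example, if modules.SectorSize is 4 MiB and
	// storageFolderGranularity is 64, then size must be a multiple of
	// 256 MiB to pass the granularity check.
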
	// Check that the path is an absolute path.
	if !filepath.IsAbs(path) {
		return errRelativePath
	}

	// Check that the path being added exists and is a directory.
	pathInfo, err := os.Stat(path)
	if err != nil {
		return err
	}
	if !pathInfo.Mode().IsDir() {
		return errStorageFolderNotFolder
	}

	// Create a storage folder object and add it to the WAL.
	newSF := &storageFolder{
		path:  path,
		usage: make([]uint64, sectors/64),

		availableSectors: make(map[sectorID]uint32),
	}
	err = cm.wal.managedAddStorageFolder(newSF)
	if err != nil {
		cm.log.Println("Call to AddStorageFolder has failed:", err)
		return err
	}
	return nil
}
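
// Usage sketch (hypothetical values; assumes an initialized ContractManager
// cm). The path must already exist as an absolute directory path, and the
// size must satisfy the sector-count checks above:
//
//	err := cm.AddStorageFolder("/mnt/storage", 1<<40) // 1 TiB folder
//	if err != nil {
//		log.Fatal(err)
//	}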