gitlab.com/SiaPrime/SiaPrime@v1.4.1/modules/host/contractmanager/storagefoldergrow.go (about)

     1  package contractmanager
     2  
     3  import (
     4  	"errors"
     5  	"sync"
     6  	"sync/atomic"
     7  
     8  	"gitlab.com/SiaPrime/SiaPrime/build"
     9  	"gitlab.com/SiaPrime/SiaPrime/modules"
    10  )
    11  
type (
	// storageFolderExtension is the data saved to the WAL to indicate that a
	// storage folder has been extended successfully.
	storageFolderExtension struct {
		// Index identifies the storage folder that was extended.
		Index uint16
		// NewSectorCount is the total number of sectors the folder holds
		// after the extension completed.
		NewSectorCount uint32
	}

	// unfinishedStorageFolderExtension contains the data necessary to reverse
	// a storage folder extension that has failed.
	unfinishedStorageFolderExtension struct {
		// Index identifies the storage folder being extended.
		Index uint16
		// OldSectorCount is the sector count to roll back to if the
		// extension does not complete.
		OldSectorCount uint32
	}
)
    27  
    28  // findUnfinishedStorageFolderExtensions will scroll through a set of state
    29  // changes as pull out all of the storage folder extensions which have not yet
    30  // completed.
    31  func findUnfinishedStorageFolderExtensions(scs []stateChange) []unfinishedStorageFolderExtension {
    32  	// Use a map to figure out what unfinished storage folder extensions exist
    33  	// and use it to remove the ones that have terminated.
    34  	usfeMap := make(map[uint16]unfinishedStorageFolderExtension)
    35  	for _, sc := range scs {
    36  		for _, usfe := range sc.UnfinishedStorageFolderExtensions {
    37  			usfeMap[usfe.Index] = usfe
    38  		}
    39  		for _, sfe := range sc.StorageFolderExtensions {
    40  			delete(usfeMap, sfe.Index)
    41  		}
    42  		for _, index := range sc.ErroredStorageFolderExtensions {
    43  			delete(usfeMap, index)
    44  		}
    45  		for _, sfr := range sc.StorageFolderRemovals {
    46  			delete(usfeMap, sfr.Index)
    47  		}
    48  	}
    49  
    50  	// Return the active unifinished storage folder extensions as a slice.
    51  	usfes := make([]unfinishedStorageFolderExtension, 0, len(usfeMap))
    52  	for _, usfe := range usfeMap {
    53  		usfes = append(usfes, usfe)
    54  	}
    55  	return usfes
    56  }
    57  
    58  // cleanupUnfinishedStorageFolderExtensions will reset any unsuccessful storage
    59  // folder extensions from the previous run.
    60  func (wal *writeAheadLog) cleanupUnfinishedStorageFolderExtensions(scs []stateChange) {
    61  	usfes := findUnfinishedStorageFolderExtensions(scs)
    62  	for _, usfe := range usfes {
    63  		sf, exists := wal.cm.storageFolders[usfe.Index]
    64  		if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
    65  			wal.cm.log.Critical("unfinished storage folder extension exists where the storage folder does not exist")
    66  			continue
    67  		}
    68  
    69  		// Truncate the files back to their original size.
    70  		err := sf.metadataFile.Truncate(int64(len(sf.usage) * storageFolderGranularity * sectorMetadataDiskSize))
    71  		if err != nil {
    72  			wal.cm.log.Printf("Error: unable to truncate metadata file as storage folder %v is resized\n", sf.path)
    73  		}
    74  		err = sf.sectorFile.Truncate(int64(modules.SectorSize * storageFolderGranularity * uint64(len(sf.usage))))
    75  		if err != nil {
    76  			wal.cm.log.Printf("Error: unable to truncate sector file as storage folder %v is resized\n", sf.path)
    77  		}
    78  
    79  		// Append an error call to the changeset, indicating that the storage
    80  		// folder add was not completed successfully.
    81  		wal.appendChange(stateChange{
    82  			ErroredStorageFolderExtensions: []uint16{sf.index},
    83  		})
    84  	}
    85  }
    86  
    87  // commitStorageFolderExtension will apply a storage folder extension to the
    88  // state.
    89  func (wal *writeAheadLog) commitStorageFolderExtension(sfe storageFolderExtension) {
    90  	sf, exists := wal.cm.storageFolders[sfe.Index]
    91  	if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
    92  		wal.cm.log.Critical("ERROR: storage folder extension provided for storage folder that does not exist")
    93  		return
    94  	}
    95  
    96  	newUsageSize := sfe.NewSectorCount / storageFolderGranularity
    97  	appendUsage := make([]uint64, int(newUsageSize)-len(sf.usage))
    98  	sf.usage = append(sf.usage, appendUsage...)
    99  }
   100  
   101  // growStorageFolder will extend the storage folder files so that they may hold
   102  // more sectors.
   103  func (wal *writeAheadLog) growStorageFolder(index uint16, newSectorCount uint32) error {
   104  	// Retrieve the specified storage folder.
   105  	wal.mu.Lock()
   106  	sf, exists := wal.cm.storageFolders[index]
   107  	wal.mu.Unlock()
   108  	if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
   109  		return errStorageFolderNotFound
   110  	}
   111  
   112  	// Lock the storage folder for the duration of the operation.
   113  	sf.mu.Lock()
   114  	defer sf.mu.Unlock()
   115  
   116  	// Write the intention to increase the storage folder size to the WAL,
   117  	// providing enough information to allow a truncation if the growing fails.
   118  	wal.mu.Lock()
   119  	wal.appendChange(stateChange{
   120  		UnfinishedStorageFolderExtensions: []unfinishedStorageFolderExtension{{
   121  			Index:          index,
   122  			OldSectorCount: uint32(len(sf.usage)) * storageFolderGranularity,
   123  		}},
   124  	})
   125  	syncChan := wal.syncChan
   126  	wal.mu.Unlock()
   127  	<-syncChan
   128  
   129  	// Prepare variables for growing the storage folder.
   130  	currentHousingSize := int64(len(sf.usage)) * int64(modules.SectorSize) * storageFolderGranularity
   131  	currentMetadataSize := int64(len(sf.usage)) * sectorMetadataDiskSize * storageFolderGranularity
   132  	newHousingSize := int64(newSectorCount) * int64(modules.SectorSize)
   133  	newMetadataSize := int64(newSectorCount) * sectorMetadataDiskSize
   134  	if newHousingSize <= currentHousingSize || newMetadataSize <= currentMetadataSize {
   135  		wal.cm.log.Critical("growStorageFolder called without size increase", newHousingSize, currentHousingSize, newMetadataSize, currentMetadataSize)
   136  		return errors.New("unable to make the requested change, please notify the devs that there is a bug")
   137  	}
   138  	housingWriteSize := newHousingSize - currentHousingSize
   139  	metadataWriteSize := newMetadataSize - currentMetadataSize
   140  
   141  	// If there's an error in the rest of the function, reset the storage
   142  	// folders to their original size.
   143  	var err error
   144  	defer func(sf *storageFolder, housingSize, metadataSize int64) {
   145  		if err != nil {
   146  			wal.mu.Lock()
   147  			defer wal.mu.Unlock()
   148  
   149  			// Remove the leftover files from the failed operation.
   150  			err = build.ComposeErrors(err, sf.metadataFile.Truncate(housingSize))
   151  			err = build.ComposeErrors(err, sf.sectorFile.Truncate(metadataSize))
   152  
   153  			// Signal in the WAL that the unfinished storage folder addition
   154  			// has failed.
   155  			wal.appendChange(stateChange{
   156  				ErroredStorageFolderExtensions: []uint16{sf.index},
   157  			})
   158  		}
   159  	}(sf, currentMetadataSize, currentHousingSize)
   160  
   161  	// Extend the sector file and metadata file on disk.
   162  	atomic.StoreUint64(&sf.atomicProgressDenominator, uint64(housingWriteSize+metadataWriteSize))
   163  
   164  	stepCount := housingWriteSize / folderAllocationStepSize
   165  	for i := int64(0); i < stepCount; i++ {
   166  		err = sf.sectorFile.Truncate(currentHousingSize + (folderAllocationStepSize * (i + 1)))
   167  		if err != nil {
   168  			return build.ExtendErr("could not allocate storage folder", err)
   169  		}
   170  		// After each iteration, update the progress numerator.
   171  		atomic.AddUint64(&sf.atomicProgressNumerator, folderAllocationStepSize)
   172  	}
   173  	err = sf.sectorFile.Truncate(currentHousingSize + housingWriteSize)
   174  	if err != nil {
   175  		return build.ExtendErr("could not allocate sector data file", err)
   176  	}
   177  
   178  	// Write the metadata file.
   179  	err = sf.metadataFile.Truncate(currentMetadataSize + metadataWriteSize)
   180  	if err != nil {
   181  		return build.ExtendErr("could not allocate sector metadata file", err)
   182  	}
   183  
   184  	// The file creation process is essentially complete at this point, report
   185  	// complete progress.
   186  	atomic.StoreUint64(&sf.atomicProgressNumerator, uint64(housingWriteSize+metadataWriteSize))
   187  
   188  	// Sync the files.
   189  	var err1, err2 error
   190  	var wg sync.WaitGroup
   191  	wg.Add(2)
   192  	go func() {
   193  		defer wg.Done()
   194  		err1 = sf.metadataFile.Sync()
   195  		if err != nil {
   196  			wal.cm.log.Println("could not synchronize allocated sector metadata file:", err)
   197  		}
   198  	}()
   199  	go func() {
   200  		defer wg.Done()
   201  		err2 = sf.sectorFile.Sync()
   202  		if err != nil {
   203  			wal.cm.log.Println("could not synchronize allocated sector data file:", err)
   204  		}
   205  	}()
   206  	wg.Wait()
   207  	if err1 != nil || err2 != nil {
   208  		err = build.ComposeErrors(err1, err2)
   209  		wal.cm.log.Println("cound not synchronize storage folder extensions:", err)
   210  		return build.ExtendErr("unable to synchronize storage folder extensions", err)
   211  	}
   212  
   213  	// Simulate power failure at this point for some testing scenarios.
   214  	if wal.cm.dependencies.Disrupt("incompleteGrowStorageFolder") {
   215  		return nil
   216  	}
   217  
   218  	// Storage folder growth has completed successfully, commit through the
   219  	// WAL.
   220  	wal.mu.Lock()
   221  	wal.cm.storageFolders[sf.index] = sf
   222  	wal.appendChange(stateChange{
   223  		StorageFolderExtensions: []storageFolderExtension{{
   224  			Index:          sf.index,
   225  			NewSectorCount: newSectorCount,
   226  		}},
   227  	})
   228  	syncChan = wal.syncChan
   229  	wal.mu.Unlock()
   230  
   231  	// Wait to confirm the storage folder addition has completed until the WAL
   232  	// entry has synced.
   233  	<-syncChan
   234  
   235  	// Set the progress back to '0'.
   236  	atomic.StoreUint64(&sf.atomicProgressNumerator, 0)
   237  	atomic.StoreUint64(&sf.atomicProgressDenominator, 0)
   238  	return nil
   239  }