gitlab.com/SiaPrime/SiaPrime@v1.4.1/modules/host/contractmanager/sectorupdate.go

package contractmanager

import (
	"errors"
	"sync"
	"sync/atomic"

	"gitlab.com/SiaPrime/SiaPrime/build"
	"gitlab.com/SiaPrime/SiaPrime/crypto"
	"gitlab.com/SiaPrime/SiaPrime/modules"
)
    12  
// commitUpdateSector will commit a sector update to the contract manager,
// writing in metadata and usage info if the sector still exists, and deleting
// the usage info if the sector does not exist. The update is idempotent.
func (wal *writeAheadLog) commitUpdateSector(su sectorUpdate) {
	sf, exists := wal.cm.storageFolders[su.Folder]
	if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
		wal.cm.log.Printf("ERROR: unable to locate storage folder for a committed sector update.")
		return
	}

	// If the sector is being cleaned from disk, unset the usage flag.
	if su.Count == 0 {
		sf.clearUsage(su.Index)
		return
	}

	// Set the usage flag and update the on-disk metadata. Abort if the
	// metadata write fails.
	err := wal.writeSectorMetadata(sf, su)
	if err != nil {
		wal.cm.log.Printf("ERROR: unable to write sector metadata for %v: %v\n", sf.path, err)
		return
	}
	sf.setUsage(su.Index)
}
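
// NOTE: Because commitUpdateSector is idempotent, the WAL can safely replay
// the same sectorUpdate while recovering from an unclean shutdown. A minimal
// sketch of such a replay loop, assuming a hypothetical slice of the state
// changes recovered from the WAL file (names here are illustrative, not part
// of this package):
//
//	for _, sc := range recoveredStateChanges { // hypothetical []stateChange
//		for _, su := range sc.SectorUpdates {
//			wal.commitUpdateSector(su) // applying twice yields the same state
//		}
//	}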
    38  
// managedAddPhysicalSector is a WAL operation to add a physical sector to the
// contract manager.
func (wal *writeAheadLog) managedAddPhysicalSector(id sectorID, data []byte, count uint16) error {
	// Sanity check - data should have modules.SectorSize bytes.
	if uint64(len(data)) != modules.SectorSize {
		wal.cm.log.Critical("sector has the wrong size", modules.SectorSize, len(data))
		return errors.New("malformed sector")
	}

	// Find a committed storage folder that has enough space to receive
	// this sector. Keep trying new storage folders if some return
	// errors during disk operations.
	wal.mu.Lock()
	storageFolders := wal.cm.availableStorageFolders()
	wal.mu.Unlock()
	var syncChan chan struct{}
	for len(storageFolders) >= 1 {
		var storageFolderIndex int
		err := func() error {
			// NOTE: Convention is broken when working with the WAL lock here,
			// due to the complexity required to manage both the WAL lock and
			// the storage folder lock. Pay close attention when reviewing and
			// modifying.

			// Grab a vacant storage folder.
			wal.mu.Lock()
			var sf *storageFolder
			sf, storageFolderIndex = vacancyStorageFolder(storageFolders)
			if sf == nil {
				// None of the storage folders have enough room to house the
				// sector.
				wal.mu.Unlock()
				return modules.ErrInsufficientStorageForSector
			}
			defer sf.mu.RUnlock()

			// Grab a sector from the storage folder. The WAL lock cannot be
			// released between grabbing the storage folder and grabbing a
			// sector, lest another thread request the final available sector
			// in the storage folder.
			sectorIndex, err := randFreeSector(sf.usage)
			if err != nil {
				wal.mu.Unlock()
				wal.cm.log.Critical("a storage folder with full usage was returned from vacancyStorageFolder")
				return err
			}
			// Set the usage, but mark it as uncommitted.
			sf.setUsage(sectorIndex)
			sf.availableSectors[id] = sectorIndex
			wal.mu.Unlock()

			// NOTE: The usage has been set; in the event of failure the usage
			// must be cleared.

			// Try writing the new sector to disk.
			err = writeSector(sf.sectorFile, sectorIndex, data)
			if err != nil {
				wal.cm.log.Printf("ERROR: Unable to write sector for folder %v: %v\n", sf.path, err)
				atomic.AddUint64(&sf.atomicFailedWrites, 1)
				wal.mu.Lock()
				sf.clearUsage(sectorIndex)
				delete(sf.availableSectors, id)
				wal.mu.Unlock()
				return errDiskTrouble
			}

			// Try writing the sector metadata to disk.
			su := sectorUpdate{
				Count:  count,
				ID:     id,
				Folder: sf.index,
				Index:  sectorIndex,
			}
			err = wal.writeSectorMetadata(sf, su)
			if err != nil {
				wal.cm.log.Printf("ERROR: Unable to write sector metadata for folder %v: %v\n", sf.path, err)
				atomic.AddUint64(&sf.atomicFailedWrites, 1)
				wal.mu.Lock()
				sf.clearUsage(sectorIndex)
				delete(sf.availableSectors, id)
				wal.mu.Unlock()
				return errDiskTrouble
			}

			// Sector added successfully, update the WAL and the state.
			sl := sectorLocation{
				index:         sectorIndex,
				storageFolder: sf.index,
				count:         count,
			}
			wal.mu.Lock()
			wal.appendChange(stateChange{
				SectorUpdates: []sectorUpdate{su},
			})
			delete(wal.cm.storageFolders[su.Folder].availableSectors, id)
			wal.cm.sectorLocations[id] = sl
			syncChan = wal.syncChan
			wal.mu.Unlock()
			return nil
		}()
		if err != nil {
			// End the loop if no storage folder proved suitable.
			if storageFolderIndex == -1 {
				storageFolders = nil
				break
			}

			// Remove the storage folder that failed and try the next one.
			storageFolders = append(storageFolders[:storageFolderIndex], storageFolders[storageFolderIndex+1:]...)
			continue
		}
		// Sector added successfully, break.
		break
	}
	if len(storageFolders) < 1 {
		return modules.ErrInsufficientStorageForSector
	}

	// Wait for the WAL change to be synchronized to disk before returning.
	<-syncChan
	return nil
}
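
// The add-physical-sector path above is effectively a two-phase commit. A
// condensed outline of the ordering, which is what makes a crash at any point
// recoverable:
//
//	1. Reserve: setUsage(sectorIndex) in memory, with the reservation tracked
//	   in availableSectors so it is known to be uncommitted.
//	2. Write the sector data, then the sector metadata, to disk. On failure,
//	   roll back the in-memory reservation and try another folder.
//	3. Append the sectorUpdate to the WAL and move the sector from
//	   availableSectors to sectorLocations.
//	4. Block on syncChan until the WAL change is fsynced; only then report
//	   success to the caller.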
   162  
// managedAddVirtualSector will add a virtual sector to the contract manager.
func (wal *writeAheadLog) managedAddVirtualSector(id sectorID, location sectorLocation) error {
	// Update the location count, erroring out if the count is already at the
	// maximum value of a uint16.
	if location.count == 65535 {
		return errMaxVirtualSectors
	}
	location.count++

	// Prepare the sector update.
	su := sectorUpdate{
		Count:  location.count,
		ID:     id,
		Folder: location.storageFolder,
		Index:  location.index,
	}

	// Append the sector update to the WAL.
	wal.mu.Lock()
	sf, exists := wal.cm.storageFolders[su.Folder]
	if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
		// Need to check that the storage folder exists before syncing the
		// commit that increases the virtual sector count.
		wal.mu.Unlock()
		return errStorageFolderNotFound
	}
	wal.appendChange(stateChange{
		SectorUpdates: []sectorUpdate{su},
	})
	wal.cm.sectorLocations[id] = location
	syncChan := wal.syncChan
	wal.mu.Unlock()
	<-syncChan

	// Update the metadata on disk. Metadata is updated on disk after the sync
	// so that there is no risk of obliterating the previous count in the event
	// that the change is not fully committed during unclean shutdown.
	err := wal.writeSectorMetadata(sf, su)
	if err != nil {
		// Revert the sector update in the WAL to reflect the fact that adding
		// the sector has failed.
		su.Count--
		location.count--
		wal.mu.Lock()
		wal.appendChange(stateChange{
			SectorUpdates: []sectorUpdate{su},
		})
		wal.cm.sectorLocations[id] = location
		// Grab the current sync channel; the channel captured earlier has
		// already been closed, so waiting on it again would return
		// immediately without making the revert durable.
		syncChan = wal.syncChan
		wal.mu.Unlock()
		<-syncChan
		return build.ExtendErr("unable to write sector metadata during addSector call", err)
	}
	return nil
}
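
// Virtual sectors never touch the sector data itself; only the reference
// count in the metadata changes. The ordering above (WAL append, fsync, then
// metadata write) means an unclean shutdown can at worst leave the on-disk
// count one behind the WAL, which replaying commitUpdateSector repairs; the
// reverse ordering could persist a count for a change that was never
// committed.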
   216  
// managedDeleteSector will delete a sector (physical) from the contract manager.
func (wal *writeAheadLog) managedDeleteSector(id sectorID) error {
	// Write the sector delete to the WAL.
	var location sectorLocation
	var syncChan chan struct{}
	var sf *storageFolder
	err := func() error {
		wal.mu.Lock()
		defer wal.mu.Unlock()

		// Fetch the metadata related to the sector.
		var exists bool
		location, exists = wal.cm.sectorLocations[id]
		if !exists {
			return ErrSectorNotFound
		}
		sf, exists = wal.cm.storageFolders[location.storageFolder]
		if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
			wal.cm.log.Critical("deleting a sector from a storage folder that does not exist?")
			return errStorageFolderNotFound
		}

		// Inform the WAL of the sector update.
		wal.appendChange(stateChange{
			SectorUpdates: []sectorUpdate{{
				Count:  0,
				ID:     id,
				Folder: location.storageFolder,
				Index:  location.index,
			}},
		})

		// Delete the sector and mark the usage as available.
		delete(wal.cm.sectorLocations, id)
		sf.availableSectors[id] = location.index

		// Block until the change has been committed.
		syncChan = wal.syncChan
		return nil
	}()
	if err != nil {
		return err
	}
	<-syncChan

	// Only update the usage after the sector delete has been committed to disk
	// fully.
	wal.mu.Lock()
	delete(sf.availableSectors, id)
	sf.clearUsage(location.index)
	wal.mu.Unlock()
	return nil
}
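
// The usage bit for a deleted sector is cleared only after the delete has
// been fsynced through the WAL. Until then the slot stays reserved in
// availableSectors, so a concurrent managedAddPhysicalSector cannot hand the
// slot out and overwrite data that would still be needed if the host crashed
// before the delete committed.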
   270  
// managedRemoveSector will remove a sector (virtual or physical) from the
// contract manager.
func (wal *writeAheadLog) managedRemoveSector(id sectorID) error {
	// Inform the WAL of the removed sector.
	var location sectorLocation
	var su sectorUpdate
	var sf *storageFolder
	var syncChan chan struct{}
	err := func() error {
		wal.mu.Lock()
		defer wal.mu.Unlock()

		// Grab the number of virtual sectors that have been committed with
		// this root.
		var exists bool
		location, exists = wal.cm.sectorLocations[id]
		if !exists {
			return ErrSectorNotFound
		}
		sf, exists = wal.cm.storageFolders[location.storageFolder]
		if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
			wal.cm.log.Critical("removing a sector from a storage folder that does not exist?")
			return errStorageFolderNotFound
		}

		// Inform the WAL of the sector update.
		location.count--
		su = sectorUpdate{
			Count:  location.count,
			ID:     id,
			Folder: location.storageFolder,
			Index:  location.index,
		}
		wal.appendChange(stateChange{
			SectorUpdates: []sectorUpdate{su},
		})

		// Update the in-memory representation of the sector.
		if location.count == 0 {
			// Delete the sector and mark it as available.
			delete(wal.cm.sectorLocations, id)
			sf.availableSectors[id] = location.index
		} else {
			// Reduce the sector usage.
			wal.cm.sectorLocations[id] = location
		}
		syncChan = wal.syncChan
		return nil
	}()
	if err != nil {
		return err
	}
	// Synchronize before updating the metadata or clearing the usage.
	<-syncChan

	// Update the metadata, and the usage.
	if location.count != 0 {
		err = wal.writeSectorMetadata(sf, su)
		if err != nil {
			// Revert the previous change.
			wal.mu.Lock()
			su.Count++
			location.count++
			wal.appendChange(stateChange{
				SectorUpdates: []sectorUpdate{su},
			})
			wal.cm.sectorLocations[id] = location
			wal.mu.Unlock()
			return build.ExtendErr("failed to write sector metadata", err)
		}
	}

	// Only update the usage after the sector removal has been committed to
	// disk entirely. The usage is not updated until after the commit has
	// completed to prevent the actual sector data from being overwritten in
	// the event of unclean shutdown.
	if location.count == 0 {
		wal.mu.Lock()
		sf.clearUsage(location.index)
		delete(sf.availableSectors, id)
		wal.mu.Unlock()
	}
	return nil
}
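
// Remove differs from delete in that it only decrements the reference count
// of a virtual sector; the physical data and the usage bit are touched only
// when the count reaches zero, at which point the tail of the function
// follows the same commit-then-clear ordering as managedDeleteSector.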
   355  
// writeSectorMetadata will take a sector update and write the related metadata
// to disk.
func (wal *writeAheadLog) writeSectorMetadata(sf *storageFolder, su sectorUpdate) error {
	err := writeSectorMetadata(sf.metadataFile, su.Index, su.ID, su.Count)
	if err != nil {
		wal.cm.log.Printf("ERROR: unable to write sector metadata to folder %v when adding sector: %v\n", su.Folder, err)
		atomic.AddUint64(&sf.atomicFailedWrites, 1)
		return err
	}
	atomic.AddUint64(&sf.atomicSuccessfulWrites, 1)
	return nil
}
   368  
// AddSector will add a sector to the contract manager.
func (cm *ContractManager) AddSector(root crypto.Hash, sectorData []byte) error {
	// Prevent shutdown until this function completes.
	err := cm.tg.Add()
	if err != nil {
		return err
	}
	defer cm.tg.Done()

	// Hold a lock on the sector for the duration of the function; the lock is
	// released when the function returns.
	id := cm.managedSectorID(root)
	cm.wal.managedLockSector(id)
	defer cm.wal.managedUnlockSector(id)

	// Determine whether the sector is virtual or physical.
	cm.wal.mu.Lock()
	location, exists := cm.sectorLocations[id]
	cm.wal.mu.Unlock()
	if exists {
		err = cm.wal.managedAddVirtualSector(id, location)
	} else {
		err = cm.wal.managedAddPhysicalSector(id, sectorData, 1)
	}
	if err != nil {
		cm.log.Println("ERROR: Unable to add sector:", err)
		return err
	}
	return nil
}
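
// Illustrative use of AddSector, assuming an already-constructed
// ContractManager cm (a sketch only; payload is a hypothetical caller
// buffer):
//
//	data := make([]byte, modules.SectorSize)
//	copy(data, payload)
//	root := crypto.MerkleRoot(data)
//	if err := cm.AddSector(root, data); err != nil {
//		// e.g. modules.ErrInsufficientStorageForSector when all folders
//		// are full.
//		return err
//	}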
   399  
// AddSectorBatch is a non-ACID call to add a bunch of sectors at once.
// Necessary for compatibility with old renters.
//
// TODO: Make ACID, and definitely improve the performance as well.
func (cm *ContractManager) AddSectorBatch(sectorRoots []crypto.Hash) error {
	// Prevent shutdown until the background work completes. tg.Done is called
	// by the background goroutine rather than deferred here, otherwise the
	// thread group could be released while sectors are still being added.
	err := cm.tg.Add()
	if err != nil {
		return err
	}

	go func() {
		defer cm.tg.Done()
		// Ensure only 'maxSectorBatchThreads' goroutines are running at a time.
		semaphore := make(chan struct{}, maxSectorBatchThreads)
		for _, root := range sectorRoots {
			semaphore <- struct{}{}
			go func(root crypto.Hash) {
				defer func() {
					<-semaphore
				}()

				// Hold a lock on the sector for the duration of the
				// operation.
				id := cm.managedSectorID(root)
				cm.wal.managedLockSector(id)
				defer cm.wal.managedUnlockSector(id)

				// The batch call only increments the count of sectors that
				// already exist; missing sectors are skipped.
				cm.wal.mu.Lock()
				location, exists := cm.sectorLocations[id]
				cm.wal.mu.Unlock()
				if exists {
					cm.wal.managedAddVirtualSector(id, location) // Error is ignored.
				}
			}(root)
		}
		// Wait for the remaining workers by filling the semaphore.
		for i := 0; i < maxSectorBatchThreads; i++ {
			semaphore <- struct{}{}
		}
	}()
	return nil
}
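
// A note on the batch design: AddSectorBatch returns immediately and does the
// work in the background, so callers get no per-sector error reporting; that
// is the "non-ACID" caveat in the comment above. RemoveSectorBatch below
// takes the synchronous route instead, waiting on a sync.WaitGroup before
// returning.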
   440  
// DeleteSector will delete a sector from the contract manager. If multiple
// copies of the sector exist, all of them will be removed. This should only be
// used to remove offensive data, as it will cause corruption in the contract
// manager. This corruption puts the contract manager at risk of failing
// storage proofs. If the amount of data removed is small, the risk is small.
// This operation will not destabilize the contract manager.
func (cm *ContractManager) DeleteSector(root crypto.Hash) error {
	// Prevent shutdown until this function completes.
	err := cm.tg.Add()
	if err != nil {
		return err
	}
	defer cm.tg.Done()
	id := cm.managedSectorID(root)
	cm.wal.managedLockSector(id)
	defer cm.wal.managedUnlockSector(id)

	return cm.wal.managedDeleteSector(id)
}
   456  
// RemoveSector will remove a sector from the contract manager. If multiple
// copies of the sector exist, only one will be removed.
func (cm *ContractManager) RemoveSector(root crypto.Hash) error {
	// Prevent shutdown until this function completes.
	err := cm.tg.Add()
	if err != nil {
		return err
	}
	defer cm.tg.Done()
	id := cm.managedSectorID(root)
	cm.wal.managedLockSector(id)
	defer cm.wal.managedUnlockSector(id)

	return cm.wal.managedRemoveSector(id)
}
   468  
// RemoveSectorBatch is a non-ACID call to remove a bunch of sectors at once.
// Necessary for compatibility with old renters.
//
// TODO: Make ACID, and definitely improve the performance as well.
func (cm *ContractManager) RemoveSectorBatch(sectorRoots []crypto.Hash) error {
	// Prevent shutdown until this function completes.
	err := cm.tg.Add()
	if err != nil {
		return err
	}
	defer cm.tg.Done()

	// Remove each sector in a separate goroutine.
	var wg sync.WaitGroup
	// Ensure only 'maxSectorBatchThreads' goroutines are running at a time.
	semaphore := make(chan struct{}, maxSectorBatchThreads)
	for _, root := range sectorRoots {
		wg.Add(1)
		semaphore <- struct{}{}
		go func(root crypto.Hash) {
			defer wg.Done()
			defer func() {
				<-semaphore
			}()
			id := cm.managedSectorID(root)
			cm.wal.managedLockSector(id)
			cm.wal.managedRemoveSector(id) // Error is ignored.
			cm.wal.managedUnlockSector(id)
		}(root)
	}
	wg.Wait()
	return nil
}
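
// Illustrative use of RemoveSectorBatch, assuming an already-constructed
// ContractManager cm and a hypothetical slice of roots (sketch only):
//
//	roots := []crypto.Hash{rootA, rootB, rootC}
//	if err := cm.RemoveSectorBatch(roots); err != nil {
//		// Only thread-group shutdown surfaces here; per-sector removal
//		// errors are ignored by design.
//		return err
//	}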