github.com/nebulouslabs/sia@v1.3.7/modules/host/contractmanager/sector.go

package contractmanager

import (
	"encoding/binary"
	"errors"
	"sync"
	"sync/atomic"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/crypto"
	"github.com/NebulousLabs/Sia/modules"
)

var (
	// errDiskTrouble is returned when the host is supposed to have enough
	// storage to hold a new sector but failures that are likely related to the
	// disk have prevented the host from successfully adding the sector.
	errDiskTrouble = errors.New("host unable to add sector despite having the storage capacity to do so")

	// errInsufficientStorageForSector is returned if the host tries to add a
	// sector when there is not enough storage remaining on the host to accept
	// the sector.
	//
	// Ideally, the host will adjust pricing as it starts to fill up, so this
	// error should be rare. Demand should drive the price up faster than the
	// host runs out of space, such that the host is always hovering around
	// 95% capacity and rarely over 98% or under 90% capacity.
	errInsufficientStorageForSector = errors.New("not enough storage remaining to accept sector")

	// errMaxVirtualSectors is returned when a sector cannot be added because
	// the maximum number of virtual sectors for that sector id already
	// exists.
	errMaxVirtualSectors = errors.New("sector collides with a physical sector that already has the maximum allowed number of virtual sectors")

	// ErrSectorNotFound is returned when a lookup for a sector fails.
	ErrSectorNotFound = errors.New("could not find the desired sector")
)

type (
	// sectorID is the key used to look up sectors: a salted hash of the
	// sector's Merkle root, truncated to 12 bytes. See managedSectorID below.
	sectorID [12]byte

	// sectorLocation indicates the location of a sector on disk.
	sectorLocation struct {
		// index indicates the index of the sector's location within the storage
		// folder.
		index uint32

		// storageFolder indicates the index of the storage folder that the sector
		// is stored on.
		storageFolder uint16

		// count indicates the number of virtual sectors represented by the
		// physical sector described by this object. A maximum of 2^16-1 virtual
		// sectors are allowed for each sector, as count is a uint16. Proper use
		// by the renter should mean that the host never has more than 3 virtual
		// sectors for any sector.
		count uint16
	}

	// sectorLock contains a lock plus a count of the number of threads
	// currently waiting to access the lock.
	sectorLock struct {
		waiting int
		mu      sync.Mutex
	}
)
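
// sectorOffsets is an illustrative sketch, not part of the original file: it
// shows how a sectorLocation's index translates into byte offsets within a
// storage folder's files, matching the arithmetic used by readSector and
// writeSectorMetadata below. sectorMetadataDiskSize is a package constant
// defined in another file of this package.
func sectorOffsets(sl sectorLocation) (dataOffset, metadataOffset int64) {
	// Sector data is packed contiguously, one modules.SectorSize slot per
	// index, in the folder's sector file.
	dataOffset = int64(uint64(sl.index) * modules.SectorSize)
	// Metadata entries are fixed-size and stored in the same order in the
	// folder's metadata file.
	metadataOffset = sectorMetadataDiskSize * int64(sl.index)
	return dataOffset, metadataOffset
}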

// readSector will read the sector in the given file at the provided sector
// index.
func readSector(f modules.File, sectorIndex uint32) ([]byte, error) {
	b := make([]byte, modules.SectorSize)
	_, err := f.ReadAt(b, int64(uint64(sectorIndex)*modules.SectorSize))
	if err != nil {
		return nil, build.ExtendErr("unable to read within storage folder", err)
	}
	return b, nil
}

// readFullMetadata will read a full sector metadata file into memory.
func readFullMetadata(f modules.File, numSectors int) ([]byte, error) {
	sectorLookupBytes := make([]byte, numSectors*sectorMetadataDiskSize)
	_, err := f.ReadAt(sectorLookupBytes, 0)
	if err != nil {
		return nil, build.ExtendErr("unable to read metadata file for target storage folder", err)
	}
	return sectorLookupBytes, nil
}

// writeSector will write the given sector into the given file at the given
// index.
func writeSector(f modules.File, sectorIndex uint32, data []byte) error {
	_, err := f.WriteAt(data, int64(uint64(sectorIndex)*modules.SectorSize))
	if err != nil {
		return build.ExtendErr("unable to write within provided file", err)
	}
	return nil
}

// writeSectorMetadata will take a sector update and write the related metadata
// to disk. Each entry is sectorMetadataDiskSize bytes: the 12 byte sector id
// followed by a little-endian uint16 virtual sector count.
func writeSectorMetadata(f modules.File, sectorIndex uint32, id sectorID, count uint16) error {
	writeData := make([]byte, sectorMetadataDiskSize)
	copy(writeData, id[:])
	binary.LittleEndian.PutUint16(writeData[12:], count)
	_, err := f.WriteAt(writeData, sectorMetadataDiskSize*int64(sectorIndex))
	if err != nil {
		return build.ExtendErr("unable to write in given file", err)
	}
	return nil
}
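
// decodeSectorMetadata is a hypothetical helper, not part of the original
// file: it inverts the encoding performed by writeSectorMetadata, decoding
// the flat byte slice returned by readFullMetadata into (id, count) pairs.
func decodeSectorMetadata(sectorLookupBytes []byte) (ids []sectorID, counts []uint16) {
	for i := 0; i+sectorMetadataDiskSize <= len(sectorLookupBytes); i += sectorMetadataDiskSize {
		var id sectorID
		copy(id[:], sectorLookupBytes[i:i+12])
		ids = append(ids, id)
		counts = append(counts, binary.LittleEndian.Uint16(sectorLookupBytes[i+12:i+14]))
	}
	return ids, counts
}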

// managedSectorID returns the id that should be used when referring to a
// sector. There are lots of sectors, and to minimize their footprint a
// reduced size hash is used. Hashes are typically 256 bits to provide
// collision resistance when an attacker can perform orders of magnitude more
// than a billion trials per second. When attacking the host's sector ids
// though, the attacker can only do one trial per sector upload, and even then
// has minimal means to learn whether or not a collision was successfully
// achieved. Hash length can safely be reduced from 32 bytes to 12 bytes,
// which has a collision resistance of 2^48 (the birthday bound of a 96 bit
// id). The host however is unlikely to be storing 2^48 sectors, which at
// 4 MiB per sector would be over a zettabyte of data.
func (cm *ContractManager) managedSectorID(sectorRoot crypto.Hash) (id sectorID) {
	saltedRoot := crypto.HashAll(sectorRoot, cm.sectorSalt)
	copy(id[:], saltedRoot[:])
	return id
}
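
// exampleCollisionBudget is an illustrative sketch, not part of the original
// file, of the arithmetic in the comment above: a 12 byte (96 bit) id gives a
// birthday bound of 2^(96/2) = 2^48 sectors before a collision is expected.
func exampleCollisionBudget() uint64 {
	idBits := uint(len(sectorID{}) * 8) // 12 bytes * 8 = 96 bits
	return uint64(1) << (idBits / 2)    // 2^48
}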

// ReadSector will read a sector from the storage manager, returning the bytes
// that match the input sector root.
func (cm *ContractManager) ReadSector(root crypto.Hash) ([]byte, error) {
	err := cm.tg.Add()
	if err != nil {
		return nil, err
	}
	defer cm.tg.Done()
	id := cm.managedSectorID(root)
	cm.wal.managedLockSector(id)
	defer cm.wal.managedUnlockSector(id)

	// Fetch the sector metadata. If the sector does not exist, sl is the zero
	// value and the storage folder lookup result is discarded by the exists1
	// check below.
	cm.wal.mu.Lock()
	sl, exists1 := cm.sectorLocations[id]
	sf, exists2 := cm.storageFolders[sl.storageFolder]
	cm.wal.mu.Unlock()
	if !exists1 {
		return nil, ErrSectorNotFound
	}
	if !exists2 {
		cm.log.Critical("Unable to load storage folder despite having sector metadata")
		return nil, ErrSectorNotFound
	}
	if atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
		// TODO: Pick a new error instead.
		return nil, ErrSectorNotFound
	}

	// Read the sector.
	sectorData, err := readSector(sf.sectorFile, sl.index)
	if err != nil {
		atomic.AddUint64(&sf.atomicFailedReads, 1)
		return nil, build.ExtendErr("unable to fetch sector", err)
	}
	atomic.AddUint64(&sf.atomicSuccessfulReads, 1)
	return sectorData, nil
}
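
// readSectorExample is an illustrative caller, not part of the original file,
// demonstrating ReadSector's contract: ErrSectorNotFound distinguishes a
// missing sector from a disk failure or a shutdown in progress.
func readSectorExample(cm *ContractManager, root crypto.Hash) {
	data, err := cm.ReadSector(root)
	if err == ErrSectorNotFound {
		return // this host does not store a sector with that root
	}
	if err != nil {
		return // shutdown in progress, or the read itself failed
	}
	_ = data // on success, data holds modules.SectorSize bytes
}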

// managedLockSector grabs a sector lock.
func (wal *writeAheadLog) managedLockSector(id sectorID) {
	wal.mu.Lock()
	sl, exists := wal.cm.lockedSectors[id]
	if exists {
		sl.waiting++
	} else {
		sl = &sectorLock{
			waiting: 1,
		}
		wal.cm.lockedSectors[id] = sl
	}
	wal.mu.Unlock()

	// Block until the sector is available.
	sl.mu.Lock()
}

// managedUnlockSector releases a sector lock.
func (wal *writeAheadLog) managedUnlockSector(id sectorID) {
	wal.mu.Lock()
	defer wal.mu.Unlock()

	// Release the lock on the sector.
	sl, exists := wal.cm.lockedSectors[id]
	if !exists {
		wal.cm.log.Critical("Unlock of sector that is not locked.")
		return
	}
	sl.waiting--
	sl.mu.Unlock()

	// If nobody else is trying to lock the sector, perform garbage collection.
	if sl.waiting == 0 {
		delete(wal.cm.lockedSectors, id)
	}
}
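
// lockSectorExample is an illustrative sketch, not part of the original file:
// every managedLockSector call must be paired with a managedUnlockSector
// call, typically via defer, as ReadSector does above.
func lockSectorExample(wal *writeAheadLog, id sectorID) {
	wal.managedLockSector(id)
	defer wal.managedUnlockSector(id)
	// ... access the sector's data and metadata while holding the lock.
}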