gitlab.com/SiaPrime/SiaPrime@v1.4.1/modules/host/contractmanager/sector.go

package contractmanager

import (
	"encoding/binary"
	"errors"
	"sync"
	"sync/atomic"

	"gitlab.com/SiaPrime/SiaPrime/build"
	"gitlab.com/SiaPrime/SiaPrime/crypto"
	"gitlab.com/SiaPrime/SiaPrime/modules"
)

var (
	// errDiskTrouble is returned when the host is supposed to have enough
	// storage to hold a new sector but failures that are likely related to the
	// disk have prevented the host from successfully adding the sector.
	errDiskTrouble = errors.New("host unable to add sector despite having the storage capacity to do so")

	// errMaxVirtualSectors is returned when a sector cannot be added because
	// that sector id already has the maximum allowed number of virtual
	// sectors.
	errMaxVirtualSectors = errors.New("sector collides with a physical sector that already has the maximum allowed number of virtual sectors")

	// ErrSectorNotFound is returned when a lookup for a sector fails.
	ErrSectorNotFound = errors.New("could not find the desired sector")
)

type (
	// sectorID is the salted, truncated hash used to refer to a sector on
	// disk; see managedSectorID below for how it is derived.
	sectorID [12]byte

	// sectorLocation indicates the location of a sector on disk.
	sectorLocation struct {
		// index indicates the index of the sector's location within the storage
		// folder.
		index uint32

		// storageFolder indicates the index of the storage folder that the sector
		// is stored on.
		storageFolder uint16

		// count indicates the number of virtual sectors represented by the
		// physical sector described by this object. A maximum of 2^16 virtual
		// sectors are allowed for each sector. Proper use by the renter should
		// mean that the host never has more than 3 virtual sectors for any
		// sector.
		count uint16
	}

	// sectorLock contains a lock plus a count of the number of threads
	// currently waiting to access the lock.
	sectorLock struct {
		waiting int
		mu      sync.Mutex
	}
)
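
// sectorOffsets is an illustrative sketch and not part of the original file:
// it restates how a sectorLocation maps to positions on disk. The
// storageFolder field selects a storage folder, and the index field is scaled
// by the sector size (for sector data) or by sectorMetadataDiskSize (for the
// metadata record), matching the arithmetic in readSector, writeSector, and
// writeSectorMetadata below.
func sectorOffsets(sl sectorLocation) (dataOffset, metadataOffset int64) {
	dataOffset = int64(uint64(sl.index) * modules.SectorSize)
	metadataOffset = sectorMetadataDiskSize * int64(sl.index)
	return dataOffset, metadataOffset
}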

// readSector will read the sector at the provided sector index from the given
// file.
func readSector(f modules.File, sectorIndex uint32) ([]byte, error) {
	b := make([]byte, modules.SectorSize)
	_, err := f.ReadAt(b, int64(uint64(sectorIndex)*modules.SectorSize))
	if err != nil {
		return nil, build.ExtendErr("unable to read within storage folder", err)
	}
	return b, nil
}

// readFullMetadata will read a full sector metadata file into memory.
func readFullMetadata(f modules.File, numSectors int) ([]byte, error) {
	sectorLookupBytes := make([]byte, numSectors*sectorMetadataDiskSize)
	_, err := f.ReadAt(sectorLookupBytes, 0)
	if err != nil {
		return nil, build.ExtendErr("unable to read metadata file for target storage folder", err)
	}
	return sectorLookupBytes, nil
}

// writeSector will write the given sector into the given file at the given
// index.
func writeSector(f modules.File, sectorIndex uint32, data []byte) error {
	_, err := f.WriteAt(data, int64(uint64(sectorIndex)*modules.SectorSize))
	if err != nil {
		return build.ExtendErr("unable to write within provided file", err)
	}
	return nil
}

// writeSectorMetadata will take a sector update and write the related metadata
// to disk. Each metadata record is the 12-byte sector id followed by the
// virtual sector count as a little-endian uint16.
func writeSectorMetadata(f modules.File, sectorIndex uint32, id sectorID, count uint16) error {
	writeData := make([]byte, sectorMetadataDiskSize)
	copy(writeData, id[:])
	binary.LittleEndian.PutUint16(writeData[12:], count)
	_, err := f.WriteAt(writeData, sectorMetadataDiskSize*int64(sectorIndex))
	if err != nil {
		return build.ExtendErr("unable to write in given file", err)
	}
	return nil
}
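
// readSectorMetadata is an illustrative sketch and not part of the original
// file: it decodes the record layout produced by writeSectorMetadata above,
// assuming each record is a 12-byte sector id followed by a little-endian
// uint16 count, sectorMetadataDiskSize bytes in total.
func readSectorMetadata(f modules.File, sectorIndex uint32) (id sectorID, count uint16, err error) {
	b := make([]byte, sectorMetadataDiskSize)
	_, err = f.ReadAt(b, sectorMetadataDiskSize*int64(sectorIndex))
	if err != nil {
		return sectorID{}, 0, build.ExtendErr("unable to read sector metadata", err)
	}
	copy(id[:], b[:12])
	count = binary.LittleEndian.Uint16(b[12:])
	return id, count, nil
}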

// managedSectorID returns the id that should be used when referring to a
// sector. There are lots of sectors, and to minimize their footprint a
// reduced-size hash is used. Hashes are typically 256 bits to provide
// collision resistance against attackers that can perform orders of magnitude
// more than a billion trials per second. When attacking the host's sector ids
// though, an attacker can only do one trial per sector upload, and even then
// has minimal means to learn whether or not a collision was successfully
// achieved. Hash length can therefore safely be reduced from 32 bytes to 12
// bytes (96 bits), which has a birthday-bound collision resistance of 2^48.
// The host, however, is unlikely to be storing 2^48 sectors, which at the 4
// MiB sector size would be roughly a zettabyte of data.
func (cm *ContractManager) managedSectorID(sectorRoot crypto.Hash) (id sectorID) {
	saltedRoot := crypto.HashAll(sectorRoot, cm.sectorSalt)
	copy(id[:], saltedRoot[:])
	return id
}
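
// deriveSectorID is an illustrative sketch and not part of the original file:
// it restates the salted-truncation scheme of managedSectorID with the salt
// passed explicitly rather than read from the contract manager. The function
// and parameter names are assumptions.
func deriveSectorID(sectorRoot crypto.Hash, salt crypto.Hash) (id sectorID) {
	saltedRoot := crypto.HashAll(sectorRoot, salt)
	copy(id[:], saltedRoot[:]) // keep only the first 12 of the 32 hash bytes
	return id
}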

// ReadSector will read a sector from the storage manager, returning the bytes
// that match the input sector root.
func (cm *ContractManager) ReadSector(root crypto.Hash) ([]byte, error) {
	err := cm.tg.Add()
	if err != nil {
		return nil, err
	}
	defer cm.tg.Done()
	id := cm.managedSectorID(root)
	cm.wal.managedLockSector(id)
	defer cm.wal.managedUnlockSector(id)

	// Fetch the sector metadata. If the sector id is unknown, sl is the zero
	// value and the storage folder lookup is a harmless lookup of folder 0;
	// exists1 is checked before sl is otherwise used.
	cm.wal.mu.Lock()
	sl, exists1 := cm.sectorLocations[id]
	sf, exists2 := cm.storageFolders[sl.storageFolder]
	cm.wal.mu.Unlock()
	if !exists1 {
		return nil, ErrSectorNotFound
	}
	if !exists2 {
		cm.log.Critical("Unable to load storage folder despite having sector metadata")
		return nil, ErrSectorNotFound
	}
	if atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
		// TODO: Pick a new error instead.
		return nil, ErrSectorNotFound
	}

	// Read the sector.
	sectorData, err := readSector(sf.sectorFile, sl.index)
	if err != nil {
		atomic.AddUint64(&sf.atomicFailedReads, 1)
		return nil, build.ExtendErr("unable to fetch sector", err)
	}
	atomic.AddUint64(&sf.atomicSuccessfulReads, 1)
	return sectorData, nil
}
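
// exampleReadSector is an illustrative usage sketch and not part of the
// original file; the contract manager and sector root are assumed to come
// from the caller. On success the returned slice is modules.SectorSize bytes.
func exampleReadSector(cm *ContractManager, root crypto.Hash) ([]byte, error) {
	data, err := cm.ReadSector(root)
	if err != nil {
		// err is ErrSectorNotFound when no stored sector matches root.
		return nil, err
	}
	return data, nil
}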

// managedLockSector grabs a sector lock.
func (wal *writeAheadLog) managedLockSector(id sectorID) {
	wal.mu.Lock()
	sl, exists := wal.cm.lockedSectors[id]
	if exists {
		sl.waiting++
	} else {
		sl = &sectorLock{
			waiting: 1,
		}
		wal.cm.lockedSectors[id] = sl
	}
	wal.mu.Unlock()

	// Block until the sector is available.
	sl.mu.Lock()
}

// managedUnlockSector releases a sector lock.
func (wal *writeAheadLog) managedUnlockSector(id sectorID) {
	wal.mu.Lock()
	defer wal.mu.Unlock()

	// Release the lock on the sector.
	sl, exists := wal.cm.lockedSectors[id]
	if !exists {
		wal.cm.log.Critical("Unlock of sector that is not locked.")
		return
	}
	sl.waiting--
	sl.mu.Unlock()

	// If nobody else is trying to lock the sector, perform garbage collection.
	// sl.waiting is only mutated while wal.mu is held, so this check is safe.
	if sl.waiting == 0 {
		delete(wal.cm.lockedSectors, id)
	}
}
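
// withSectorLock is an illustrative sketch and not part of the original file:
// it shows the intended pairing of managedLockSector and managedUnlockSector
// around a critical section, mirroring how ReadSector uses them above.
func (wal *writeAheadLog) withSectorLock(id sectorID, fn func()) {
	wal.managedLockSector(id)
	defer wal.managedUnlockSector(id)
	fn()
}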