github.com/nebulouslabs/sia@v1.3.7/modules/host/contractmanager/storagefolderempty.go

package contractmanager

import (
	"errors"
	"sync"
	"sync/atomic"

	"github.com/NebulousLabs/Sia/build"
)

var (
	// ErrPartialRelocation is returned during an operation attempting to clear
	// out the sectors in a storage folder if errors prevented one or more of
	// the sectors from being properly migrated to a new storage folder.
	ErrPartialRelocation = errors.New("unable to migrate all sectors")
)

// managedMoveSector will move a sector from its current storage folder to
// another.
func (wal *writeAheadLog) managedMoveSector(id sectorID) error {
	wal.managedLockSector(id)
	defer wal.managedUnlockSector(id)

	// Find the sector to be moved.
	wal.mu.Lock()
	oldLocation, exists1 := wal.cm.sectorLocations[id]
	oldFolder, exists2 := wal.cm.storageFolders[oldLocation.storageFolder]
	wal.mu.Unlock()
	if !exists1 || !exists2 || atomic.LoadUint64(&oldFolder.atomicUnavailable) == 1 {
		return errors.New("unable to find sector that is targeted for move")
	}

	// Read the sector data from disk so that it can be added correctly to a
	// new storage folder.
	sectorData, err := readSector(oldFolder.sectorFile, oldLocation.index)
	if err != nil {
		atomic.AddUint64(&oldFolder.atomicFailedReads, 1)
		return build.ExtendErr("unable to read sector selected for migration", err)
	}
	atomic.AddUint64(&oldFolder.atomicSuccessfulReads, 1)

	// Create the sector update that will remove the old sector.
	oldSU := sectorUpdate{
		Count:  0,
		ID:     id,
		Folder: oldLocation.storageFolder,
		Index:  oldLocation.index,
	}

	// Place the sector into its new folder and add the atomic move to the WAL.
	wal.mu.Lock()
	storageFolders := wal.cm.availableStorageFolders()
	wal.mu.Unlock()
	for len(storageFolders) >= 1 {
		var storageFolderIndex int
		err := func() error {
			// NOTE: Convention is broken when working with the WAL lock here,
			// due to the complexity required with managing both the WAL lock
			// and the storage folder lock. Pay close attention when reviewing
			// and modifying.

			// Grab a vacant storage folder.
			wal.mu.Lock()
			var sf *storageFolder
			sf, storageFolderIndex = vacancyStorageFolder(storageFolders)
			if sf == nil {
				// None of the storage folders have enough room to house the
				// sector.
				wal.mu.Unlock()
				return errInsufficientStorageForSector
			}
			defer sf.mu.RUnlock()

			// Grab a sector from the storage folder. The WAL lock cannot be
			// released between grabbing the storage folder and grabbing a
			// sector, lest another thread request the final available sector
			// in the storage folder.
			sectorIndex, err := randFreeSector(sf.usage)
			if err != nil {
				wal.mu.Unlock()
				wal.cm.log.Critical("a storage folder with full usage was returned from vacancyStorageFolder")
				return err
			}
			// Set the usage, but mark it as uncommitted.
			sf.setUsage(sectorIndex)
			sf.availableSectors[id] = sectorIndex
			wal.mu.Unlock()

			// NOTE: The usage has been set; in the event of failure the usage
			// must be cleared.

			// Try writing the new sector to disk.
			err = writeSector(sf.sectorFile, sectorIndex, sectorData)
			if err != nil {
				wal.cm.log.Printf("ERROR: Unable to write sector for folder %v: %v\n", sf.path, err)
				atomic.AddUint64(&sf.atomicFailedWrites, 1)
				wal.mu.Lock()
				sf.clearUsage(sectorIndex)
				delete(sf.availableSectors, id)
				wal.mu.Unlock()
				return errDiskTrouble
			}

			// Try writing the sector metadata to disk.
			su := sectorUpdate{
				Count:  oldLocation.count,
				ID:     id,
				Folder: sf.index,
				Index:  sectorIndex,
			}
			err = wal.writeSectorMetadata(sf, su)
			if err != nil {
				wal.cm.log.Printf("ERROR: Unable to write sector metadata for folder %v: %v\n", sf.path, err)
				atomic.AddUint64(&sf.atomicFailedWrites, 1)
				wal.mu.Lock()
				sf.clearUsage(sectorIndex)
				delete(sf.availableSectors, id)
				wal.mu.Unlock()
				return errDiskTrouble
			}

			// Sector added successfully; update the WAL and the state.
			sl := sectorLocation{
				index:         sectorIndex,
				storageFolder: sf.index,
				count:         oldLocation.count,
			}
			wal.mu.Lock()
			wal.appendChange(stateChange{
				SectorUpdates: []sectorUpdate{oldSU, su},
			})
			oldFolder.clearUsage(oldLocation.index)
			delete(wal.cm.sectorLocations, oldSU.ID)
			delete(sf.availableSectors, id)
			wal.cm.sectorLocations[id] = sl
			wal.mu.Unlock()
			return nil
		}()
		if err == errInsufficientStorageForSector {
			return err
		} else if err != nil {
			// Try the next storage folder.
			storageFolders = append(storageFolders[:storageFolderIndex], storageFolders[storageFolderIndex+1:]...)
			continue
		}
		// Sector added successfully, break.
		break
	}
	if len(storageFolders) < 1 {
		return errInsufficientStorageForSector
	}
	return nil
}

// managedEmptyStorageFolder will empty out the storage folder with the
// provided index, starting with the 'startingPoint'th sector and continuing
// to the end of the storage folder, allowing the storage folder to be safely
// truncated.
//
// This function assumes that the storage folder has already been made
// invisible to AddSector, and that this is the only thread that will be
// interacting with the storage folder.
func (wal *writeAheadLog) managedEmptyStorageFolder(sfIndex uint16, startingPoint uint32) (uint64, error) {
	// Grab the storage folder in question.
	wal.mu.Lock()
	sf, exists := wal.cm.storageFolders[sfIndex]
	wal.mu.Unlock()
	if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
		return 0, errBadStorageFolderIndex
	}

	// Read the sector lookup bytes into memory; we'll need them to figure out
	// which sectors are in which locations.
	sectorLookupBytes, err := readFullMetadata(sf.metadataFile, len(sf.usage)*storageFolderGranularity)
	if err != nil {
		atomic.AddUint64(&sf.atomicFailedReads, 1)
		return 0, build.ExtendErr("unable to read sector metadata", err)
	}
	atomic.AddUint64(&sf.atomicSuccessfulReads, 1)

	// Before iterating through the sectors and moving them, set up a worker
	// pool that can parallelize the transfers without spinning up 250,000
	// goroutines per TB.
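	// The fixed worker count bounds the number of concurrent disk reads and
	// writes; failed moves are only counted (in errCount) so that the full
	// scan completes before ErrPartialRelocation is reported.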
	var errCount uint64
	var wg sync.WaitGroup
	workers := 250
	workChan := make(chan sectorID)
	doneChan := make(chan struct{})
	for i := 0; i < workers; i++ {
		go func() {
			for {
				select {
				case id := <-workChan:
					err := wal.managedMoveSector(id)
					if err != nil {
						atomic.AddUint64(&errCount, 1)
						wal.cm.log.Println("Unable to migrate sector:", err)
					}
					wg.Done()
				case <-doneChan:
					return
				}
			}
		}()
	}

	// Iterate through all of the sectors and perform the move operation on
	// them.
	readHead := startingPoint * sectorMetadataDiskSize
	for _, usage := range sf.usage[startingPoint/storageFolderGranularity:] {
		// The usage is a bitfield indicating where sectors exist. Iterate
		// through each bit to check for a sector.
		usageMask := uint64(1)
		for j := 0; j < storageFolderGranularity; j++ {
			// Perform a move operation if a sector exists in this location.
			if usage&usageMask == usageMask {
				// Fetch the id of the sector in this location.
				var id sectorID
				copy(id[:], sectorLookupBytes[readHead:readHead+12])
				// Reference the sector locations map to get the most
				// up-to-date status for the sector. If the sector is not in
				// the map, it has been deleted but the usage has not been
				// updated yet; it is safe to skip the move. The read head and
				// usage mask must still advance, so do not 'continue' here.
				wal.mu.Lock()
				_, exists := wal.cm.sectorLocations[id]
				wal.mu.Unlock()
				if exists {
					// Queue the sector move.
					wg.Add(1)
					workChan <- id
				}
			}
			readHead += sectorMetadataDiskSize
			usageMask = usageMask << 1
		}
	}
	wg.Wait()
	close(doneChan)

	// Return ErrPartialRelocation if not every sector was migrated out
	// successfully.
	if errCount > 0 {
		return errCount, ErrPartialRelocation
	}
	return 0, nil
}
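The migration loop in managedEmptyStorageFolder relies on a common Go pattern: a fixed pool of worker goroutines draining an unbuffered channel, a sync.WaitGroup tracking outstanding jobs, and a done channel for shutdown. The standalone sketch below isolates that pattern; the names (processJob, the worker count of 4, and the simulated failures) are illustrative only and do not appear in the contractmanager package.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// processJob stands in for wal.managedMoveSector in this sketch. It is a
// hypothetical placeholder that fails on a fraction of the jobs so that the
// error-counting path is exercised.
func processJob(id int) error {
	if id%7 == 0 {
		return fmt.Errorf("simulated failure for job %d", id)
	}
	return nil
}

func main() {
	const workers = 4 // the contract manager uses 250
	var errCount uint64
	var wg sync.WaitGroup
	workChan := make(chan int)
	doneChan := make(chan struct{})

	// Spin up a bounded pool of workers. Each worker loops, pulling jobs from
	// workChan until doneChan is closed.
	for i := 0; i < workers; i++ {
		go func() {
			for {
				select {
				case id := <-workChan:
					if err := processJob(id); err != nil {
						atomic.AddUint64(&errCount, 1)
					}
					wg.Done()
				case <-doneChan:
					return
				}
			}
		}()
	}

	// Queue the jobs. The wg.Add call happens before the send, so wg.Wait
	// cannot observe a zero count while a job is still in flight.
	for id := 1; id <= 20; id++ {
		wg.Add(1)
		workChan <- id
	}
	wg.Wait()
	close(doneChan)

	fmt.Println("failed jobs:", atomic.LoadUint64(&errCount))
}

Because workChan is unbuffered, the producer blocks until a worker accepts each job, so at most 'workers' jobs are in flight at any time; this is the same throttling the contract manager uses to bound concurrent disk I/O while a storage folder is being emptied.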