gitlab.com/SiaPrime/SiaPrime@v1.4.1/modules/host/contractmanager/storagefolderempty.go

package contractmanager

import (
	"errors"
	"sync"
	"sync/atomic"

	"gitlab.com/SiaPrime/SiaPrime/build"
	"gitlab.com/SiaPrime/SiaPrime/modules"
)

var (
	// ErrPartialRelocation is returned during an operation attempting to clear
	// out the sectors in a storage folder if errors prevented one or more of
	// the sectors from being properly migrated to a new storage folder.
	ErrPartialRelocation = errors.New("unable to migrate all sectors")
)

// managedMoveSector will move a sector from its current storage folder to
// another.
func (wal *writeAheadLog) managedMoveSector(id sectorID) error {
	wal.managedLockSector(id)
	defer wal.managedUnlockSector(id)

	// Find the sector to be moved.
	wal.mu.Lock()
	oldLocation, exists1 := wal.cm.sectorLocations[id]
	oldFolder, exists2 := wal.cm.storageFolders[oldLocation.storageFolder]
	wal.mu.Unlock()
	if !exists1 || !exists2 || atomic.LoadUint64(&oldFolder.atomicUnavailable) == 1 {
		return errors.New("unable to find sector that is targeted for move")
	}

	// Read the sector data from disk so that it can be added correctly to a
	// new storage folder.
	sectorData, err := readSector(oldFolder.sectorFile, oldLocation.index)
	if err != nil {
		atomic.AddUint64(&oldFolder.atomicFailedReads, 1)
		return build.ExtendErr("unable to read sector selected for migration", err)
	}
	atomic.AddUint64(&oldFolder.atomicSuccessfulReads, 1)

	// Create the sector update that will remove the old sector.
	oldSU := sectorUpdate{
		Count:  0,
		ID:     id,
		Folder: oldLocation.storageFolder,
		Index:  oldLocation.index,
	}

	// Place the sector into its new folder and add the atomic move to the WAL.
	wal.mu.Lock()
	storageFolders := wal.cm.availableStorageFolders()
	wal.mu.Unlock()
	for len(storageFolders) >= 1 {
		var storageFolderIndex int
		err := func() error {
			// NOTE: Convention is broken when working with the WAL lock here,
			// due to the complexity required with managing both the WAL lock
			// and the storage folder lock. Pay close attention when reviewing
			// and modifying.

			// Grab a vacant storage folder.
			wal.mu.Lock()
			var sf *storageFolder
			sf, storageFolderIndex = vacancyStorageFolder(storageFolders)
			if sf == nil {
				// None of the storage folders have enough room to house the
				// sector.
				wal.mu.Unlock()
				return modules.ErrInsufficientStorageForSector
			}
			defer sf.mu.RUnlock()

			// Grab a sector from the storage folder. The WAL lock cannot be
			// released between grabbing the storage folder and grabbing a
			// sector, lest another thread request the final available sector
			// in the storage folder.
			sectorIndex, err := randFreeSector(sf.usage)
			if err != nil {
				wal.mu.Unlock()
				wal.cm.log.Critical("a storage folder with full usage was returned from vacancyStorageFolder")
				return err
			}
			// Set the usage, but mark it as uncommitted.
			sf.setUsage(sectorIndex)
			sf.availableSectors[id] = sectorIndex
			wal.mu.Unlock()

			// NOTE: The usage has been set; in the event of failure, the
			// usage must be cleared.

			// Try writing the new sector to disk.
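			// The data is written before the move is recorded in the WAL; if
			// this write or the metadata write below fails, the original copy
			// of the sector remains intact on disk and the uncommitted usage
			// is simply rolled back.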
			err = writeSector(sf.sectorFile, sectorIndex, sectorData)
			if err != nil {
				wal.cm.log.Printf("ERROR: Unable to write sector for folder %v: %v\n", sf.path, err)
				atomic.AddUint64(&sf.atomicFailedWrites, 1)
				wal.mu.Lock()
				sf.clearUsage(sectorIndex)
				delete(sf.availableSectors, id)
				wal.mu.Unlock()
				return errDiskTrouble
			}

			// Try writing the sector metadata to disk.
			su := sectorUpdate{
				Count:  oldLocation.count,
				ID:     id,
				Folder: sf.index,
				Index:  sectorIndex,
			}
			err = wal.writeSectorMetadata(sf, su)
			if err != nil {
				wal.cm.log.Printf("ERROR: Unable to write sector metadata for folder %v: %v\n", sf.path, err)
				atomic.AddUint64(&sf.atomicFailedWrites, 1)
				wal.mu.Lock()
				sf.clearUsage(sectorIndex)
				delete(sf.availableSectors, id)
				wal.mu.Unlock()
				return errDiskTrouble
			}

			// Sector added successfully, update the WAL and the state.
			sl := sectorLocation{
				index:         sectorIndex,
				storageFolder: sf.index,
				count:         oldLocation.count,
			}
			wal.mu.Lock()
			wal.appendChange(stateChange{
				SectorUpdates: []sectorUpdate{oldSU, su},
			})
			oldFolder.clearUsage(oldLocation.index)
			delete(wal.cm.sectorLocations, oldSU.ID)
			delete(sf.availableSectors, id)
			wal.cm.sectorLocations[id] = sl
			wal.mu.Unlock()
			return nil
		}()
		if err == modules.ErrInsufficientStorageForSector {
			return err
		} else if err != nil {
			// Try the next storage folder.
			storageFolders = append(storageFolders[:storageFolderIndex], storageFolders[storageFolderIndex+1:]...)
			continue
		}
		// Sector added successfully, break.
		break
	}
	if len(storageFolders) < 1 {
		return modules.ErrInsufficientStorageForSector
	}
	return nil
}

// managedEmptyStorageFolder will empty out the storage folder with the
// provided index, starting from the 'startingPoint'th sector and continuing
// to the end of the storage folder, allowing the storage folder to be safely
// truncated. The returned uint64 is the number of sectors that could not be
// relocated.
//
// This function assumes that the storage folder has already been made
// invisible to AddSector, and that this is the only thread that will be
// interacting with the storage folder.
func (wal *writeAheadLog) managedEmptyStorageFolder(sfIndex uint16, startingPoint uint32) (uint64, error) {
	// Grab the storage folder in question.
	wal.mu.Lock()
	sf, exists := wal.cm.storageFolders[sfIndex]
	wal.mu.Unlock()
	if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
		return 0, errBadStorageFolderIndex
	}

	// Read the sector lookup bytes into memory; we'll need them to figure out
	// which sectors are in which locations.
	sectorLookupBytes, err := readFullMetadata(sf.metadataFile, len(sf.usage)*storageFolderGranularity)
	if err != nil {
		atomic.AddUint64(&sf.atomicFailedReads, 1)
		return 0, build.ExtendErr("unable to read sector metadata", err)
	}
	atomic.AddUint64(&sf.atomicSuccessfulReads, 1)

	// Before iterating through the sectors and moving them, set up a thread
	// pool that can parallelize the transfers without spinning up 250,000
	// goroutines per TB.
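	//
	// A fixed pool of workers receives sector IDs over an unbuffered channel,
	// and closing doneChan after wg.Wait() releases the workers. Failures are
	// tallied atomically in errCount instead of aborting the sweep, so a
	// single bad sector does not prevent the rest of the folder from being
	// drained.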
	var errCount uint64
	var wg sync.WaitGroup
	workers := 250
	workChan := make(chan sectorID)
	doneChan := make(chan struct{})
	for i := 0; i < workers; i++ {
		go func() {
			for {
				select {
				case id := <-workChan:
					err := wal.managedMoveSector(id)
					if err != nil {
						atomic.AddUint64(&errCount, 1)
						wal.cm.log.Println("Unable to migrate sector:", err)
					}
					wg.Done()
				case <-doneChan:
					return
				}
			}
		}()
	}

	// Iterate through all of the sectors and perform the move operation on
	// them.
	readHead := startingPoint * sectorMetadataDiskSize
	for _, usage := range sf.usage[startingPoint/storageFolderGranularity:] {
		// The usage is a bitfield indicating where sectors exist. Iterate
		// through each bit to check for a sector.
		usageMask := uint64(1)
		for j := 0; j < storageFolderGranularity; j++ {
			// Perform a move operation if a sector exists in this location.
			if usage&usageMask == usageMask {
				// Fetch the id of the sector in this location.
				var id sectorID
				copy(id[:], sectorLookupBytes[readHead:readHead+12])
				// Reference the sector locations map to get the most
				// up-to-date status for the sector.
				wal.mu.Lock()
				_, exists := wal.cm.sectorLocations[id]
				wal.mu.Unlock()
				if exists {
					// Queue the sector move.
					wg.Add(1)
					workChan <- id
				}
				// If the sector is not in the map, it has been deleted but
				// the usage has not been updated yet. It is safe to skip,
				// but readHead and usageMask must still advance below, so
				// the slot is skipped rather than 'continue'd past.
			}
			readHead += sectorMetadataDiskSize
			usageMask = usageMask << 1
		}
	}
	wg.Wait()
	close(doneChan)

	// Return ErrPartialRelocation if not every sector was migrated out
	// successfully.
	if errCount > 0 {
		return errCount, ErrPartialRelocation
	}
	return 0, nil
}
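// Offset bookkeeping for the scan above, assuming storageFolderGranularity
// is 64: the usage word at index w covers sector slots w*64 through w*64+63,
// and the metadata for slot w*64+j lives at byte offset
// (w*64+j)*sectorMetadataDiskSize in the metadata file. readHead tracks that
// offset incrementally, which is why it must advance for every slot in the
// inner loop, including slots that are skipped.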