package contractmanager

import (
	"bytes"
	"os"
	"path/filepath"
	"sync"
	"testing"

	"gitlab.com/SiaPrime/SiaPrime/crypto"
	"gitlab.com/SiaPrime/SiaPrime/modules"
)

// TestRemoveStorageFolder tries removing a storage folder that has no sectors
// in it, and verifies that the removal survives a restart of the contract
// manager (i.e. the removal is correctly persisted via the WAL).
func TestRemoveStorageFolder(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	cmt, err := newContractManagerTester("TestRemoveStorageFolder")
	if err != nil {
		t.Fatal(err)
	}
	defer cmt.panicClose()

	// Add a storage folder to the contract manager tester.
	storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne")
	// Create the storage folder dir.
	err = os.MkdirAll(storageFolderDir, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*storageFolderGranularity*2)
	if err != nil {
		t.Fatal(err)
	}

	// Determine the index of the storage folder, then remove it.
	sfs := cmt.cm.StorageFolders()
	if len(sfs) != 1 {
		t.Fatal("there should be storage folder in the contract manager")
	}
	err = cmt.cm.RemoveStorageFolder(sfs[0].Index, false)
	if err != nil {
		t.Fatal(err)
	}

	// Check that the storage folder has been removed.
	sfs = cmt.cm.StorageFolders()
	if len(sfs) != 0 {
		t.Fatal("Storage folder should have been removed")
	}
	// Check that the on-disk objects (metadata + sector files) were removed.
	// os.Stat should report not-exist for both.
	_, err = os.Stat(filepath.Join(storageFolderDir, metadataFile))
	if !os.IsNotExist(err) {
		t.Fatal("metadata file should have been removed")
	}
	_, err = os.Stat(filepath.Join(storageFolderDir, sectorFile))
	if !os.IsNotExist(err) {
		t.Fatal("sector file should have been removed")
	}

	// Restart the contract manager to see if the storage folder is still gone.
	err = cmt.cm.Close()
	if err != nil {
		t.Fatal(err)
	}
	// Create the new contract manager using the same persist dir, so that it
	// will see the uncommitted WAL.
	cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir))
	if err != nil {
		t.Fatal(err)
	}
	// Check that the storage folder was properly eliminated after the restart.
	sfs = cmt.cm.StorageFolders()
	if len(sfs) != 0 {
		t.Fatal("Storage folder should have been removed")
	}
}

// TestRemoveStorageFolderWithSector tries removing a storage folder that has a
// sector in it. The sector is expected to be migrated to a second storage
// folder rather than lost, and the migration must survive a restart.
func TestRemoveStorageFolderWithSector(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	cmt, err := newContractManagerTester("TestRemoveStorageFolderWithSector")
	if err != nil {
		t.Fatal(err)
	}
	defer cmt.panicClose()

	// Add a storage folder to the contract manager tester.
	storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne")
	// Create the storage folder dir.
	err = os.MkdirAll(storageFolderDir, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*storageFolderGranularity*2)
	if err != nil {
		t.Fatal(err)
	}
	// Give the storage folder a sector.
	root, data := randSector()
	err = cmt.cm.AddSector(root, data)
	if err != nil {
		t.Fatal(err)
	}

	// Determine the index of the storage folder, and verify that exactly one
	// sector's worth of capacity is reported as consumed.
	sfs := cmt.cm.StorageFolders()
	if len(sfs) != 1 {
		t.Fatal("there should be storage folder in the contract manager")
	}
	if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize {
		t.Fatal("there should be one sector reported in the storage folder")
	}
	// Verify that the data held within the storage folder is the correct data.
	readData, err := cmt.cm.ReadSector(root)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(readData, data) {
		t.Fatal("Reading a sector from the storage folder did not produce the right data")
	}

	// Add a second storage folder, then remove the first storage folder. The
	// sector should be migrated to the second folder as part of the removal.
	storageFolderTwo := filepath.Join(cmt.persistDir, "storageFolderTwo")
	err = os.MkdirAll(storageFolderTwo, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderTwo, modules.SectorSize*storageFolderGranularity*2)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.RemoveStorageFolder(sfs[0].Index, false)
	if err != nil {
		t.Fatal(err)
	}

	// Check that the remaining storage folder has picked up the right sector.
	sfs = cmt.cm.StorageFolders()
	if len(sfs) != 1 {
		t.Fatal("there should be storage folder in the contract manager")
	}
	if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize {
		t.Fatal("there should be one sector reported in the storage folder")
	}
	// Verify that the data held within the storage folder is the correct data.
	readData, err = cmt.cm.ReadSector(root)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(readData, data) {
		t.Fatal("Reading a sector from the storage folder did not produce the right data")
	}

	// Check that the disk objects of the removed folder were cleaned up.
	_, err = os.Stat(filepath.Join(storageFolderDir, metadataFile))
	if !os.IsNotExist(err) {
		t.Error("metadata file should have been removed")
	}
	_, err = os.Stat(filepath.Join(storageFolderDir, sectorFile))
	if !os.IsNotExist(err) {
		t.Error("sector file should have been removed")
	}

	// Restart the contract manager to see if the storage folder is still gone.
	err = cmt.cm.Close()
	if err != nil {
		t.Fatal(err)
	}
	// Create the new contract manager using the same persist dir, so that it
	// will see the uncommitted WAL.
	cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir))
	if err != nil {
		t.Fatal(err)
	}
	// After the restart there should still be exactly one folder holding
	// exactly one sector.
	sfs = cmt.cm.StorageFolders()
	if len(sfs) != 1 {
		t.Fatal("there should be storage folder in the contract manager")
	}
	if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize {
		t.Fatal("there should be one sector reported in the storage folder")
	}
	// Verify that the data held within the storage folder is the correct data.
	readData, err = cmt.cm.ReadSector(root)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(readData, data) {
		t.Fatal("Reading a sector from the storage folder did not produce the right data")
	}
}

// TestRemoveStorageFolderConcurrentAddSector will try removing a storage
// folder at the same time that sectors are being added to the contract
// manager, verifying that no sectors are lost or corrupted and that the
// reported capacity stays consistent, including across a restart.
func TestRemoveStorageFolderConcurrentAddSector(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	cmt, err := newContractManagerTester("TestRemoveStorageFolderConcurrentAddSector")
	if err != nil {
		t.Fatal(err)
	}
	defer cmt.panicClose()

	// Add three storage folders.
	storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne")
	storageFolderTwo := filepath.Join(cmt.persistDir, "storageFolderTwo")
	storageFolderThree := filepath.Join(cmt.persistDir, "storageFolderThree")
	storageFolderFour := filepath.Join(cmt.persistDir, "storageFolderFour")
	// Create the storage folder dir.
	err = os.MkdirAll(storageFolderOne, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderOne, modules.SectorSize*storageFolderGranularity)
	if err != nil {
		t.Fatal(err)
	}
	err = os.MkdirAll(storageFolderTwo, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderTwo, modules.SectorSize*storageFolderGranularity*15)
	if err != nil {
		t.Fatal(err)
	}
	// Snapshot the folder list now, before folder three is added, so that
	// sfs contains only folders one and two — these are the two folders that
	// will be removed concurrently below.
	sfs := cmt.cm.StorageFolders()
	err = os.MkdirAll(storageFolderThree, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderThree, modules.SectorSize*storageFolderGranularity*25)
	if err != nil {
		t.Fatal(err)
	}

	// Run goroutines that will continually add sectors to the contract
	// manager while folders are being removed. Every added root/data pair is
	// recorded under sliceLock so it can be verified later.
	var sliceLock sync.Mutex
	var roots []crypto.Hash
	var datas [][]byte
	adderTerminator := make(chan struct{})
	var adderWG sync.WaitGroup
	// Spin up 100 of these threads, putting load on the disk and increasing
	// the chance of complications.
	for i := 0; i < 100; i++ {
		adderWG.Add(1)
		go func() {
			for {
				root, data := randSector()
				err := cmt.cm.AddSector(root, data)
				if err != nil {
					t.Error(err)
				}
				sliceLock.Lock()
				roots = append(roots, root)
				datas = append(datas, data)
				sliceLock.Unlock()

				// See if we are done. The non-blocking select keeps adding
				// sectors until the terminator channel is closed.
				select {
				case <-adderTerminator:
					adderWG.Done()
					return
				default:
					continue
				}
			}
		}()
	}

	// Add a fourth storage folder, mostly because it takes time and guarantees
	// that a bunch of sectors will be added to the disk.
	err = os.MkdirAll(storageFolderFour, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderFour, modules.SectorSize*storageFolderGranularity*50)
	if err != nil {
		t.Fatal(err)
	}

	// In two separate goroutines, remove storage folders one and two (the two
	// entries captured in the sfs snapshot above) while the adder goroutines
	// are still running.
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		err := cmt.cm.RemoveStorageFolder(sfs[0].Index, false)
		if err != nil {
			t.Error(err)
		}
	}()
	go func() {
		defer wg.Done()
		err := cmt.cm.RemoveStorageFolder(sfs[1].Index, false)
		if err != nil {
			t.Error(err)
		}
	}()
	wg.Wait()

	// Copy over the sectors that have been added thus far. The adders are
	// still running, so the copy must be taken under the lock.
	sliceLock.Lock()
	addedRoots := make([]crypto.Hash, len(roots))
	addedDatas := make([][]byte, len(datas))
	copy(addedRoots, roots)
	copy(addedDatas, datas)
	sliceLock.Unlock()

	// Read all of the sectors to verify that consistency is being maintained.
	for i, root := range addedRoots {
		data, err := cmt.cm.ReadSector(root)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(data, addedDatas[i]) {
			t.Error("Retrieved data does not match the intended data")
		}
	}

	// Close the adder threads and wait until all goroutines have finished up.
	// After adderWG.Wait returns, roots and datas are no longer being written
	// and may be read without the lock.
	close(adderTerminator)
	adderWG.Wait()

	// Count the number of sectors total across all remaining folders.
	sfs = cmt.cm.StorageFolders()
	var totalConsumed uint64
	for _, sf := range sfs {
		totalConsumed = totalConsumed + (sf.Capacity - sf.CapacityRemaining)
	}
	if totalConsumed != uint64(len(roots))*modules.SectorSize {
		t.Error("Wrong storage folder consumption being reported.")
	}

	// Make sure that each sector is retrievable.
	for i, root := range roots {
		data, err := cmt.cm.ReadSector(root)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(data, datas[i]) {
			t.Error("Retrieved data does not match the intended data")
		}
	}

	// Restart the contract manager and verify that the changes stuck.
	err = cmt.cm.Close()
	if err != nil {
		t.Fatal(err)
	}
	// Create the new contract manager using the same persist dir, so that it
	// will see the uncommitted WAL.
	cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir))
	if err != nil {
		t.Fatal(err)
	}

	// Count the number of sectors total again, post-restart.
	sfs = cmt.cm.StorageFolders()
	totalConsumed = 0
	for _, sf := range sfs {
		totalConsumed = totalConsumed + (sf.Capacity - sf.CapacityRemaining)
	}
	if totalConsumed != uint64(len(roots))*modules.SectorSize {
		t.Error("Wrong storage folder consumption being reported.")
	}

	// Make sure that each sector is retrievable post-restart.
	for i, root := range roots {
		data, err := cmt.cm.ReadSector(root)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(data, datas[i]) {
			t.Error("Retrieved data does not match the intended data")
		}
	}
}