github.com/ZuluSpl0it/Sia@v1.3.7/siatest/renter/renter_test.go (about) 1 package renter 2 3 import ( 4 "fmt" 5 "io" 6 "math" 7 "math/big" 8 "os" 9 "path/filepath" 10 "reflect" 11 "sort" 12 "strconv" 13 "sync" 14 "testing" 15 "time" 16 17 "github.com/NebulousLabs/Sia/build" 18 "github.com/NebulousLabs/Sia/crypto" 19 "github.com/NebulousLabs/Sia/modules" 20 "github.com/NebulousLabs/Sia/modules/renter" 21 "github.com/NebulousLabs/Sia/node" 22 "github.com/NebulousLabs/Sia/node/api" 23 "github.com/NebulousLabs/Sia/node/api/client" 24 "github.com/NebulousLabs/Sia/siatest" 25 "github.com/NebulousLabs/Sia/types" 26 27 "github.com/NebulousLabs/errors" 28 "github.com/NebulousLabs/fastrand" 29 ) 30 31 // TestRenter executes a number of subtests using the same TestGroup to 32 // save time on initialization 33 func TestRenter(t *testing.T) { 34 if testing.Short() { 35 t.SkipNow() 36 } 37 t.Parallel() 38 39 // Create a group for the subtests 40 groupParams := siatest.GroupParams{ 41 Hosts: 5, 42 Renters: 1, 43 Miners: 1, 44 } 45 tg, err := siatest.NewGroupFromTemplate(groupParams) 46 if err != nil { 47 t.Fatal("Failed to create group: ", err) 48 } 49 defer func() { 50 if err := tg.Close(); err != nil { 51 t.Fatal(err) 52 } 53 }() 54 55 // Specify subtests to run 56 subTests := []struct { 57 name string 58 test func(*testing.T, *siatest.TestGroup) 59 }{ 60 {"TestClearDownloadHistory", testClearDownloadHistory}, 61 {"TestDownloadAfterRenew", testDownloadAfterRenew}, 62 {"TestDownloadMultipleLargeSectors", testDownloadMultipleLargeSectors}, 63 {"TestLocalRepair", testLocalRepair}, 64 {"TestRemoteRepair", testRemoteRepair}, 65 {"TestSingleFileGet", testSingleFileGet}, 66 {"TestStreamingCache", testStreamingCache}, 67 {"TestUploadDownload", testUploadDownload}, 68 } 69 // Run subtests 70 for _, subtest := range subTests { 71 t.Run(subtest.name, func(t *testing.T) { 72 subtest.test(t, tg) 73 }) 74 } 75 } 76 77 // testClearDownloadHistory makes sure that the download history is 78 // properly cleared when called through the API 79 func testClearDownloadHistory(t *testing.T, tg *siatest.TestGroup) { 80 // Grab the first of the group's renters 81 r := tg.Renters()[0] 82 83 rdg, err := r.RenterDownloadsGet() 84 if err != nil { 85 t.Fatal("Could not get download history:", err) 86 } 87 numDownloads := 10 88 if len(rdg.Downloads) < numDownloads { 89 remainingDownloads := numDownloads - len(rdg.Downloads) 90 rf, err := r.RenterFilesGet() 91 if err != nil { 92 t.Fatal(err) 93 } 94 // Check if the renter has any files 95 // Upload a file if none 96 if len(rf.Files) == 0 { 97 dataPieces := uint64(1) 98 parityPieces := uint64(1) 99 fileSize := 100 + siatest.Fuzz() 100 _, _, err := r.UploadNewFileBlocking(fileSize, dataPieces, parityPieces) 101 if err != nil { 102 t.Fatal("Failed to upload a file for testing: ", err) 103 } 104 rf, err = r.RenterFilesGet() 105 if err != nil { 106 t.Fatal(err) 107 } 108 } 109 // Download files to build download history 110 dest := filepath.Join(siatest.SiaTestingDir, strconv.Itoa(fastrand.Intn(math.MaxInt32))) 111 for i := 0; i < remainingDownloads; i++ { 112 err = r.RenterDownloadGet(rf.Files[0].SiaPath, dest, 0, rf.Files[0].Filesize, false) 113 if err != nil { 114 t.Fatal("Could not Download file:", err) 115 } 116 } 117 rdg, err = r.RenterDownloadsGet() 118 if err != nil { 119 t.Fatal("Could not get download history:", err) 120 } 121 // Confirm download history is not empty 122 if len(rdg.Downloads) != numDownloads { 123 t.Fatalf("Not all downloads added to download history: only %v 
downloads added, expected %v", len(rdg.Downloads), numDownloads) 124 } 125 } 126 numDownloads = len(rdg.Downloads) 127 128 // Check removing one download from history 129 // Remove First Download 130 timestamp := rdg.Downloads[0].StartTime 131 err = r.RenterClearDownloadsRangePost(timestamp, timestamp) 132 if err != nil { 133 t.Fatal("Error in API endpoint to remove download from history:", err) 134 } 135 numDownloads-- 136 rdg, err = r.RenterDownloadsGet() 137 if err != nil { 138 t.Fatal("Could not get download history:", err) 139 } 140 if len(rdg.Downloads) != numDownloads { 141 t.Fatalf("Download history not reduced: history has %v downloads, expected %v", len(rdg.Downloads), numDownloads) 142 } 143 i := sort.Search(len(rdg.Downloads), func(i int) bool { return rdg.Downloads[i].StartTime.Equal(timestamp) }) 144 if i < len(rdg.Downloads) { 145 t.Fatal("Specified download not removed from history") 146 } 147 // Remove Last Download 148 timestamp = rdg.Downloads[len(rdg.Downloads)-1].StartTime 149 err = r.RenterClearDownloadsRangePost(timestamp, timestamp) 150 if err != nil { 151 t.Fatal("Error in API endpoint to remove download from history:", err) 152 } 153 numDownloads-- 154 rdg, err = r.RenterDownloadsGet() 155 if err != nil { 156 t.Fatal("Could not get download history:", err) 157 } 158 if len(rdg.Downloads) != numDownloads { 159 t.Fatalf("Download history not reduced: history has %v downloads, expected %v", len(rdg.Downloads), numDownloads) 160 } 161 i = sort.Search(len(rdg.Downloads), func(i int) bool { return rdg.Downloads[i].StartTime.Equal(timestamp) }) 162 if i < len(rdg.Downloads) { 163 t.Fatal("Specified download not removed from history") 164 } 165 166 // Check Clear Before 167 timestamp = rdg.Downloads[len(rdg.Downloads)-2].StartTime 168 err = r.RenterClearDownloadsBeforePost(timestamp) 169 if err != nil { 170 t.Fatal("Error in API endpoint to clear download history before timestamp:", err) 171 } 172 rdg, err = r.RenterDownloadsGet() 173 if err != nil { 174 t.Fatal("Could not get download history:", err) 175 } 176 i = sort.Search(len(rdg.Downloads), func(i int) bool { return rdg.Downloads[i].StartTime.Before(timestamp) }) 177 if i < len(rdg.Downloads) { 178 t.Fatal("Download found that was before given time") 179 } 180 181 // Check Clear After 182 timestamp = rdg.Downloads[1].StartTime 183 err = r.RenterClearDownloadsAfterPost(timestamp) 184 if err != nil { 185 t.Fatal("Error in API endpoint to clear download history after timestamp:", err) 186 } 187 rdg, err = r.RenterDownloadsGet() 188 if err != nil { 189 t.Fatal("Could not get download history:", err) 190 } 191 i = sort.Search(len(rdg.Downloads), func(i int) bool { return rdg.Downloads[i].StartTime.After(timestamp) }) 192 if i < len(rdg.Downloads) { 193 t.Fatal("Download found that was after given time") 194 } 195 196 // Check clear range 197 before := rdg.Downloads[1].StartTime 198 after := rdg.Downloads[len(rdg.Downloads)-1].StartTime 199 err = r.RenterClearDownloadsRangePost(after, before) 200 if err != nil { 201 t.Fatal("Error in API endpoint to remove range of downloads from history:", err) 202 } 203 rdg, err = r.RenterDownloadsGet() 204 if err != nil { 205 t.Fatal("Could not get download history:", err) 206 } 207 i = sort.Search(len(rdg.Downloads), func(i int) bool { 208 return rdg.Downloads[i].StartTime.Before(before) && rdg.Downloads[i].StartTime.After(after) 209 }) 210 if i < len(rdg.Downloads) { 211 t.Fatal("Not all downloads from range removed from history") 212 } 213 214 // Check clearing download history 215 
err = r.RenterClearAllDownloadsPost() 216 if err != nil { 217 t.Fatal("Error in API endpoint to clear download history:", err) 218 } 219 rdg, err = r.RenterDownloadsGet() 220 if err != nil { 221 t.Fatal("Could not get download history:", err) 222 } 223 if len(rdg.Downloads) != 0 { 224 t.Fatalf("Download history not cleared: history has %v downloads, expected 0", len(rdg.Downloads)) 225 } 226 } 227 228 // testDownloadAfterRenew makes sure that we can still download a file 229 // after the contract period has ended. 230 func testDownloadAfterRenew(t *testing.T, tg *siatest.TestGroup) { 231 // Grab the first of the group's renters 232 renter := tg.Renters()[0] 233 // Upload file, creating a piece for each host in the group 234 dataPieces := uint64(1) 235 parityPieces := uint64(len(tg.Hosts())) - dataPieces 236 fileSize := 100 + siatest.Fuzz() 237 _, remoteFile, err := renter.UploadNewFileBlocking(fileSize, dataPieces, parityPieces) 238 if err != nil { 239 t.Fatal("Failed to upload a file for testing: ", err) 240 } 241 // Mine enough blocks for the next period to start. This means the 242 // contracts should be renewed and the data should still be available for 243 // download. 244 miner := tg.Miners()[0] 245 for i := types.BlockHeight(0); i < siatest.DefaultAllowance.Period; i++ { 246 if err := miner.MineBlock(); err != nil { 247 t.Fatal(err) 248 } 249 } 250 // Download the file synchronously directly into memory. 251 _, err = renter.DownloadByStream(remoteFile) 252 if err != nil { 253 t.Fatal(err) 254 } 255 } 256 257 // testDownloadMultipleLargeSectors downloads multiple large files (>5 Sectors) 258 // in parallel and makes sure that the downloads aren't blocking each other. 259 func testDownloadMultipleLargeSectors(t *testing.T, tg *siatest.TestGroup) { 260 // parallelDownloads is the number of downloads that are run in parallel. 261 parallelDownloads := 10 262 // fileSize is the size of the downloaded file. 263 fileSize := int(10*modules.SectorSize) + siatest.Fuzz() 264 265 // uniqueRemoteFiles is the number of files that will be uploaded to the 266 // network. Downloads will choose the remote file to download randomly. 267 uniqueRemoteFiles := 5 268 // Grab the first of the group's renters 269 renter := tg.Renters()[0] 270 // set download limits and reset them after test. 271 if err := renter.RenterPostRateLimit(int64(fileSize)*2, 0); err != nil { 272 t.Fatal("failed to set renter bandwidth limit", err) 273 } 274 defer func() { 275 if err := renter.RenterPostRateLimit(0, 0); err != nil { 276 t.Error("failed to reset renter bandwidth limit", err) 277 } 278 }() 279 280 // Upload files 281 dataPieces := uint64(len(tg.Hosts())) - 1 282 parityPieces := uint64(1) 283 remoteFiles := make([]*siatest.RemoteFile, 0, uniqueRemoteFiles) 284 for i := 0; i < uniqueRemoteFiles; i++ { 285 _, remoteFile, err := renter.UploadNewFileBlocking(fileSize, dataPieces, parityPieces) 286 if err != nil { 287 t.Fatal("Failed to upload a file for testing: ", err) 288 } 289 remoteFiles = append(remoteFiles, remoteFile) 290 } 291 292 // Randomly download using download to file and download to stream methods.
293 wg := new(sync.WaitGroup) 294 for i := 0; i < parallelDownloads; i++ { 295 wg.Add(1) 296 go func() { 297 var err error 298 var rf = remoteFiles[fastrand.Intn(len(remoteFiles))] 299 if fastrand.Intn(2) == 0 { 300 _, err = renter.DownloadByStream(rf) 301 } else { 302 _, err = renter.DownloadToDisk(rf, false) 303 } 304 if err != nil { 305 t.Error("Download failed:", err) 306 } 307 wg.Done() 308 }() 309 } 310 wg.Wait() 311 } 312 313 // testLocalRepair tests if a renter correctly repairs a file from disk 314 // after a host goes offline. 315 func testLocalRepair(t *testing.T, tg *siatest.TestGroup) { 316 // Grab the first of the group's renters 317 renter := tg.Renters()[0] 318 319 // Check that we have enough hosts for this test. 320 if len(tg.Hosts()) < 2 { 321 t.Fatal("This test requires at least 2 hosts") 322 } 323 324 // Set fileSize and redundancy for upload 325 fileSize := int(modules.SectorSize) 326 dataPieces := uint64(1) 327 parityPieces := uint64(len(tg.Hosts())) - dataPieces 328 329 // Upload file 330 _, remoteFile, err := renter.UploadNewFileBlocking(fileSize, dataPieces, parityPieces) 331 if err != nil { 332 t.Fatal(err) 333 } 334 // Get the file info of the fully uploaded file. That way we can compare the 335 // redundancies later. 336 fi, err := renter.FileInfo(remoteFile) 337 if err != nil { 338 t.Fatal("failed to get file info", err) 339 } 340 341 // Take down one of the hosts and check if redundancy decreases. 342 if err := tg.RemoveNode(tg.Hosts()[0]); err != nil { 343 t.Fatal("Failed to shutdown host", err) 344 } 345 expectedRedundancy := float64(dataPieces+parityPieces-1) / float64(dataPieces) 346 if err := renter.WaitForDecreasingRedundancy(remoteFile, expectedRedundancy); err != nil { 347 t.Fatal("Redundancy isn't decreasing", err) 348 } 349 // We should still be able to download 350 if _, err := renter.DownloadByStream(remoteFile); err != nil { 351 t.Fatal("Failed to download file", err) 352 } 353 // Bring up a new host and check if redundancy increments again. 354 _, err = tg.AddNodes(node.HostTemplate) 355 if err != nil { 356 t.Fatal("Failed to create a new host", err) 357 } 358 if err := renter.WaitForUploadRedundancy(remoteFile, fi.Redundancy); err != nil { 359 t.Fatal("File wasn't repaired", err) 360 } 361 // We should be able to download 362 if _, err := renter.DownloadByStream(remoteFile); err != nil { 363 t.Fatal("Failed to download file", err) 364 } 365 } 366 367 // testRemoteRepair tests if a renter correctly repairs a file by 368 // downloading it after a host goes offline. 369 func testRemoteRepair(t *testing.T, tg *siatest.TestGroup) { 370 // Grab the first of the group's renters 371 r := tg.Renters()[0] 372 373 // Check that we have enough hosts for this test. 374 if len(tg.Hosts()) < 2 { 375 t.Fatal("This test requires at least 2 hosts") 376 } 377 378 // Set fileSize and redundancy for upload 379 fileSize := int(modules.SectorSize) 380 dataPieces := uint64(1) 381 parityPieces := uint64(len(tg.Hosts())) - dataPieces 382 383 // Upload file 384 localFile, remoteFile, err := r.UploadNewFileBlocking(fileSize, dataPieces, parityPieces) 385 if err != nil { 386 t.Fatal(err) 387 } 388 // Get the file info of the fully uploaded file. That way we can compare the 389 // redundancies later. 390 fi, err := r.FileInfo(remoteFile) 391 if err != nil { 392 t.Fatal("failed to get file info", err) 393 } 394 395 // Delete the file locally.
396 if err := localFile.Delete(); err != nil { 397 t.Fatal("failed to delete local file", err) 398 } 399 400 // Take down all of the parity hosts and check if redundancy decreases. 401 for i := uint64(0); i < parityPieces; i++ { 402 if err := tg.RemoveNode(tg.Hosts()[0]); err != nil { 403 t.Fatal("Failed to shutdown host", err) 404 } 405 } 406 expectedRedundancy := float64(dataPieces+parityPieces-1) / float64(dataPieces) 407 if err := r.WaitForDecreasingRedundancy(remoteFile, expectedRedundancy); err != nil { 408 t.Fatal("Redundancy isn't decreasing", err) 409 } 410 // We should still be able to download 411 if _, err := r.DownloadByStream(remoteFile); err != nil { 412 t.Fatal("Failed to download file", err) 413 } 414 // Bring up new parity hosts and check if redundancy increments again. 415 _, err = tg.AddNodeN(node.HostTemplate, int(parityPieces)) 416 if err != nil { 417 t.Fatal("Failed to create a new host", err) 418 } 419 // When doing remote repair the redundancy might not reach 100%. 420 expectedRedundancy = (1.0 - renter.RemoteRepairDownloadThreshold) * fi.Redundancy 421 if err := r.WaitForUploadRedundancy(remoteFile, expectedRedundancy); err != nil { 422 t.Fatal("File wasn't repaired", err) 423 } 424 // We should be able to download 425 if _, err := r.DownloadByStream(remoteFile); err != nil { 426 t.Fatal("Failed to download file", err) 427 } 428 } 429 430 // testSingleFileGet is a subtest that uses an existing TestGroup to test if 431 // using the single file API endpoint works 432 func testSingleFileGet(t *testing.T, tg *siatest.TestGroup) { 433 // Grab the first of the group's renters 434 renter := tg.Renters()[0] 435 // Upload file, creating a piece for each host in the group 436 dataPieces := uint64(1) 437 parityPieces := uint64(len(tg.Hosts())) - dataPieces 438 fileSize := 100 + siatest.Fuzz() 439 _, _, err := renter.UploadNewFileBlocking(fileSize, dataPieces, parityPieces) 440 if err != nil { 441 t.Fatal("Failed to upload a file for testing: ", err) 442 } 443 444 files, err := renter.Files() 445 if err != nil { 446 t.Fatal("Failed to get renter files: ", err) 447 } 448 449 var file modules.FileInfo 450 for _, f := range files { 451 file, err = renter.File(f.SiaPath) 452 if err != nil { 453 t.Fatal("Failed to request single file", err) 454 } 455 if file != f { 456 t.Fatal("Single file queries does not match file previously requested.") 457 } 458 } 459 } 460 461 // testStreamingCache checks if the chunk cache works correctly. 
462 func testStreamingCache(t *testing.T, tg *siatest.TestGroup) { 463 // Grab the first of the group's renters 464 r := tg.Renters()[0] 465 466 // Testing setting StreamCacheSize for streaming 467 // Test setting it to larger than the defaultCacheSize 468 if err := r.RenterSetStreamCacheSizePost(4); err != nil { 469 t.Fatal(err, "Could not set StreamCacheSize to 4") 470 } 471 rg, err := r.RenterGet() 472 if err != nil { 473 t.Fatal(err) 474 } 475 if rg.Settings.StreamCacheSize != 4 { 476 t.Fatal("StreamCacheSize not set to 4, set to", rg.Settings.StreamCacheSize) 477 } 478 479 // Test resetting to the value of defaultStreamCacheSize (2) 480 if err := r.RenterSetStreamCacheSizePost(2); err != nil { 481 t.Fatal(err, "Could not set StreamCacheSize to 2") 482 } 483 rg, err = r.RenterGet() 484 if err != nil { 485 t.Fatal(err) 486 } 487 if rg.Settings.StreamCacheSize != 2 { 488 t.Fatal("StreamCacheSize not set to 2, set to", rg.Settings.StreamCacheSize) 489 } 490 491 prev := rg.Settings.StreamCacheSize 492 493 // Test setting to 0 494 if err := r.RenterSetStreamCacheSizePost(0); err == nil { 495 t.Fatal(err, "expected setting stream cache size to zero to fail with an error") 496 } 497 rg, err = r.RenterGet() 498 if err != nil { 499 t.Fatal(err) 500 } 501 if rg.Settings.StreamCacheSize == 0 { 502 t.Fatal("StreamCacheSize set to 0, should have stayed as previous value or", prev) 503 } 504 505 // Set fileSize and redundancy for upload 506 dataPieces := uint64(1) 507 parityPieces := uint64(len(tg.Hosts())) - dataPieces 508 509 // Set the bandwidth limit to 1 chunk per second. 510 pieceSize := modules.SectorSize - crypto.TwofishOverhead 511 chunkSize := int64(pieceSize * dataPieces) 512 if err := r.RenterPostRateLimit(chunkSize, chunkSize); err != nil { 513 t.Fatal(err) 514 } 515 516 rg, err = r.RenterGet() 517 if err != nil { 518 t.Fatal(err) 519 } 520 if rg.Settings.MaxDownloadSpeed != chunkSize { 521 t.Fatal(errors.New("MaxDownloadSpeed doesn't match value set through RenterPostRateLimit")) 522 } 523 if rg.Settings.MaxUploadSpeed != chunkSize { 524 t.Fatal(errors.New("MaxUploadSpeed doesn't match value set through RenterPostRateLimit")) 525 } 526 527 // Upload a file that is a single chunk big. 528 _, remoteFile, err := r.UploadNewFileBlocking(int(chunkSize), dataPieces, parityPieces) 529 if err != nil { 530 t.Fatal(err) 531 } 532 533 // Download the same chunk 250 times. This should take at least 250 seconds 534 // without caching but not more than 30 with caching. 
535 start := time.Now() 536 for i := 0; i < 250; i++ { 537 if _, err := r.Stream(remoteFile); err != nil { 538 t.Fatal(err) 539 } 540 if time.Since(start) > time.Second*30 { 541 t.Fatal("download took longer than 30 seconds") 542 } 543 } 544 } 545 546 // testUploadDownload is a subtest that uses an existing TestGroup to test if 547 // uploading and downloading a file works 548 func testUploadDownload(t *testing.T, tg *siatest.TestGroup) { 549 // Grab the first of the group's renters 550 renter := tg.Renters()[0] 551 // Upload file, creating a piece for each host in the group 552 dataPieces := uint64(1) 553 parityPieces := uint64(len(tg.Hosts())) - dataPieces 554 fileSize := 100 + siatest.Fuzz() 555 localFile, remoteFile, err := renter.UploadNewFileBlocking(fileSize, dataPieces, parityPieces) 556 if err != nil { 557 t.Fatal("Failed to upload a file for testing: ", err) 558 } 559 // Download the file synchronously directly into memory 560 _, err = renter.DownloadByStream(remoteFile) 561 if err != nil { 562 t.Fatal(err) 563 } 564 // Download the file synchronously to a file on disk 565 _, err = renter.DownloadToDisk(remoteFile, false) 566 if err != nil { 567 t.Fatal(err) 568 } 569 // Download the file asynchronously and wait for the download to finish. 570 localFile, err = renter.DownloadToDisk(remoteFile, true) 571 if err != nil { 572 t.Error(err) 573 } 574 if err := renter.WaitForDownload(localFile, remoteFile); err != nil { 575 t.Error(err) 576 } 577 // Stream the file. 578 _, err = renter.Stream(remoteFile) 579 if err != nil { 580 t.Fatal(err) 581 } 582 // Stream the file partially a few times. At least 1 byte is streamed. 583 for i := 0; i < 5; i++ { 584 from := fastrand.Intn(fileSize - 1) // [0..fileSize-2] 585 to := from + 1 + fastrand.Intn(fileSize-from-1) // [from+1..fileSize-1] 586 _, err = renter.StreamPartial(remoteFile, localFile, uint64(from), uint64(to)) 587 if err != nil { 588 t.Fatal(err) 589 } 590 } 591 } 592 593 // TestRenterInterrupt executes a number of subtests using the same TestGroup to 594 // save time on initialization 595 func TestRenterInterrupt(t *testing.T) { 596 if testing.Short() { 597 t.SkipNow() 598 } 599 t.Parallel() 600 601 // Create a group for the subtests 602 groupParams := siatest.GroupParams{ 603 Hosts: 5, 604 Miners: 1, 605 } 606 tg, err := siatest.NewGroupFromTemplate(groupParams) 607 if err != nil { 608 t.Fatal("Failed to create group: ", err) 609 } 610 defer func() { 611 if err := tg.Close(); err != nil { 612 t.Fatal(err) 613 } 614 }() 615 616 // Download sub tests 617 subTests := []struct { 618 name string 619 test func(*testing.T, *siatest.TestGroup) 620 }{ 621 {"TestDownloadInterruptedAfterSendingRevision", testDownloadInterruptedAfterSendingRevision}, 622 {"TestDownloadInterruptedBeforeSendingRevision", testDownloadInterruptedBeforeSendingRevision}, 623 {"TestUploadInterruptedAfterSendingRevision", testUploadInterruptedAfterSendingRevision}, 624 {"TestUploadInterruptedBeforeSendingRevision", testUploadInterruptedBeforeSendingRevision}, 625 } 626 // Run subtests 627 for _, subtest := range subTests { 628 t.Run(subtest.name, func(t *testing.T) { 629 subtest.test(t, tg) 630 }) 631 } 632 } 633 634 // testDownloadInterruptedAfterSendingRevision runs testDownloadInterrupted with 635 // a dependency that interrupts the download after sending the signed revision 636 // to the host. 
637 func testDownloadInterruptedAfterSendingRevision(t *testing.T, tg *siatest.TestGroup) { 638 testDownloadInterrupted(t, tg, newDependencyInterruptDownloadAfterSendingRevision()) 639 } 640 641 // testDownloadInterruptedBeforeSendingRevision runs testDownloadInterrupted 642 // with a dependency that interrupts the download before sending the signed 643 // revision to the host. 644 func testDownloadInterruptedBeforeSendingRevision(t *testing.T, tg *siatest.TestGroup) { 645 testDownloadInterrupted(t, tg, newDependencyInterruptDownloadBeforeSendingRevision()) 646 } 647 648 // testUploadInterruptedAfterSendingRevision runs testUploadInterrupted with a 649 // dependency that interrupts the upload after sending the signed revision to 650 // the host. 651 func testUploadInterruptedAfterSendingRevision(t *testing.T, tg *siatest.TestGroup) { 652 testUploadInterrupted(t, tg, newDependencyInterruptUploadAfterSendingRevision()) 653 } 654 655 // testUploadInterruptedBeforeSendingRevision runs testUploadInterrupted with a 656 // dependency that interrupts the upload before sending the signed revision to 657 // the host. 658 func testUploadInterruptedBeforeSendingRevision(t *testing.T, tg *siatest.TestGroup) { 659 testUploadInterrupted(t, tg, newDependencyInterruptUploadBeforeSendingRevision()) 660 } 661 662 // testDownloadInterrupted interrupts a download using the provided dependencies. 663 func testDownloadInterrupted(t *testing.T, tg *siatest.TestGroup, deps *siatest.DependencyInterruptOnceOnKeyword) { 664 // Add Renter 665 testDir, err := siatest.TestDir(t.Name()) 666 if err != nil { 667 t.Fatal(err) 668 } 669 renterTemplate := node.Renter(testDir + "/renter") 670 renterTemplate.ContractSetDeps = deps 671 nodes, err := tg.AddNodes(renterTemplate) 672 if err != nil { 673 t.Fatal(err) 674 } 675 676 // Upload a file that's 1 chunk large. 677 renter := nodes[0] 678 dataPieces := uint64(len(tg.Hosts())) - 1 679 parityPieces := uint64(1) 680 chunkSize := siatest.ChunkSize(uint64(dataPieces)) 681 _, remoteFile, err := renter.UploadNewFileBlocking(int(chunkSize), dataPieces, parityPieces) 682 if err != nil { 683 t.Fatal(err) 684 } 685 686 // Set the bandwidth limit to 1 chunk per second. 687 if err := renter.RenterPostRateLimit(int64(chunkSize), int64(chunkSize)); err != nil { 688 t.Fatal(err) 689 } 690 691 // Call fail on the dependency every 10 ms. 692 cancel := make(chan struct{}) 693 wg := new(sync.WaitGroup) 694 wg.Add(1) 695 go func() { 696 for { 697 // Cause the next download to fail. 698 deps.Fail() 699 select { 700 case <-cancel: 701 wg.Done() 702 return 703 case <-time.After(10 * time.Millisecond): 704 } 705 } 706 }() 707 // Try downloading the file 5 times. 708 for i := 0; i < 5; i++ { 709 if _, err := renter.DownloadByStream(remoteFile); err == nil { 710 t.Fatal("Download shouldn't succeed since it was interrupted") 711 } 712 } 713 // Stop calling fail on the dependency. 714 close(cancel) 715 wg.Wait() 716 deps.Disable() 717 // Download the file once more successfully 718 if _, err := renter.DownloadByStream(remoteFile); err != nil { 719 t.Fatal("Failed to download the file", err) 720 } 721 } 722 723 // testUploadInterrupted lets the upload fail using the provided dependencies 724 // and makes sure that this doesn't corrupt the contract.
725 func testUploadInterrupted(t *testing.T, tg *siatest.TestGroup, deps *siatest.DependencyInterruptOnceOnKeyword) { 726 // Add Renter 727 testDir, err := siatest.TestDir(t.Name()) 728 if err != nil { 729 t.Fatal(err) 730 } 731 renterTemplate := node.Renter(testDir + "/renter") 732 renterTemplate.ContractSetDeps = deps 733 nodes, err := tg.AddNodes(renterTemplate) 734 if err != nil { 735 t.Fatal(err) 736 } 737 738 // Set the bandwidth limit to 1 chunk per second. 739 renter := nodes[0] 740 dataPieces := uint64(len(tg.Hosts())) - 1 741 parityPieces := uint64(1) 742 chunkSize := siatest.ChunkSize(uint64(dataPieces)) 743 if err := renter.RenterPostRateLimit(int64(chunkSize), int64(chunkSize)); err != nil { 744 t.Fatal(err) 745 } 746 747 // Call fail on the dependency every 100 ms to allow some uploads to 748 // finish. 749 cancel := make(chan struct{}) 750 done := make(chan struct{}) 751 wg := new(sync.WaitGroup) 752 wg.Add(1) 753 go func() { 754 defer close(done) 755 // Loop until cancel was closed or we reach 10 iterations. Otherwise we 756 // might end up blocking the upload for too long. 757 for i := 0; i < 10; i++ { 758 // Cause the next upload to fail. 759 deps.Fail() 760 select { 761 case <-cancel: 762 wg.Done() 763 return 764 case <-time.After(100 * time.Millisecond): 765 } 766 } 767 wg.Done() 768 }() 769 770 // Upload a file that's 1 chunk large. 771 _, remoteFile, err := renter.UploadNewFileBlocking(int(chunkSize), dataPieces, parityPieces) 772 if err != nil { 773 t.Fatal(err) 774 } 775 // Make sure that the upload does not finish before the interrupting 776 // goroutine is finished 777 select { 778 case <-done: 779 default: 780 t.Fatal("Upload finished before interrupt signal is done") 781 } 782 // Stop calling fail on the dependency. 783 close(cancel) 784 wg.Wait() 785 deps.Disable() 786 // Download the file. 787 if _, err := renter.DownloadByStream(remoteFile); err != nil { 788 t.Fatal("Failed to download the file", err) 789 } 790 } 791 792 // The following are tests that need to use their own test groups due to 793 // specific requirements of the tests 794 795 // TestRedundancyReporting verifies that redundancy reporting is accurate if 796 // hosts go offline. 797 func TestRedundancyReporting(t *testing.T) { 798 if testing.Short() { 799 t.SkipNow() 800 } 801 t.Parallel() 802 803 // Create a group for testing. 804 groupParams := siatest.GroupParams{ 805 Hosts: 2, 806 Renters: 1, 807 Miners: 1, 808 } 809 tg, err := siatest.NewGroupFromTemplate(groupParams) 810 if err != nil { 811 t.Fatal("Failed to create group: ", err) 812 } 813 defer func() { 814 if err := tg.Close(); err != nil { 815 t.Fatal(err) 816 } 817 }() 818 819 // Upload a file. 820 dataPieces := uint64(1) 821 parityPieces := uint64(len(tg.Hosts()) - 1) 822 823 renter := tg.Renters()[0] 824 _, rf, err := renter.UploadNewFileBlocking(100, dataPieces, parityPieces) 825 if err != nil { 826 t.Fatal(err) 827 } 828 829 // Stop a host. 830 host := tg.Hosts()[0] 831 if err := tg.StopNode(host); err != nil { 832 t.Fatal(err) 833 } 834 835 // Mine a block to trigger contract maintenance. 836 miner := tg.Miners()[0] 837 if err := miner.MineBlock(); err != nil { 838 t.Fatal(err) 839 } 840 841 // Redundancy should decrease. 842 expectedRedundancy := float64(dataPieces+parityPieces-1) / float64(dataPieces) 843 if err := renter.WaitForDecreasingRedundancy(rf, expectedRedundancy); err != nil { 844 t.Fatal("Redundancy isn't decreasing", err) 845 } 846 847 // Restart the host.
848 if err := tg.StartNode(host); err != nil { 849 t.Fatal(err) 850 } 851 852 // Wait until the host shows up as active again. 853 pk, err := host.HostPublicKey() 854 if err != nil { 855 t.Fatal(err) 856 } 857 err = build.Retry(60, time.Second, func() error { 858 hdag, err := renter.HostDbActiveGet() 859 if err != nil { 860 return err 861 } 862 for _, h := range hdag.Hosts { 863 if reflect.DeepEqual(h.PublicKey, pk) { 864 return nil 865 } 866 } 867 // If host is not active, announce it again and mine a block. 868 if err := host.HostAnnouncePost(); err != nil { 869 return (err) 870 } 871 miner := tg.Miners()[0] 872 if err := miner.MineBlock(); err != nil { 873 return (err) 874 } 875 if err := tg.Sync(); err != nil { 876 return (err) 877 } 878 hg, err := host.HostGet() 879 if err != nil { 880 return err 881 } 882 return fmt.Errorf("host with address %v not active", hg.InternalSettings.NetAddress) 883 }) 884 if err != nil { 885 t.Fatal(err) 886 } 887 888 if err := miner.MineBlock(); err != nil { 889 t.Fatal(err) 890 } 891 892 // Redundancy should go back to normal. 893 expectedRedundancy = float64(dataPieces+parityPieces) / float64(dataPieces) 894 if err := renter.WaitForUploadRedundancy(rf, expectedRedundancy); err != nil { 895 t.Fatal("Redundancy is not increasing") 896 } 897 } 898 899 // TestRenewFailing checks if a contract gets marked as !goodForRenew after 900 // failing multiple times in a row. 901 func TestRenewFailing(t *testing.T) { 902 if testing.Short() { 903 t.SkipNow() 904 } 905 t.Parallel() 906 renterDir, err := siatest.TestDir(filepath.Join(t.Name(), "renter")) 907 if err != nil { 908 t.Fatal(err) 909 } 910 911 // Create a group for the subtests 912 groupParams := siatest.GroupParams{ 913 Hosts: 3, 914 Miners: 1, 915 } 916 tg, err := siatest.NewGroupFromTemplate(groupParams) 917 if err != nil { 918 t.Fatal("Failed to create group: ", err) 919 } 920 defer func() { 921 if err := tg.Close(); err != nil { 922 t.Fatal(err) 923 } 924 }() 925 926 // Add a renter with a custom allowance to give it plenty of time to renew 927 // the contract later. 928 renterParams := node.Renter(renterDir) 929 renterParams.Allowance = siatest.DefaultAllowance 930 renterParams.Allowance.Hosts = uint64(len(tg.Hosts()) - 1) 931 renterParams.Allowance.Period = 100 932 renterParams.Allowance.RenewWindow = 50 933 nodes, err := tg.AddNodes(renterParams) 934 if err != nil { 935 t.Fatal(err) 936 } 937 renter := nodes[0] 938 939 // All the contracts of the renter should be goodForRenew. So there should 940 // be no inactive contracts, only active contracts 941 rcg, err := renter.RenterInactiveContractsGet() 942 if err != nil { 943 t.Fatal(err) 944 } 945 if uint64(len(rcg.ActiveContracts)) != renterParams.Allowance.Hosts { 946 for i, c := range rcg.ActiveContracts { 947 fmt.Println(i, c.HostPublicKey) 948 } 949 t.Fatalf("renter had %v contracts but should have %v", 950 len(rcg.ActiveContracts), renterParams.Allowance.Hosts) 951 } 952 if len(rcg.InactiveContracts) != 0 { 953 t.Fatal("Renter should have 0 inactive contracts but has", len(rcg.InactiveContracts)) 954 } 955 956 // Create a map of the hosts in the group. 957 hostMap := make(map[string]*siatest.TestNode) 958 for _, host := range tg.Hosts() { 959 pk, err := host.HostPublicKey() 960 if err != nil { 961 t.Fatal(err) 962 } 963 hostMap[pk.String()] = host 964 } 965 // Lock the wallet of one of the used hosts to make the renew fail. 
966 for _, c := range rcg.ActiveContracts { 967 if host, used := hostMap[c.HostPublicKey.String()]; used { 968 if err := host.WalletLockPost(); err != nil { 969 t.Fatal(err) 970 } 971 break 972 } 973 } 974 // Wait until the contract is supposed to be renewed. 975 cg, err := renter.ConsensusGet() 976 if err != nil { 977 t.Fatal(err) 978 } 979 rg, err := renter.RenterGet() 980 if err != nil { 981 t.Fatal(err) 982 } 983 miner := tg.Miners()[0] 984 blockHeight := cg.Height 985 for blockHeight+rg.Settings.Allowance.RenewWindow < rcg.ActiveContracts[0].EndHeight { 986 if err := miner.MineBlock(); err != nil { 987 t.Fatal(err) 988 } 989 blockHeight++ 990 } 991 992 // there should be no inactive contracts, only active contracts. 993 rcg, err = renter.RenterInactiveContractsGet() 994 if err != nil { 995 t.Fatal(err) 996 } 997 if uint64(len(rcg.ActiveContracts)) != renterParams.Allowance.Hosts { 998 for i, c := range rcg.ActiveContracts { 999 fmt.Println(i, c.HostPublicKey) 1000 } 1001 t.Fatalf("renter had %v contracts but should have %v", 1002 len(rcg.ActiveContracts), renterParams.Allowance.Hosts) 1003 } 1004 if len(rcg.InactiveContracts) != 0 { 1005 t.Fatal("Renter should have 0 inactive contracts but has", len(rcg.InactiveContracts)) 1006 } 1007 1008 // mine enough blocks to reach the second half of the renew window. 1009 for ; blockHeight+rg.Settings.Allowance.RenewWindow/2 < rcg.ActiveContracts[0].EndHeight; blockHeight++ { 1010 if err := miner.MineBlock(); err != nil { 1011 t.Fatal(err) 1012 } 1013 } 1014 1015 // We should be within the second half of the renew window now. We keep 1016 // mining blocks until the host with the locked wallet has been replaced. 1017 // This should happen before we reach the endHeight of the contracts. This 1018 // means we should have 2 active contracts and 2 inactive contracts. One of 1019 // the inactive contracts will be !goodForRenew due to the host 1020 replaced := false 1021 err = build.Retry(int(rcg.ActiveContracts[0].EndHeight-blockHeight), 5*time.Second, func() error { 1022 // contract should be !goodForRenew now. 1023 rc, err := renter.RenterInactiveContractsGet() 1024 if err != nil { 1025 return err 1026 } 1027 if len(rc.ActiveContracts) != 2 { 1028 return fmt.Errorf("Expected 2 active contracts, got %v", len(rc.ActiveContracts)) 1029 } 1030 if len(rc.InactiveContracts) != 2 { 1031 return fmt.Errorf("Expected 2 inactive contracts, got %v", len(rc.InactiveContracts)) 1032 } 1033 1034 notGoodForRenew := 0 1035 goodForRenew := 0 1036 for _, c := range rc.InactiveContracts { 1037 if !c.GoodForRenew { 1038 notGoodForRenew++ 1039 } else { 1040 goodForRenew++ 1041 } 1042 } 1043 if err := miner.MineBlock(); err != nil { 1044 return err 1045 } 1046 if !replaced && notGoodForRenew != 1 && goodForRenew != 1 { 1047 return fmt.Errorf("there should be exactly 1 inactive contract that is !goodForRenew but was %v", 1048 notGoodForRenew) 1049 } 1050 replaced = true 1051 if replaced && notGoodForRenew != 1 && goodForRenew != 2 { 1052 return fmt.Errorf("contract was set to !goodForRenew but hasn't been replaced yet") 1053 } 1054 return nil 1055 }) 1056 if err != nil { 1057 t.Fatal(err) 1058 } 1059 } 1060 1061 // TestRenterCancelAllowance tests that setting an empty allowance causes 1062 // uploads, downloads, and renewals to cease. 1063 func TestRenterCancelAllowance(t *testing.T) { 1064 if testing.Short() { 1065 t.SkipNow() 1066 } 1067 t.Parallel() 1068 1069 // Create a group for testing. 
1070 groupParams := siatest.GroupParams{ 1071 Hosts: 2, 1072 Renters: 1, 1073 Miners: 1, 1074 } 1075 tg, err := siatest.NewGroupFromTemplate(groupParams) 1076 if err != nil { 1077 t.Fatal("Failed to create group: ", err) 1078 } 1079 defer func() { 1080 if err := tg.Close(); err != nil { 1081 t.Fatal(err) 1082 } 1083 }() 1084 1085 // Upload a file. 1086 dataPieces := uint64(1) 1087 parityPieces := uint64(len(tg.Hosts()) - 1) 1088 1089 renter := tg.Renters()[0] 1090 _, rf, err := renter.UploadNewFileBlocking(100, dataPieces, parityPieces) 1091 if err != nil { 1092 t.Fatal(err) 1093 } 1094 1095 // Cancel the allowance 1096 if err := renter.RenterCancelAllowance(); err != nil { 1097 t.Fatal(err) 1098 } 1099 1100 // Give it some time to mark the contracts as !goodForUpload and 1101 // !goodForRenew. 1102 err = build.Retry(200, 100*time.Millisecond, func() error { 1103 rc, err := renter.RenterInactiveContractsGet() 1104 if err != nil { 1105 return err 1106 } 1107 // Should now have 2 inactive contracts. 1108 if len(rc.ActiveContracts) != 0 { 1109 return fmt.Errorf("expected 0 active contracts, got %v", len(rc.ActiveContracts)) 1110 } 1111 if len(rc.InactiveContracts) != groupParams.Hosts { 1112 return fmt.Errorf("expected %v inactive contracts, got %v", groupParams.Hosts, len(rc.InactiveContracts)) 1113 } 1114 for _, c := range rc.InactiveContracts { 1115 if c.GoodForUpload { 1116 return errors.New("contract shouldn't be goodForUpload") 1117 } 1118 if c.GoodForRenew { 1119 return errors.New("contract shouldn't be goodForRenew") 1120 } 1121 } 1122 return nil 1123 }) 1124 if err != nil { 1125 t.Fatal(err) 1126 } 1127 1128 // Try downloading the file; should succeed. 1129 if _, err := renter.DownloadByStream(rf); err != nil { 1130 t.Fatal("downloading file failed", err) 1131 } 1132 1133 // Wait for a few seconds to make sure that the upload heap is rebuilt. 1134 // The rebuilt interval is 3 seconds. Sleep for 5 to be safe. 1135 time.Sleep(5 * time.Second) 1136 1137 // Try to upload a file after the allowance was cancelled. Should fail. 1138 _, rf2, err := renter.UploadNewFile(100, dataPieces, parityPieces) 1139 if err != nil { 1140 t.Fatal(err) 1141 } 1142 1143 // Give it some time to upload. 1144 time.Sleep(time.Second) 1145 1146 // Redundancy should still be 0. 1147 renterFiles, err := renter.RenterFilesGet() 1148 if err != nil { 1149 t.Fatal("Failed to get files") 1150 } 1151 if len(renterFiles.Files) != 2 { 1152 t.Fatal("There should be exactly 2 tracked files") 1153 } 1154 fileInfo, err := renter.File(rf2.SiaPath()) 1155 if err != nil { 1156 t.Fatal(err) 1157 } 1158 if fileInfo.UploadProgress > 0 || fileInfo.UploadedBytes > 0 || fileInfo.Redundancy > 0 { 1159 t.Fatal("Uploading a file after canceling the allowance should fail") 1160 } 1161 1162 // Mine enough blocks for the period to pass and the contracts to expire. 1163 miner := tg.Miners()[0] 1164 for i := types.BlockHeight(0); i < siatest.DefaultAllowance.Period; i++ { 1165 if err := miner.MineBlock(); err != nil { 1166 t.Fatal(err) 1167 } 1168 } 1169 1170 // All contracts should be archived. 1171 err = build.Retry(200, 100*time.Millisecond, func() error { 1172 rc, err := renter.RenterInactiveContractsGet() 1173 if err != nil { 1174 return err 1175 } 1176 rcExpired, err := renter.RenterExpiredContractsGet() 1177 if err != nil { 1178 return err 1179 } 1180 // Should now have 2 expired contracts. 
1181 if len(rc.ActiveContracts) != 0 { 1182 return fmt.Errorf("expected 0 active contracts, got %v", len(rc.ActiveContracts)) 1183 } 1184 if len(rc.InactiveContracts) != 0 { 1185 return fmt.Errorf("expected 0 inactive contracts, got %v", len(rc.InactiveContracts)) 1186 } 1187 if len(rcExpired.ExpiredContracts) != groupParams.Hosts { 1188 return fmt.Errorf("expected %v expired contracts, got %v", groupParams.Hosts, len(rcExpired.ExpiredContracts)) 1189 } 1190 return nil 1191 }) 1192 if err != nil { 1193 t.Error(err) 1194 } 1195 1196 // Try downloading the file; should fail. 1197 if _, err := renter.DownloadByStream(rf2); err == nil { 1198 t.Error("downloading file succeeded even though it shouldn't") 1199 } 1200 1201 // The uploaded files should have 0x redundancy now. 1202 err = build.Retry(200, 100*time.Millisecond, func() error { 1203 rf, err := renter.RenterFilesGet() 1204 if err != nil { 1205 return errors.New("Failed to get files") 1206 } 1207 if len(rf.Files) != 2 || rf.Files[0].Redundancy != 0 || rf.Files[1].Redundancy != 0 { 1208 return errors.New("file redundancy should be 0 now") 1209 } 1210 return nil 1211 }) 1212 if err != nil { 1213 t.Error(err) 1214 } 1215 } 1216 1217 // TestRenterContractEndHeight makes sure that the endheight of renewed 1218 // contracts is set properly 1219 func TestRenterContractEndHeight(t *testing.T) { 1220 if testing.Short() { 1221 t.SkipNow() 1222 } 1223 t.Parallel() 1224 1225 // Create a group for the subtests 1226 groupParams := siatest.GroupParams{ 1227 Hosts: 2, 1228 Renters: 1, 1229 Miners: 1, 1230 } 1231 tg, err := siatest.NewGroupFromTemplate(groupParams) 1232 if err != nil { 1233 t.Fatal("Failed to create group: ", err) 1234 } 1235 defer func() { 1236 if err := tg.Close(); err != nil { 1237 t.Fatal(err) 1238 } 1239 }() 1240 1241 // Get Renter 1242 r := tg.Renters()[0] 1243 rg, err := r.RenterGet() 1244 if err != nil { 1245 t.Fatal(err) 1246 } 1247 1248 // Record the start period at the beginning of the test 1249 currentPeriodStart := rg.CurrentPeriod 1250 period := rg.Settings.Allowance.Period 1251 renewWindow := rg.Settings.Allowance.RenewWindow 1252 numRenewals := 0 1253 1254 // Confirm Contracts were created as expected.
There should be 2 active 1255 // contracts and no inactive or expired contracts 1256 err = build.Retry(200, 100*time.Millisecond, func() error { 1257 rc, err := r.RenterInactiveContractsGet() 1258 if err != nil { 1259 return err 1260 } 1261 if len(rc.ActiveContracts) != len(tg.Hosts()) { 1262 return fmt.Errorf("Expected %v active contracts, got %v", len(tg.Hosts()), len(rc.ActiveContracts)) 1263 } 1264 if len(rc.InactiveContracts) != 0 { 1265 return fmt.Errorf("Expected 0 inactive contracts, got %v", len(rc.InactiveContracts)) 1266 } 1267 rcExpired, err := r.RenterExpiredContractsGet() 1268 if err != nil { 1269 return err 1270 } 1271 if len(rcExpired.ExpiredContracts) != 0 { 1272 return fmt.Errorf("Expected 0 expired contracts, got %v", len(rcExpired.ExpiredContracts)) 1273 } 1274 return nil 1275 }) 1276 if err != nil { 1277 t.Fatal(err) 1278 } 1279 1280 rc, err := r.RenterContractsGet() 1281 if err != nil { 1282 t.Fatal(err) 1283 } 1284 1285 // Confirm contract end heights were set properly 1286 for _, c := range rc.ActiveContracts { 1287 if c.EndHeight != currentPeriodStart+period { 1288 t.Log("Endheight:", c.EndHeight) 1289 t.Log("Allowance Period:", period) 1290 t.Log("Current Period:", currentPeriodStart) 1291 t.Fatal("Contract endheight not set to Current period + Allowance Period") 1292 } 1293 } 1294 1295 // Mine blocks to force contract renewal 1296 if err = renewContractsByRenewWindow(r, tg); err != nil { 1297 t.Fatal(err) 1298 } 1299 numRenewals++ 1300 1301 // Confirm Contracts were renewed as expected, all original contracts should 1302 // have been renewed if GoodForRenew = true. There should be 2 active and 1303 // inactive contracts, and 0 expired contracts 1304 err = build.Retry(200, 100*time.Millisecond, func() error { 1305 rc, err := r.RenterInactiveContractsGet() 1306 if err != nil { 1307 return err 1308 } 1309 rcExpired, err := r.RenterExpiredContractsGet() 1310 if err != nil { 1311 return err 1312 } 1313 if len(rcExpired.ExpiredContracts) != 0 { 1314 return fmt.Errorf("Expected 0 expired contracts, got %v", len(rcExpired.ExpiredContracts)) 1315 } 1316 // checkContracts will confirm correct number of inactive and active contracts 1317 if err = checkContracts(len(tg.Hosts()), numRenewals, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil { 1318 return err 1319 } 1320 if err = checkRenewedContracts(rc.ActiveContracts); err != nil { 1321 return err 1322 } 1323 return nil 1324 }) 1325 if err != nil { 1326 t.Fatal(err) 1327 } 1328 1329 // Confirm contract end heights were set properly End height should be the 1330 // end of the next period as the contracts are renewed due to reaching the 1331 // renew window 1332 rc, err = r.RenterContractsGet() 1333 if err != nil { 1334 t.Fatal(err) 1335 } 1336 for _, c := range rc.ActiveContracts { 1337 if c.EndHeight != currentPeriodStart+(2*period)-renewWindow && c.GoodForRenew { 1338 t.Log("Endheight:", c.EndHeight) 1339 t.Log("Allowance Period:", period) 1340 t.Log("Renew Window:", renewWindow) 1341 t.Log("Current Period:", currentPeriodStart) 1342 t.Fatal("Contract endheight not set to Current period + 2 * Allowance Period - Renew Window") 1343 } 1344 } 1345 1346 // Capturing end height to compare against renewed contracts 1347 endHeight := rc.ActiveContracts[0].EndHeight 1348 1349 // Renew contracts by running out of funds 1350 startingUploadSpend, err := renewContractsBySpending(r, tg) 1351 if err != nil { 1352 t.Fatal(err) 1353 } 1354 1355 // Confirm contract end heights were set properly 
1356 // End height should not have changed since the renewal 1357 // was due to running out of funds 1358 rc, err = r.RenterContractsGet() 1359 if err != nil { 1360 t.Fatal(err) 1361 } 1362 for _, c := range rc.ActiveContracts { 1363 if c.EndHeight != endHeight && c.GoodForRenew && c.UploadSpending.Cmp(startingUploadSpend) <= 0 { 1364 t.Log("Allowance Period:", period) 1365 t.Log("Current Period:", currentPeriodStart) 1366 t.Fatalf("Contract endheight Changed, EH was %v, expected %v\n", c.EndHeight, endHeight) 1367 } 1368 } 1369 } 1370 1371 // TestRenterContractsEndpoint tests the API endpoint for old contracts 1372 func TestRenterContractsEndpoint(t *testing.T) { 1373 if testing.Short() { 1374 t.SkipNow() 1375 } 1376 t.Parallel() 1377 1378 // Create a group for testing. 1379 groupParams := siatest.GroupParams{ 1380 Hosts: 2, 1381 Renters: 1, 1382 Miners: 1, 1383 } 1384 tg, err := siatest.NewGroupFromTemplate(groupParams) 1385 if err != nil { 1386 t.Fatal("Failed to create group: ", err) 1387 } 1388 defer func() { 1389 if err := tg.Close(); err != nil { 1390 t.Fatal(err) 1391 } 1392 }() 1393 1394 // Get Renter 1395 r := tg.Renters()[0] 1396 1397 // Renter should only have active contracts 1398 rc, err := r.RenterInactiveContractsGet() 1399 if err != nil { 1400 t.Fatal(err) 1401 } 1402 if len(tg.Hosts()) != len(rc.ActiveContracts) { 1403 t.Fatalf("Expected the same number for active contracts as hosts: %v active and %v expected", len(rc.ActiveContracts), len(tg.Hosts())) 1404 } 1405 if len(rc.InactiveContracts) != 0 { 1406 t.Fatal("Expected zero inactive contracts, got", len(rc.InactiveContracts)) 1407 } 1408 rcExpired, err := r.RenterExpiredContractsGet() 1409 if err != nil { 1410 t.Fatal(err) 1411 } 1412 if len(rcExpired.ExpiredContracts) != 0 { 1413 t.Fatal("Expected zero expired contracts, got", len(rcExpired.ExpiredContracts)) 1414 } 1415 1416 // Record original Contracts and create Maps for comparison 1417 originalContracts := rc.ActiveContracts 1418 originalContractIDMap := make(map[types.FileContractID]struct{}) 1419 for _, c := range originalContracts { 1420 originalContractIDMap[c.ID] = struct{}{} 1421 } 1422 1423 // Renew contracts 1424 // Mine blocks to force contract renewal 1425 if err = renewContractsByRenewWindow(r, tg); err != nil { 1426 t.Fatal(err) 1427 } 1428 numRenewals := 1 1429 // Waiting for nodes to sync 1430 if err = tg.Sync(); err != nil { 1431 t.Fatal(err) 1432 } 1433 1434 // Confirm contracts were renewed as expected, there should be no expired 1435 // contracts since we are still within the endheight of the original 1436 // contracts, there should be the same number of active and inactive 1437 // contracts, and the inactive contracts should be the same contracts as the 1438 // original active contracts. 
1439 err = build.Retry(200, 100*time.Millisecond, func() error { 1440 // Check active and expired contracts 1441 rc, err = r.RenterInactiveContractsGet() 1442 if err != nil { 1443 return err 1444 } 1445 if len(rc.ActiveContracts) != len(rc.InactiveContracts) { 1446 return fmt.Errorf("Expected the same number of active and inactive contracts; got %v active and %v inactive", len(rc.ActiveContracts), len(rc.InactiveContracts)) 1447 } 1448 if len(originalContracts) != len(rc.InactiveContracts) { 1449 return fmt.Errorf("Didn't get expected number of inactive contracts, expected %v got %v", len(originalContracts), len(rc.InactiveContracts)) 1450 } 1451 for _, c := range rc.InactiveContracts { 1452 if _, ok := originalContractIDMap[c.ID]; !ok { 1453 return errors.New("ID from rc not found in originalContracts") 1454 } 1455 } 1456 1457 // Check expired contracts 1458 rcExpired, err = r.RenterExpiredContractsGet() 1459 if err != nil { 1460 return err 1461 } 1462 if len(rcExpired.ExpiredContracts) != 0 { 1463 return fmt.Errorf("Expected zero expired contracts, got %v", len(rcExpired.ExpiredContracts)) 1464 } 1465 1466 return nil 1467 }) 1468 if err != nil { 1469 t.Error(err) 1470 } 1471 1472 // Record inactive contracts 1473 rc, err = r.RenterInactiveContractsGet() 1474 inactiveContracts := rc.InactiveContracts 1475 if err != nil { 1476 t.Fatal(err) 1477 } 1478 inactiveContractIDMap := make(map[types.FileContractID]struct{}) 1479 for _, c := range inactiveContracts { 1480 inactiveContractIDMap[c.ID] = struct{}{} 1481 } 1482 1483 // Mine to force inactive contracts to be expired contracts 1484 m := tg.Miners()[0] 1485 cg, err := r.ConsensusGet() 1486 if err != nil { 1487 t.Fatal(err) 1488 } 1489 for i := 0; i < int(inactiveContracts[0].EndHeight-cg.Height+types.MaturityDelay); i++ { 1490 if err = m.MineBlock(); err != nil { 1491 t.Fatal(err) 1492 } 1493 } 1494 1495 // Waiting for nodes to sync 1496 if err = tg.Sync(); err != nil { 1497 t.Fatal(err) 1498 } 1499 1500 // Confirm contracts, the expired contracts should now be the same contracts 1501 // as the previous inactive contracts. 
1502 err = build.Retry(200, 100*time.Millisecond, func() error { 1503 rc, err = r.RenterExpiredContractsGet() 1504 if err != nil { 1505 return err 1506 } 1507 if len(rc.ActiveContracts) != len(tg.Hosts()) { 1508 return errors.New("Waiting for active contracts to form") 1509 } 1510 if len(rc.ExpiredContracts) != len(inactiveContracts) { 1511 return fmt.Errorf("Expected the same number of expired and inactive contracts; got %v expired and %v inactive", len(rc.ExpiredContracts), len(inactiveContracts)) 1512 } 1513 for _, c := range rc.ExpiredContracts { 1514 if _, ok := inactiveContractIDMap[c.ID]; !ok { 1515 return errors.New("ID from rc not found in inactiveContracts") 1516 } 1517 } 1518 return nil 1519 }) 1520 if err != nil { 1521 t.Fatal(err) 1522 } 1523 1524 // Record current active and expired contracts 1525 err = build.Retry(200, 100*time.Millisecond, func() error { 1526 rc, err = r.RenterContractsGet() 1527 if err != nil { 1528 return err 1529 } 1530 if len(rc.ActiveContracts) != len(tg.Hosts()) { 1531 return errors.New("Waiting for active contracts to form") 1532 } 1533 return nil 1534 }) 1535 if err != nil { 1536 t.Fatal(err) 1537 } 1538 rc, err = r.RenterExpiredContractsGet() 1539 if err != nil { 1540 t.Fatal(err) 1541 } 1542 activeContracts := rc.ActiveContracts 1543 expiredContracts := rc.ExpiredContracts 1544 if err != nil { 1545 t.Fatal(err) 1546 } 1547 expiredContractIDMap := make(map[types.FileContractID]struct{}) 1548 for _, c := range expiredContracts { 1549 expiredContractIDMap[c.ID] = struct{}{} 1550 } 1551 1552 // Renew contracts by spending 1553 startingUploadSpend, err := renewContractsBySpending(r, tg) 1554 if err != nil { 1555 t.Fatal(err) 1556 } 1557 numRenewals++ 1558 // Waiting for nodes to sync 1559 if err = tg.Sync(); err != nil { 1560 t.Fatal(err) 1561 } 1562 1563 // Confirm contracts were renewed as expected.
Active contracts prior to 1564 // renewal should now be in the inactive contracts 1565 err = build.Retry(200, 100*time.Millisecond, func() error { 1566 rc, err = r.RenterInactiveContractsGet() 1567 if err != nil { 1568 return err 1569 } 1570 if len(rc.ActiveContracts) != len(tg.Hosts()) { 1571 return errors.New("Waiting for active contracts to form") 1572 } 1573 rcExpired, err = r.RenterExpiredContractsGet() 1574 if err != nil { 1575 return err 1576 } 1577 1578 // Confirm active and inactive contracts 1579 inactiveContractIDMap := make(map[types.FileContractID]struct{}) 1580 for _, c := range rc.InactiveContracts { 1581 inactiveContractIDMap[c.ID] = struct{}{} 1582 } 1583 for _, c := range activeContracts { 1584 if _, ok := inactiveContractIDMap[c.ID]; !ok && c.UploadSpending.Cmp(startingUploadSpend) <= 0 { 1585 return errors.New("ID from activeContacts not found in rc") 1586 } 1587 } 1588 1589 // Confirm expired contracts 1590 if len(expiredContracts) != len(rcExpired.ExpiredContracts) { 1591 return fmt.Errorf("Didn't get expected number of expired contracts, expected %v got %v", len(expiredContracts), len(rcExpired.ExpiredContracts)) 1592 } 1593 for _, c := range rcExpired.ExpiredContracts { 1594 if _, ok := expiredContractIDMap[c.ID]; !ok { 1595 return errors.New("ID from rcExpired not found in expiredContracts") 1596 } 1597 } 1598 1599 return nil 1600 }) 1601 if err != nil { 1602 t.Fatal(err) 1603 } 1604 } 1605 1606 // TestRenterPersistData checks if the RenterSettings are persisted 1607 func TestRenterPersistData(t *testing.T) { 1608 if testing.Short() { 1609 t.SkipNow() 1610 } 1611 t.Parallel() 1612 1613 // Get test directory 1614 testdir, err := siatest.TestDir(t.Name()) 1615 if err != nil { 1616 t.Fatal(err) 1617 } 1618 1619 // Copying legacy file to test directory 1620 renterDir := filepath.Join(testdir, "renter") 1621 destination := filepath.Join(renterDir, "renter.json") 1622 err = os.MkdirAll(renterDir, 0700) 1623 if err != nil { 1624 t.Fatal(err) 1625 } 1626 from, err := os.Open("../../compatibility/renter_v04.json") 1627 if err != nil { 1628 t.Fatal(err) 1629 } 1630 to, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE, 0700) 1631 if err != nil { 1632 t.Fatal(err) 1633 } 1634 _, err = io.Copy(to, from) 1635 if err != nil { 1636 t.Fatal(err) 1637 } 1638 if err = from.Close(); err != nil { 1639 t.Fatal(err) 1640 } 1641 if err = to.Close(); err != nil { 1642 t.Fatal(err) 1643 } 1644 1645 // Create new node from legacy renter.json persistence file 1646 r, err := siatest.NewNode(node.AllModules(testdir)) 1647 if err != nil { 1648 t.Fatal(err) 1649 } 1650 defer func() { 1651 if err = r.Close(); err != nil { 1652 t.Fatal(err) 1653 } 1654 }() 1655 1656 // Set renter allowance to finish renter set up 1657 // Currently /renter POST endpoint errors if the allowance 1658 // is not previously set or passed in as an argument 1659 err = r.RenterPostAllowance(siatest.DefaultAllowance) 1660 if err != nil { 1661 t.Fatal(err) 1662 } 1663 1664 // Check Settings, should be defaults 1665 rg, err := r.RenterGet() 1666 if err != nil { 1667 t.Fatal(err) 1668 } 1669 if rg.Settings.StreamCacheSize != renter.DefaultStreamCacheSize { 1670 t.Fatalf("StreamCacheSize not set to default of %v, set to %v", 1671 renter.DefaultStreamCacheSize, rg.Settings.StreamCacheSize) 1672 } 1673 if rg.Settings.MaxDownloadSpeed != renter.DefaultMaxDownloadSpeed { 1674 t.Fatalf("MaxDownloadSpeed not set to default of %v, set to %v", 1675 renter.DefaultMaxDownloadSpeed, rg.Settings.MaxDownloadSpeed) 1676 } 1677 if 
rg.Settings.MaxUploadSpeed != renter.DefaultMaxUploadSpeed { 1678 t.Fatalf("MaxUploadSpeed not set to default of %v, set to %v", 1679 renter.DefaultMaxUploadSpeed, rg.Settings.MaxUploadSpeed) 1680 } 1681 1682 // Set StreamCacheSize, MaxDownloadSpeed, and MaxUploadSpeed to new values 1683 cacheSize := uint64(4) 1684 ds := int64(20) 1685 us := int64(10) 1686 if err := r.RenterSetStreamCacheSizePost(cacheSize); err != nil { 1687 t.Fatalf("%v: Could not set StreamCacheSize to %v", err, cacheSize) 1688 } 1689 if err := r.RenterPostRateLimit(ds, us); err != nil { 1690 t.Fatalf("%v: Could not set RateLimits to %v and %v", err, ds, us) 1691 } 1692 1693 // Confirm Settings were updated 1694 rg, err = r.RenterGet() 1695 if err != nil { 1696 t.Fatal(err) 1697 } 1698 if rg.Settings.StreamCacheSize != cacheSize { 1699 t.Fatalf("StreamCacheSize not set to %v, set to %v", cacheSize, rg.Settings.StreamCacheSize) 1700 } 1701 if rg.Settings.MaxDownloadSpeed != ds { 1702 t.Fatalf("MaxDownloadSpeed not set to %v, set to %v", ds, rg.Settings.MaxDownloadSpeed) 1703 } 1704 if rg.Settings.MaxUploadSpeed != us { 1705 t.Fatalf("MaxUploadSpeed not set to %v, set to %v", us, rg.Settings.MaxUploadSpeed) 1706 } 1707 1708 // Restart node 1709 err = r.RestartNode() 1710 if err != nil { 1711 t.Fatal("Failed to restart node:", err) 1712 } 1713 1714 // check Settings, settings should be values set through API endpoints 1715 rg, err = r.RenterGet() 1716 if err != nil { 1717 t.Fatal(err) 1718 } 1719 if rg.Settings.StreamCacheSize != cacheSize { 1720 t.Fatalf("StreamCacheSize not persisted as %v, set to %v", cacheSize, rg.Settings.StreamCacheSize) 1721 } 1722 if rg.Settings.MaxDownloadSpeed != ds { 1723 t.Fatalf("MaxDownloadSpeed not persisted as %v, set to %v", ds, rg.Settings.MaxDownloadSpeed) 1724 } 1725 if rg.Settings.MaxUploadSpeed != us { 1726 t.Fatalf("MaxUploadSpeed not persisted as %v, set to %v", us, rg.Settings.MaxUploadSpeed) 1727 } 1728 } 1729 1730 // TestRenterResetAllowance tests that resetting the allowance after the 1731 // allowance was cancelled will trigger the correct contract formation. 1732 func TestRenterResetAllowance(t *testing.T) { 1733 if testing.Short() { 1734 t.SkipNow() 1735 } 1736 t.Parallel() 1737 1738 // Create a group for testing. 1739 groupParams := siatest.GroupParams{ 1740 Hosts: 2, 1741 Renters: 1, 1742 Miners: 1, 1743 } 1744 tg, err := siatest.NewGroupFromTemplate(groupParams) 1745 if err != nil { 1746 t.Fatal("Failed to create group: ", err) 1747 } 1748 defer func() { 1749 if err := tg.Close(); err != nil { 1750 t.Fatal(err) 1751 } 1752 }() 1753 renter := tg.Renters()[0] 1754 1755 // Cancel the allowance 1756 if err := renter.RenterCancelAllowance(); err != nil { 1757 t.Fatal(err) 1758 } 1759 1760 // Give it some time to mark the contracts as !goodForUpload and 1761 // !goodForRenew. 1762 err = build.Retry(200, 100*time.Millisecond, func() error { 1763 rc, err := renter.RenterInactiveContractsGet() 1764 if err != nil { 1765 return err 1766 } 1767 // Should now have 2 inactive contracts. 
		if len(rc.ActiveContracts) != 0 {
			return fmt.Errorf("expected 0 active contracts, got %v", len(rc.ActiveContracts))
		}
		if len(rc.InactiveContracts) != groupParams.Hosts {
			return fmt.Errorf("expected %v inactive contracts, got %v", groupParams.Hosts, len(rc.InactiveContracts))
		}
		for _, c := range rc.InactiveContracts {
			if c.GoodForUpload {
				return errors.New("contract shouldn't be goodForUpload")
			}
			if c.GoodForRenew {
				return errors.New("contract shouldn't be goodForRenew")
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Set the allowance again.
	if err := renter.RenterPostAllowance(siatest.DefaultAllowance); err != nil {
		t.Fatal(err)
	}

	// Mine a block to start the threadedContractMaintenance.
	if err := tg.Miners()[0].MineBlock(); err != nil {
		t.Fatal(err)
	}

	// Give it some time to mark the contracts as goodForUpload and
	// goodForRenew again.
	err = build.Retry(200, 100*time.Millisecond, func() error {
		rc, err := renter.RenterContractsGet()
		if err != nil {
			return err
		}
		// Should now have 2 active contracts.
		if len(rc.ActiveContracts) != groupParams.Hosts {
			return fmt.Errorf("expected %v active contracts, got %v", groupParams.Hosts, len(rc.ActiveContracts))
		}
		for _, c := range rc.ActiveContracts {
			if !c.GoodForUpload {
				return errors.New("contract should be goodForUpload")
			}
			if !c.GoodForRenew {
				return errors.New("contract should be goodForRenew")
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// TestRenterSpendingReporting checks the accuracy of the reported
// spending
func TestRenterSpendingReporting(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()

	// Create a testgroup without a renter so the renter's
	// initial balance can be obtained
	groupParams := siatest.GroupParams{
		Hosts:  2,
		Miners: 1,
	}
	tg, err := siatest.NewGroupFromTemplate(groupParams)
	if err != nil {
		t.Fatal("Failed to create group: ", err)
	}
	defer func() {
		if err := tg.Close(); err != nil {
			t.Fatal(err)
		}
	}()

	// Add a Renter node
	renterDir, err := siatest.TestDir(filepath.Join(t.Name(), "renter"))
	if err != nil {
		t.Fatal(err)
	}
	renterParams := node.Renter(renterDir)
	renterParams.SkipSetAllowance = true
	nodes, err := tg.AddNodes(renterParams)
	if err != nil {
		t.Fatal(err)
	}
	r := nodes[0]

	// Get largest WindowSize from Hosts
	var windowSize types.BlockHeight
	for _, h := range tg.Hosts() {
		hg, err := h.HostGet()
		if err != nil {
			t.Fatal(err)
		}
		if hg.ExternalSettings.WindowSize >= windowSize {
			windowSize = hg.ExternalSettings.WindowSize
		}
	}

	// Get renter's initial siacoin balance
	wg, err := r.WalletGet()
	if err != nil {
		t.Fatal("Failed to get wallet:", err)
	}
	initialBalance := wg.ConfirmedSiacoinBalance

	// Set allowance
	if err = tg.SetRenterAllowance(r, siatest.DefaultAllowance); err != nil {
		t.Fatal("Failed to set renter allowance:", err)
	}
	numRenewals := 0

	// Confirm Contracts were created as expected
	err = build.Retry(200, 100*time.Millisecond, func() error {
		rc, err := r.RenterInactiveContractsGet()
		if err != nil {
			return err
		}
		rcExpired, err := r.RenterExpiredContractsGet()
		if err != nil {
			return err
		}
		if err = checkContracts(len(tg.Hosts()), numRenewals, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Check that the funds allocated when setting the allowance
	// are reflected correctly in the wallet balance
	err = build.Retry(200, 100*time.Millisecond, func() error {
		err = checkBalanceVsSpending(r, initialBalance)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Upload and download files to show spending
	var remoteFiles []*siatest.RemoteFile
	for i := 0; i < 10; i++ {
		dataPieces := uint64(1)
		parityPieces := uint64(1)
		fileSize := 100 + siatest.Fuzz()
		_, rf, err := r.UploadNewFileBlocking(fileSize, dataPieces, parityPieces)
		if err != nil {
			t.Fatal("Failed to upload a file for testing: ", err)
		}
		remoteFiles = append(remoteFiles, rf)
	}
	for _, rf := range remoteFiles {
		_, err = r.DownloadToDisk(rf, false)
		if err != nil {
			t.Fatal("Could not DownloadToDisk:", err)
		}
	}

	// Check to confirm upload and download spending was captured correctly
	// and reflected in the wallet balance
	err = build.Retry(200, 100*time.Millisecond, func() error {
		err = checkBalanceVsSpending(r, initialBalance)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Mine blocks to force contract renewal
	if err = renewContractsByRenewWindow(r, tg); err != nil {
		t.Fatal(err)
	}
	numRenewals++

	// Confirm Contracts were renewed as expected
	err = build.Retry(200, 100*time.Millisecond, func() error {
		rc, err := r.RenterInactiveContractsGet()
		if err != nil {
			return err
		}
		rcExpired, err := r.RenterExpiredContractsGet()
		if err != nil {
			return err
		}
		if err = checkContracts(len(tg.Hosts()), numRenewals, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
			return err
		}
		if err = checkRenewedContracts(rc.ActiveContracts); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Mine Block to confirm contracts and spending into blockchain
	m := tg.Miners()[0]
	if err = m.MineBlock(); err != nil {
		t.Fatal(err)
	}

	// Waiting for nodes to sync
	if err = tg.Sync(); err != nil {
		t.Fatal(err)
	}

	// Check contract spending against reported spending
	rc, err := r.RenterInactiveContractsGet()
	if err != nil {
		t.Fatal(err)
	}
	rcExpired, err := r.RenterExpiredContractsGet()
	if err != nil {
		t.Fatal(err)
	}
	if err = checkContractVsReportedSpending(r, windowSize, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
		t.Fatal(err)
	}

	// Check to confirm reported spending is still accurate with the renewed contracts
	// and reflected in the wallet balance
	err = build.Retry(200, 100*time.Millisecond, func() error {
		err = checkBalanceVsSpending(r, initialBalance)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Record current Wallet Balance
	wg, err = r.WalletGet()
	if err != nil {
		t.Fatal("Failed to get wallet:", err)
	}
	initialPeriodEndBalance := wg.ConfirmedSiacoinBalance

	// Mine blocks to force contract renewal and new period
	cg, err := r.ConsensusGet()
	if err != nil {
		t.Fatal("Failed to get consensus:", err)
	}
	blockHeight := cg.Height
	endHeight := rc.ActiveContracts[0].EndHeight
	rg, err := r.RenterGet()
	if err != nil {
		t.Fatal("Failed to get renter:", err)
	}
	rw := rg.Settings.Allowance.RenewWindow
	for i := 0; i < int(endHeight-rw-blockHeight+types.MaturityDelay); i++ {
		if err = m.MineBlock(); err != nil {
			t.Fatal(err)
		}
	}
	numRenewals++

	// Waiting for nodes to sync
	if err = tg.Sync(); err != nil {
		t.Fatal(err)
	}

	// Check if unspent unallocated funds were released after the allowance period
	// was exceeded
	wg, err = r.WalletGet()
	if err != nil {
		t.Fatal("Failed to get wallet:", err)
	}
	if initialPeriodEndBalance.Cmp(wg.ConfirmedSiacoinBalance) > 0 {
		t.Fatal("Unspent unallocated funds not released after contract renewal and maturity delay")
	}

	// Confirm Contracts were renewed as expected
	err = build.Retry(200, 100*time.Millisecond, func() error {
		rc, err := r.RenterInactiveContractsGet()
		if err != nil {
			return err
		}
		rcExpired, err := r.RenterExpiredContractsGet()
		if err != nil {
			return err
		}
		if err = checkContracts(len(tg.Hosts()), numRenewals, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
			return err
		}
		if err = checkRenewedContracts(rc.ActiveContracts); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Mine Block to confirm contracts and spending on blockchain
	if err = m.MineBlock(); err != nil {
		t.Fatal(err)
	}

	// Waiting for nodes to sync
	if err = tg.Sync(); err != nil {
		t.Fatal(err)
	}

	// Check contract spending against reported spending
	rc, err = r.RenterInactiveContractsGet()
	if err != nil {
		t.Fatal(err)
	}
	rcExpired, err = r.RenterExpiredContractsGet()
	if err != nil {
		t.Fatal(err)
	}
	if err = checkContractVsReportedSpending(r, windowSize, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
		t.Fatal(err)
	}

	// Check to confirm reported spending is still accurate with the renewed contracts
	// and a new period and reflected in the wallet balance
	err = build.Retry(200, 100*time.Millisecond, func() error {
		err = checkBalanceVsSpending(r, initialBalance)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Renew contracts by running out of funds
	_, err = renewContractsBySpending(r, tg)
	if err != nil {
		t.Fatal(err)
	}
	numRenewals++

	// Mine Block to confirm contracts and spending on blockchain
	if err = m.MineBlock(); err != nil {
		t.Fatal(err)
	}

	// Waiting for nodes to sync
	if err = tg.Sync(); err != nil {
		t.Fatal(err)
	}

	// Confirm Contracts were renewed as expected
	err = build.Retry(200, 100*time.Millisecond, func() error {
		rc, err := r.RenterInactiveContractsGet()
		if err != nil {
			return err
		}
		rcExpired, err := r.RenterExpiredContractsGet()
		if err != nil {
			return err
		}
		if err = checkContracts(len(tg.Hosts()), numRenewals, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
			return err
		}
		if err = checkRenewedContracts(rc.ActiveContracts); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Mine Block to confirm contracts and spending on blockchain
	if err = m.MineBlock(); err != nil {
		t.Fatal(err)
	}

	// Waiting for nodes to sync
	if err = tg.Sync(); err != nil {
		t.Fatal(err)
	}

	// Check contract spending against reported spending
	rc, err = r.RenterInactiveContractsGet()
	if err != nil {
		t.Fatal(err)
	}
	rcExpired, err = r.RenterExpiredContractsGet()
	if err != nil {
		t.Fatal(err)
	}
	if err = checkContractVsReportedSpending(r, windowSize, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
		t.Fatal(err)
	}

	// Check to confirm reported spending is still accurate with the renewed contracts
	// and a new period and reflected in the wallet balance
	err = build.Retry(200, 100*time.Millisecond, func() error {
		err = checkBalanceVsSpending(r, initialBalance)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Mine blocks to force contract renewal
	if err = renewContractsByRenewWindow(r, tg); err != nil {
		t.Fatal(err)
	}
	numRenewals++

	// Confirm Contracts were renewed as expected
	err = build.Retry(200, 100*time.Millisecond, func() error {
		rc, err := r.RenterInactiveContractsGet()
		if err != nil {
			return err
		}
		rcExpired, err := r.RenterExpiredContractsGet()
		if err != nil {
			return err
		}
		if err = checkContracts(len(tg.Hosts()), numRenewals, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
			return err
		}
		if err = checkRenewedContracts(rc.ActiveContracts); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Mine Block to confirm contracts and spending into blockchain
	if err = m.MineBlock(); err != nil {
		t.Fatal(err)
	}

	// Waiting for nodes to sync
	if err = tg.Sync(); err != nil {
		t.Fatal(err)
	}

	// Check contract spending against reported spending
	rc, err = r.RenterInactiveContractsGet()
	if err != nil {
		t.Fatal(err)
	}
	rcExpired, err = r.RenterExpiredContractsGet()
	if err != nil {
		t.Fatal(err)
	}
	if err = checkContractVsReportedSpending(r, windowSize, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {
		t.Fatal(err)
	}

	// Check to confirm reported spending is still accurate with the renewed contracts
	// and reflected in the wallet balance
	err = build.Retry(200, 100*time.Millisecond, func() error {
		err = checkBalanceVsSpending(r, initialBalance)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// The following are helper functions for the renter tests

// checkBalanceVsSpending checks the renter's confirmed siacoin balance in its
// wallet against its reported spending
func checkBalanceVsSpending(r *siatest.TestNode, initialBalance types.Currency) error {
	// Get the initial financial metrics and set variables for easier reference
	rg, err := r.RenterGet()
	if err != nil {
		return err
	}
	fm := rg.FinancialMetrics

	// Check balance after allowance is set
	wg, err := r.WalletGet()
	if err != nil {
		return err
	}
	expectedBalance := initialBalance.Sub(fm.TotalAllocated).Sub(fm.WithheldFunds).Sub(fm.PreviousSpending)
	if expectedBalance.Cmp(wg.ConfirmedSiacoinBalance) != 0 {
		details := fmt.Sprintf(`Initial balance minus Renter Reported Spending does not equal wallet Confirmed Siacoin Balance
		Expected Balance: %v
		Wallet Balance: %v
		Actual difference: %v
		ExpectedBalance: %v
		walletBalance: %v
		`, expectedBalance.HumanString(), wg.ConfirmedSiacoinBalance.HumanString(), initialBalance.Sub(wg.ConfirmedSiacoinBalance).HumanString(),
			expectedBalance.HumanString(), wg.ConfirmedSiacoinBalance.HumanString())
		var diff string
		if expectedBalance.Cmp(wg.ConfirmedSiacoinBalance) > 0 {
			diff = fmt.Sprintf("Under reported by: %v\n", expectedBalance.Sub(wg.ConfirmedSiacoinBalance).HumanString())
		} else {
			diff = fmt.Sprintf("Over reported by: %v\n", wg.ConfirmedSiacoinBalance.Sub(expectedBalance).HumanString())
		}
		err := details + diff
		return errors.New(err)
	}
	return nil
}

// checkContracts confirms that contracts are renewed as expected; renewedContracts
// should be the renter's active contracts and oldContracts should be
// the renter's inactive and expired contracts
func checkContracts(numHosts, numRenewals int, oldContracts, renewedContracts []api.RenterContract) error {
	if len(renewedContracts) != numHosts {
		return fmt.Errorf("Incorrect number of Active contracts: have %v expected %v", len(renewedContracts), numHosts)
	}
	if len(oldContracts) == 0 && numRenewals == 0 {
		return nil
	}
	// Confirm contracts were renewed, this will also mean there are old contracts
	// Verify there are not more renewedContracts than there are oldContracts
	// This would mean contracts are not getting archived
	if len(oldContracts) < len(renewedContracts) {
		return errors.New("Too many renewed contracts")
	}
	if len(oldContracts) != numHosts*numRenewals {
		return fmt.Errorf("Incorrect number of Old contracts: have %v expected %v", len(oldContracts), numHosts*numRenewals)
	}

	// Create Maps for comparison
	initialContractIDMap := make(map[types.FileContractID]struct{})
	initialContractKeyMap := make(map[crypto.Hash]struct{})
	for _, c := range oldContracts {
		initialContractIDMap[c.ID] = struct{}{}
		initialContractKeyMap[crypto.HashBytes(c.HostPublicKey.Key)] = struct{}{}
	}

	for _, c := range renewedContracts {
		// Verify that all the contracts marked as GoodForRenew
		// were renewed
		if _, ok := initialContractIDMap[c.ID]; ok {
			return errors.New("ID from renewedContracts found in oldContracts")
		}
		// Verifying that Renewed Contracts have the same HostPublicKey
		// as an initial contract
		if _, ok := initialContractKeyMap[crypto.HashBytes(c.HostPublicKey.Key)]; !ok {
			return errors.New("Host Public Key from renewedContracts not found in oldContracts")
		}
	}
	return nil
}
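
// Illustrative note (editor's addition, not part of the original suite): a
// worked example of the counts checkContracts enforces. With numHosts = 2 and
// numRenewals = 3, renewedContracts must contain exactly 2 contracts and
// oldContracts exactly 2*3 = 6; every renewed contract must carry a contract
// ID that does not appear in oldContracts while reusing a host public key
// that does.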

// checkContractVsReportedSpending confirms that the spending recorded in the
// renter's contracts matches the reported spending for the renter. Renewed
// contracts should be the renter's active contracts and oldContracts should be
// the renter's inactive and expired contracts
func checkContractVsReportedSpending(r *siatest.TestNode, WindowSize types.BlockHeight, oldContracts, renewedContracts []api.RenterContract) error {
	// Get Current BlockHeight
	cg, err := r.ConsensusGet()
	if err != nil {
		return err
	}

	// Getting financial metrics after uploads, downloads, and
	// contract renewal
	rg, err := r.RenterGet()
	if err != nil {
		return err
	}

	fm := rg.FinancialMetrics
	totalSpent := fm.ContractFees.Add(fm.UploadSpending).
		Add(fm.DownloadSpending).Add(fm.StorageSpending)
	total := totalSpent.Add(fm.Unspent)
	allowance := rg.Settings.Allowance

	// Check that renter financial metrics add up to allowance
	if total.Cmp(allowance.Funds) != 0 {
		return fmt.Errorf(`Combined Total of reported spending and unspent funds not equal to allowance:
		total: %v
		allowance: %v
		`, total.HumanString(), allowance.Funds.HumanString())
	}

	// Check renter financial metrics against contract spending
	var spending modules.ContractorSpending
	for _, contract := range oldContracts {
		if contract.StartHeight >= rg.CurrentPeriod {
			// Calculate ContractFees
			spending.ContractFees = spending.ContractFees.Add(contract.Fees)
			// Calculate TotalAllocated
			spending.TotalAllocated = spending.TotalAllocated.Add(contract.TotalCost)
			// Calculate Spending
			spending.DownloadSpending = spending.DownloadSpending.Add(contract.DownloadSpending)
			spending.UploadSpending = spending.UploadSpending.Add(contract.UploadSpending)
			spending.StorageSpending = spending.StorageSpending.Add(contract.StorageSpending)
		} else if contract.EndHeight+WindowSize+types.MaturityDelay > cg.Height {
			// Calculate funds that are being withheld in contracts
			spending.WithheldFunds = spending.WithheldFunds.Add(contract.RenterFunds)
			// Record the largest window size for worst case when reporting the spending
			if contract.EndHeight+WindowSize+types.MaturityDelay >= spending.ReleaseBlock {
				spending.ReleaseBlock = contract.EndHeight + WindowSize + types.MaturityDelay
			}
			// Calculate Previous spending
			spending.PreviousSpending = spending.PreviousSpending.Add(contract.Fees).
				Add(contract.DownloadSpending).Add(contract.UploadSpending).Add(contract.StorageSpending)
		} else {
			// Calculate Previous spending
			spending.PreviousSpending = spending.PreviousSpending.Add(contract.Fees).
				Add(contract.DownloadSpending).Add(contract.UploadSpending).Add(contract.StorageSpending)
		}
	}
	for _, contract := range renewedContracts {
		if contract.GoodForRenew {
			// Calculate ContractFees
			spending.ContractFees = spending.ContractFees.Add(contract.Fees)
			// Calculate TotalAllocated
			spending.TotalAllocated = spending.TotalAllocated.Add(contract.TotalCost)
			// Calculate Spending
			spending.DownloadSpending = spending.DownloadSpending.Add(contract.DownloadSpending)
			spending.UploadSpending = spending.UploadSpending.Add(contract.UploadSpending)
			spending.StorageSpending = spending.StorageSpending.Add(contract.StorageSpending)
		}
	}

	// Compare contract fees
	if fm.ContractFees.Cmp(spending.ContractFees) != 0 {
		return fmt.Errorf(`Fees not equal:
		Financial Metrics Fees: %v
		Contract Fees: %v
		`, fm.ContractFees.HumanString(), spending.ContractFees.HumanString())
	}
	// Compare Total Allocated
	if fm.TotalAllocated.Cmp(spending.TotalAllocated) != 0 {
		return fmt.Errorf(`Total Allocated not equal:
		Financial Metrics TA: %v
		Contract TA: %v
		`, fm.TotalAllocated.HumanString(), spending.TotalAllocated.HumanString())
	}
	// Compare Upload Spending
	if fm.UploadSpending.Cmp(spending.UploadSpending) != 0 {
		return fmt.Errorf(`Upload spending not equal:
		Financial Metrics US: %v
		Contract US: %v
		`, fm.UploadSpending.HumanString(), spending.UploadSpending.HumanString())
	}
	// Compare Download Spending
	if fm.DownloadSpending.Cmp(spending.DownloadSpending) != 0 {
		return fmt.Errorf(`Download spending not equal:
		Financial Metrics DS: %v
		Contract DS: %v
		`, fm.DownloadSpending.HumanString(), spending.DownloadSpending.HumanString())
	}
	// Compare Storage Spending
	if fm.StorageSpending.Cmp(spending.StorageSpending) != 0 {
		return fmt.Errorf(`Storage spending not equal:
		Financial Metrics SS: %v
		Contract SS: %v
		`, fm.StorageSpending.HumanString(), spending.StorageSpending.HumanString())
	}
	// Compare Withheld Funds
	if fm.WithheldFunds.Cmp(spending.WithheldFunds) != 0 {
		return fmt.Errorf(`Withheld Funds not equal:
		Financial Metrics WF: %v
		Contract WF: %v
		`, fm.WithheldFunds.HumanString(), spending.WithheldFunds.HumanString())
	}
	// Compare Release Block
	if fm.ReleaseBlock != spending.ReleaseBlock {
		return fmt.Errorf(`Release Block not equal:
		Financial Metrics RB: %v
		Contract RB: %v
		`, fm.ReleaseBlock, spending.ReleaseBlock)
	}
	// Compare Previous Spending
	if fm.PreviousSpending.Cmp(spending.PreviousSpending) != 0 {
		return fmt.Errorf(`Previous spending not equal:
		Financial Metrics PS: %v
		Contract PS: %v
		`, fm.PreviousSpending.HumanString(), spending.PreviousSpending.HumanString())
	}

	return nil
}
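
// Illustrative note (editor's addition, not part of the original suite): the
// loop over oldContracts above sorts each contract into one of three buckets.
// A contract that started at or after the current period still counts toward
// ContractFees, TotalAllocated, and the upload/download/storage spending. A
// contract whose EndHeight+WindowSize+MaturityDelay is still above the current
// block height has its remaining RenterFunds counted as WithheldFunds, pushes
// ReleaseBlock out to that height, and adds its fees and spending to
// PreviousSpending. Anything older only contributes to PreviousSpending.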

// checkRenewedContracts confirms that renewed contracts have zero upload and
// download spending. Renewed contracts should be the renter's active contracts
func checkRenewedContracts(renewedContracts []api.RenterContract) error {
	for _, c := range renewedContracts {
		if c.UploadSpending.Cmp(types.ZeroCurrency) != 0 && c.GoodForUpload {
			return fmt.Errorf("Upload spending on renewed contract equal to %v, expected zero", c.UploadSpending.HumanString())
		}
		if c.DownloadSpending.Cmp(types.ZeroCurrency) != 0 {
			return fmt.Errorf("Download spending on renewed contract equal to %v, expected zero", c.DownloadSpending.HumanString())
		}
	}
	return nil
}

// renewContractsByRenewWindow mines blocks to force contract renewal
func renewContractsByRenewWindow(renter *siatest.TestNode, tg *siatest.TestGroup) error {
	rg, err := renter.RenterGet()
	if err != nil {
		return errors.AddContext(err, "failed to get RenterGet")
	}
	m := tg.Miners()[0]
	for i := 0; i < int(rg.Settings.Allowance.Period-rg.Settings.Allowance.RenewWindow); i++ {
		if err = m.MineBlock(); err != nil {
			return err
		}
	}

	// Waiting for nodes to sync
	if err = tg.Sync(); err != nil {
		return err
	}
	return nil
}
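
// Illustrative note (editor's sketch with hypothetical numbers, not part of
// the original suite): if the allowance Period were 50 blocks and the
// RenewWindow 20 blocks, the loop above would mine 30 blocks, pushing the
// chain height into the renew window so that the contractor's maintenance
// (the threadedContractMaintenance referenced in TestRenterResetAllowance)
// renews the contracts.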

// renewContractsBySpending uploads files until the contracts renew due to
// running out of funds
func renewContractsBySpending(renter *siatest.TestNode, tg *siatest.TestGroup) (startingUploadSpend types.Currency, err error) {
	// Renew contracts by running out of funds
	// Set upload price to max price
	maxStoragePrice := types.SiacoinPrecision.Mul64(30e5).Div(modules.BlockBytesPerMonthTerabyte) // 30k SC / TB / Month
	maxUploadPrice := maxStoragePrice.Mul64(30 * 4320)
	hosts := tg.Hosts()
	for _, h := range hosts {
		err := h.HostModifySettingPost(client.HostParamMinUploadBandwidthPrice, maxUploadPrice)
		if err != nil {
			return types.ZeroCurrency, errors.AddContext(err, "could not set Host Upload Price")
		}
	}

	// Waiting for nodes to sync
	m := tg.Miners()[0]
	if err := m.MineBlock(); err != nil {
		return types.ZeroCurrency, errors.AddContext(err, "error mining block")
	}
	if err := tg.Sync(); err != nil {
		return types.ZeroCurrency, err
	}

	// Set upload parameters
	dataPieces := uint64(1)
	parityPieces := uint64(1)
	chunkSize := siatest.ChunkSize(1)

	// Upload once to show upload spending
	_, _, err = renter.UploadNewFileBlocking(int(chunkSize), dataPieces, parityPieces)
	if err != nil {
		return types.ZeroCurrency, errors.AddContext(err, "failed to upload first file in renewContractsBySpending")
	}

	// Get current upload spend, previously contracts had zero upload spend
	rc, err := renter.RenterContractsGet()
	if err != nil {
		return types.ZeroCurrency, errors.AddContext(err, "could not get renter active contracts")
	}
	startingUploadSpend = rc.ActiveContracts[0].UploadSpending

	// Upload files to force contract renewal due to running out of funds
LOOP:
	for {
		// To protect against contracts not renewing during uploads
		for _, c := range rc.ActiveContracts {
			percentRemaining, _ := big.NewRat(0, 1).SetFrac(c.RenterFunds.Big(), c.TotalCost.Big()).Float64()
			if percentRemaining < float64(0.03) {
				break LOOP
			}
		}
		_, _, err = renter.UploadNewFileBlocking(int(chunkSize), dataPieces, parityPieces)
		if err != nil {
			pr, _ := big.NewRat(0, 1).SetFrac(rc.ActiveContracts[0].RenterFunds.Big(), rc.ActiveContracts[0].TotalCost.Big()).Float64()
			s := fmt.Sprintf("failed to upload file in renewContractsBySpending loop, percentRemaining: %v", pr)
			return types.ZeroCurrency, errors.AddContext(err, s)
		}

		rc, err = renter.RenterContractsGet()
		if err != nil {
			return types.ZeroCurrency, errors.AddContext(err, "could not get renter active contracts")
		}
	}
	if err = m.MineBlock(); err != nil {
		return startingUploadSpend, err
	}
	return startingUploadSpend, nil
}
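
// exampleBalanceInvariant is an editor's illustrative sketch (not part of the
// original test suite) of the wallet-balance invariant that
// checkBalanceVsSpending enforces, shown with hypothetical values: the
// confirmed wallet balance should equal the starting balance minus everything
// the renter has allocated, withheld, or previously spent.
func exampleBalanceInvariant() error {
	// Hypothetical figures, in siacoins.
	initialBalance := types.SiacoinPrecision.Mul64(1000)
	totalAllocated := types.SiacoinPrecision.Mul64(400)
	withheldFunds := types.SiacoinPrecision.Mul64(50)
	previousSpending := types.SiacoinPrecision.Mul64(25)

	// expected = 1000 - 400 - 50 - 25 = 525 SC.
	expected := initialBalance.Sub(totalAllocated).Sub(withheldFunds).Sub(previousSpending)

	// A reported wallet balance matching the expectation satisfies the invariant.
	walletBalance := types.SiacoinPrecision.Mul64(525)
	if expected.Cmp(walletBalance) != 0 {
		return fmt.Errorf("expected balance %v, got %v", expected.HumanString(), walletBalance.HumanString())
	}
	return nil
}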