github.com/Synthesix/Sia@v1.3.3-0.20180413141344-f863baeed3ca/siatest/renter/renter_test.go

package renter

import (
	"sync"
	"testing"

	"github.com/Synthesix/Sia/modules"
	"github.com/Synthesix/Sia/modules/renter"
	"github.com/Synthesix/Sia/node"
	"github.com/Synthesix/Sia/siatest"

	"github.com/NebulousLabs/fastrand"
)

// TestRenter executes a number of subtests using the same TestGroup to
// save time on initialization.
func TestRenter(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	// Create a group for the subtests.
	groupParams := siatest.GroupParams{
		Hosts:   5,
		Renters: 1,
		Miners:  1,
	}
	tg, err := siatest.NewGroupFromTemplate(groupParams)
	if err != nil {
		t.Fatal("Failed to create group: ", err)
	}
	defer func() {
		if err := tg.Close(); err != nil {
			t.Fatal(err)
		}
	}()

	// Specify the subtests to run.
	subTests := []struct {
		name string
		test func(*testing.T, *siatest.TestGroup)
	}{
		{"UploadDownload", testUploadDownload},
		{"DownloadMultipleLargeSectors", testDownloadMultipleLargeSectors},
		{"TestRenterLocalRepair", testRenterLocalRepair},
		{"TestRenterRemoteRepair", testRenterRemoteRepair},
	}
	// Run the subtests.
	for _, subtest := range subTests {
		t.Run(subtest.name, func(t *testing.T) {
			subtest.test(t, tg)
		})
	}
}

// testUploadDownload is a subtest that uses an existing TestGroup to test if
// uploading and downloading a file works.
func testUploadDownload(t *testing.T, tg *siatest.TestGroup) {
	// Grab the first of the group's renters.
	renter := tg.Renters()[0]
	// Upload a file, creating a piece for each host in the group.
	dataPieces := uint64(1)
	parityPieces := uint64(len(tg.Hosts())) - dataPieces
	fileSize := 100 + siatest.Fuzz()
	localFile, remoteFile, err := renter.UploadNewFileBlocking(fileSize, dataPieces, parityPieces)
	if err != nil {
		t.Fatal("Failed to upload a file for testing: ", err)
	}
	// Download the file synchronously, directly into memory.
	_, err = renter.DownloadByStream(remoteFile)
	if err != nil {
		t.Fatal(err)
	}
	// Download the file synchronously to a file on disk.
	_, err = renter.DownloadToDisk(remoteFile, false)
	if err != nil {
		t.Fatal(err)
	}
	// Download the file asynchronously and wait for the download to finish.
	localFile, err = renter.DownloadToDisk(remoteFile, true)
	if err != nil {
		t.Error(err)
	}
	if err := renter.WaitForDownload(localFile, remoteFile); err != nil {
		t.Error(err)
	}
	// Stream the file.
	_, err = renter.Stream(remoteFile)
	if err != nil {
		t.Fatal(err)
	}
	// Stream the file partially a few times. At least 1 byte is streamed.
	for i := 0; i < 5; i++ {
		from := fastrand.Intn(fileSize - 1)             // [0..fileSize-2]
		to := from + 1 + fastrand.Intn(fileSize-from-1) // [from+1..fileSize-1]
		_, err = renter.StreamPartial(remoteFile, localFile, uint64(from), uint64(to))
		if err != nil {
			t.Fatal(err)
		}
	}
}

// testDownloadMultipleLargeSectors downloads multiple large files (>5
// sectors) in parallel and makes sure that all of the downloads complete
// even though they contend for limited bandwidth.
func testDownloadMultipleLargeSectors(t *testing.T, tg *siatest.TestGroup) {
	// parallelDownloads is the number of downloads that are run in parallel.
	parallelDownloads := 10
	// fileSize is the size of the downloaded file.
	fileSize := int(10*modules.SectorSize) + siatest.Fuzz()
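	// Note: the download speed is capped below at twice fileSize (presumably
	// bytes per second, and global to the renter), so the ten parallel
	// downloads together need several seconds of shared bandwidth and are
	// forced to genuinely overlap rather than complete one after another.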
	// uniqueRemoteFiles is the number of files that will be uploaded to the
	// network. Downloads will choose the remote file to download at random.
	uniqueRemoteFiles := 5
	// Grab the first of the group's renters.
	renter := tg.Renters()[0]
	// Set download limits and reset them after the test.
	if err := renter.RenterPostRateLimit(int64(fileSize)*2, 0); err != nil {
		t.Fatal("failed to set renter bandwidth limit", err)
	}
	defer func() {
		if err := renter.RenterPostRateLimit(0, 0); err != nil {
			t.Error("failed to reset renter bandwidth limit", err)
		}
	}()

	// Upload the files.
	dataPieces := uint64(len(tg.Hosts())) - 1
	parityPieces := uint64(1)
	remoteFiles := make([]*siatest.RemoteFile, 0, uniqueRemoteFiles)
	for i := 0; i < uniqueRemoteFiles; i++ {
		_, remoteFile, err := renter.UploadNewFileBlocking(fileSize, dataPieces, parityPieces)
		if err != nil {
			t.Fatal("Failed to upload a file for testing: ", err)
		}
		remoteFiles = append(remoteFiles, remoteFile)
	}

	// Randomly download using the download-to-disk and download-by-stream
	// methods.
	wg := new(sync.WaitGroup)
	for i := 0; i < parallelDownloads; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Pick a random remote file and a random download method.
			rf := remoteFiles[fastrand.Intn(len(remoteFiles))]
			var err error
			if fastrand.Intn(2) == 0 {
				_, err = renter.DownloadByStream(rf)
			} else {
				_, err = renter.DownloadToDisk(rf, false)
			}
			if err != nil {
				t.Error("Download failed:", err)
			}
		}()
	}
	wg.Wait()
}

// testRenterLocalRepair tests if a renter correctly repairs a file from disk
// after a host goes offline.
func testRenterLocalRepair(t *testing.T, tg *siatest.TestGroup) {
	// Grab the first of the group's renters.
	renter := tg.Renters()[0]

	// Check that we have enough hosts for this test.
	if len(tg.Hosts()) < 2 {
		t.Fatal("This test requires at least 2 hosts")
	}

	// Set the fileSize and redundancy for the upload.
	fileSize := int(modules.SectorSize)
	dataPieces := uint64(1)
	parityPieces := uint64(len(tg.Hosts())) - dataPieces

	// Upload the file.
	_, remoteFile, err := renter.UploadNewFileBlocking(fileSize, dataPieces, parityPieces)
	if err != nil {
		t.Fatal(err)
	}
	// Get the file info of the fully uploaded file. That way we can compare
	// the redundancies later.
	fi, err := renter.FileInfo(remoteFile)
	if err != nil {
		t.Fatal("failed to get file info", err)
	}

	// Take down one of the hosts and check that the redundancy decreases.
	if err := tg.RemoveNode(tg.Hosts()[0]); err != nil {
		t.Fatal("Failed to shutdown host", err)
	}
	expectedRedundancy := float64(dataPieces+parityPieces-1) / float64(dataPieces)
	if err := renter.WaitForDecreasingRedundancy(remoteFile, expectedRedundancy); err != nil {
		t.Fatal("Redundancy isn't decreasing", err)
	}
	// We should still be able to download the file.
	if _, err := renter.DownloadByStream(remoteFile); err != nil {
		t.Fatal("Failed to download file", err)
	}
	// Bring up a new host and check that the redundancy recovers.
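	// Since the file still exists on the renter's disk, the repair can
	// re-upload the missing pieces directly from the local copy, so the
	// redundancy is expected to climb all the way back to the original
	// fi.Redundancy (contrast this with testRenterRemoteRepair below, where
	// the local copy is deleted first).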
	if err := tg.AddNodes(node.HostTemplate); err != nil {
		t.Fatal("Failed to create a new host", err)
	}
	if err := renter.WaitForUploadRedundancy(remoteFile, fi.Redundancy); err != nil {
		t.Fatal("File wasn't repaired", err)
	}
	// We should be able to download the file again.
	if _, err := renter.DownloadByStream(remoteFile); err != nil {
		t.Fatal("Failed to download file", err)
	}
}

// testRenterRemoteRepair tests if a renter correctly repairs a file by
// downloading it after a host goes offline.
func testRenterRemoteRepair(t *testing.T, tg *siatest.TestGroup) {
	// Grab the first of the group's renters. Named r so that it doesn't
	// shadow the renter package, which is referenced below.
	r := tg.Renters()[0]

	// Check that we have enough hosts for this test.
	if len(tg.Hosts()) < 2 {
		t.Fatal("This test requires at least 2 hosts")
	}

	// Set the fileSize and redundancy for the upload.
	fileSize := int(modules.SectorSize)
	dataPieces := uint64(1)
	parityPieces := uint64(len(tg.Hosts())) - dataPieces

	// Upload the file.
	localFile, remoteFile, err := r.UploadNewFileBlocking(fileSize, dataPieces, parityPieces)
	if err != nil {
		t.Fatal(err)
	}
	// Get the file info of the fully uploaded file. That way we can compare
	// the redundancies later.
	fi, err := r.FileInfo(remoteFile)
	if err != nil {
		t.Fatal("failed to get file info", err)
	}

	// Delete the file locally so the renter is forced to repair from the
	// network.
	if err := localFile.Delete(); err != nil {
		t.Fatal("failed to delete local file", err)
	}

	// Take down all of the parity hosts and check that the redundancy
	// decreases. With parityPieces == 1 this removes exactly one host.
	for i := uint64(0); i < parityPieces; i++ {
		if err := tg.RemoveNode(tg.Hosts()[0]); err != nil {
			t.Fatal("Failed to shutdown host", err)
		}
	}
	expectedRedundancy := float64(dataPieces+parityPieces-1) / float64(dataPieces)
	if err := r.WaitForDecreasingRedundancy(remoteFile, expectedRedundancy); err != nil {
		t.Fatal("Redundancy isn't decreasing", err)
	}
	// We should still be able to download the file.
	if _, err := r.DownloadByStream(remoteFile); err != nil {
		t.Fatal("Failed to download file", err)
	}
	// Bring up new parity hosts and check that the redundancy recovers.
	if err := tg.AddNodeN(node.HostTemplate, int(parityPieces)); err != nil {
		t.Fatal("Failed to create a new host", err)
	}
	// When repairing remotely the redundancy might not reach 100%.
	expectedRedundancy = (1.0 - renter.RemoteRepairDownloadThreshold) * fi.Redundancy
	if err := r.WaitForUploadRedundancy(remoteFile, expectedRedundancy); err != nil {
		t.Fatal("File wasn't repaired", err)
	}
	// We should be able to download the file again.
	if _, err := r.DownloadByStream(remoteFile); err != nil {
		t.Fatal("Failed to download file", err)
	}
}
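
// expectedRedundancyAfterLoss is a minimal illustrative sketch (not called by
// the tests above) of the redundancy arithmetic both repair tests rely on: a
// file erasure coded into dataPieces+parityPieces pieces that has lost `lost`
// of them reports a redundancy of (dataPieces+parityPieces-lost)/dataPieces,
// which is how expectedRedundancy is computed in testRenterLocalRepair and
// testRenterRemoteRepair.
func expectedRedundancyAfterLoss(dataPieces, parityPieces, lost uint64) float64 {
	// Assumes lost <= dataPieces+parityPieces so the subtraction can't
	// underflow.
	return float64(dataPieces+parityPieces-lost) / float64(dataPieces)
}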