package renter

import (
	"bytes"
	"io"
	"testing"
	"time"

	"github.com/NebulousLabs/Sia/crypto"
)

// a testFetcher simulates a host. It implements the fetcher interface.
type testFetcher struct {
	sectors   map[crypto.Hash][]byte  // sector data stored on this host, keyed by Merkle root
	pieceMap  map[uint64][]pieceData  // pieces held for each chunk index
	pieceSize uint64

	nAttempt int // total number of download attempts
	nFetch   int // number of successful download attempts

	// used to simulate real-world conditions
	delay    time.Duration // transfers will take this long
	failRate int           // transfers will randomly fail with probability 1/failRate
}

// pieces returns the pieces this host holds for the given chunk index.
// A nil slice is returned for chunks the host knows nothing about.
func (f *testFetcher) pieces(chunkIndex uint64) []pieceData {
	return f.pieceMap[chunkIndex]
}

// fetch returns the sector data matching p's Merkle root, after sleeping for
// the configured delay. With probability 1/failRate it instead simulates a
// transfer failure by returning io.EOF. Every call increments nAttempt;
// only successful calls increment nFetch.
func (f *testFetcher) fetch(p pieceData) ([]byte, error) {
	f.nAttempt++
	time.Sleep(f.delay)
	// randomly fail
	if n, _ := crypto.RandIntn(f.failRate); n == 0 {
		return nil, io.EOF
	}
	f.nFetch++
	return f.sectors[p.MerkleRoot], nil
}

// TestErasureDownload tests parallel downloading of erasure-coded data. It
// mocks the fetcher interface in order to directly test the downloading
// algorithm.
44 func TestErasureDownload(t *testing.T) { 45 if testing.Short() { 46 t.SkipNow() 47 } 48 49 // generate data 50 const dataSize = 777 51 data, err := crypto.RandBytes(dataSize) 52 if err != nil { 53 t.Fatal(err) 54 } 55 56 // create Reed-Solomon encoder 57 rsc, err := NewRSCode(2, 10) 58 if err != nil { 59 t.Fatal(err) 60 } 61 62 // create hosts 63 const pieceSize = 10 64 hosts := make([]fetcher, rsc.NumPieces()) 65 for i := range hosts { 66 hosts[i] = &testFetcher{ 67 sectors: make(map[crypto.Hash][]byte), 68 pieceMap: make(map[uint64][]pieceData), 69 pieceSize: pieceSize, 70 71 delay: time.Millisecond, 72 failRate: 5, // 20% failure rate 73 } 74 } 75 // make one host really slow 76 hosts[0].(*testFetcher).delay = 100 * time.Millisecond 77 // make one host always fail 78 hosts[1].(*testFetcher).failRate = 1 79 80 // upload data to hosts 81 r := bytes.NewReader(data) // makes chunking easier 82 chunk := make([]byte, pieceSize*rsc.MinPieces()) 83 var i uint64 84 for i = uint64(0); ; i++ { 85 _, err := io.ReadFull(r, chunk) 86 if err == io.EOF { 87 break 88 } else if err != nil && err != io.ErrUnexpectedEOF { 89 t.Fatal(err) 90 } 91 pieces, err := rsc.Encode(chunk) 92 if err != nil { 93 t.Fatal(err) 94 } 95 for j, p := range pieces { 96 root := crypto.MerkleRoot(p) 97 host := hosts[j%len(hosts)].(*testFetcher) // distribute evenly 98 host.pieceMap[i] = append(host.pieceMap[i], pieceData{ 99 Chunk: uint64(i), 100 Piece: uint64(j), 101 MerkleRoot: root, 102 }) 103 host.sectors[root] = p 104 } 105 } 106 107 // check hosts (not strictly necessary) 108 err = checkHosts(hosts, rsc.MinPieces(), i) 109 if err != nil { 110 t.Fatal(err) 111 } 112 113 // download data 114 d := newFile("foo", rsc, pieceSize, dataSize).newDownload(hosts, "") 115 buf := new(bytes.Buffer) 116 err = d.run(buf) 117 if err != nil { 118 t.Fatal(err) 119 } 120 121 if !bytes.Equal(buf.Bytes(), data) { 122 t.Fatal("recovered data does not match original") 123 } 124 125 /* 126 // These metrics can be used 
to assess the efficiency of the download 127 // algorithm. 128 129 totFetch := 0 130 for i, h := range hosts { 131 h := h.(*testHost) 132 t.Logf("Host %2d: Fetched: %v/%v", i, h.nFetch, h.nAttempt) 133 totFetch += h.nAttempt 134 } 135 t.Log("Optimal fetches:", i*uint64(rsc.MinPieces())) 136 t.Log("Total fetches: ", totFetch) 137 */ 138 }