github.com/dmmcquay/sia@v1.3.1-0.20180712220038-9f8d535311b9/modules/renter_test.go

package modules

import (
	"encoding/json"
	"os"
	"path/filepath"
	"testing"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/crypto"
	"github.com/NebulousLabs/Sia/persist"
	"github.com/NebulousLabs/fastrand"
)

// TestMerkleRootSetCompatibility checks that the persist encoding for the
// MerkleRootSet type is compatible with the previous encoding for the data,
// which was a slice of type crypto.Hash.
func TestMerkleRootSetCompatibility(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	// Create some fake headers for the files.
	meta := persist.Metadata{
		Header:  "Test Header",
		Version: "1.1.1",
	}

	// Try multiple sizes of array.
	for i := 0; i < 10; i++ {
		// Create a []crypto.Hash of length i.
		type chStruct struct {
			Hashes []crypto.Hash
		}
		var chs chStruct
		for j := 0; j < i; j++ {
			var ch crypto.Hash
			fastrand.Read(ch[:])
			chs.Hashes = append(chs.Hashes, ch)
		}

		// Save and load, check that they are the same.
		dir := build.TempDir("modules", t.Name())
		err := os.MkdirAll(dir, 0700)
		if err != nil {
			t.Fatal(err)
		}
		filename := filepath.Join(dir, "file")
		err = persist.SaveJSON(meta, chs, filename)
		if err != nil {
			t.Fatal(err)
		}

		// Load and verify equivalence.
		var loadCHS chStruct
		err = persist.LoadJSON(meta, &loadCHS, filename)
		if err != nil {
			t.Fatal(err)
		}
		if len(chs.Hashes) != len(loadCHS.Hashes) {
			t.Fatal("arrays should be the same size")
		}
		for j := range chs.Hashes {
			if chs.Hashes[j] != loadCHS.Hashes[j] {
				t.Error("loading failed", i, j)
			}
		}

		// Load into MerkleRootSet and verify equivalence.
		type mrStruct struct {
			Hashes MerkleRootSet
		}
		var loadMRS mrStruct
		err = persist.LoadJSON(meta, &loadMRS, filename)
		if err != nil {
			t.Fatal(err)
		}
		if len(chs.Hashes) != len(loadMRS.Hashes) {
			t.Fatal("arrays should be the same size")
		}
		for j := range chs.Hashes {
			if chs.Hashes[j] != loadMRS.Hashes[j] {
				t.Error("loading failed", i, j)
			}
		}

		// Save as a MerkleRootSet and verify it can be loaded again.
		var mrs mrStruct
		mrs.Hashes = MerkleRootSet(chs.Hashes)
		err = persist.SaveJSON(meta, mrs, filename)
		if err != nil {
			t.Fatal(err)
		}
		err = persist.LoadJSON(meta, &loadMRS, filename)
		if err != nil {
			t.Fatal(err)
		}
		if len(mrs.Hashes) != len(loadMRS.Hashes) {
			t.Fatal("arrays should be the same size")
		}
		for j := range mrs.Hashes {
			if mrs.Hashes[j] != loadMRS.Hashes[j] {
				t.Error("loading failed", i, j)
			}
		}
	}
}

// BenchmarkMerkleRootSetEncode clocks how fast large MerkleRootSets can be
// encoded.
func BenchmarkMerkleRootSetEncode(b *testing.B) {
	// Create a MerkleRootSet of length 1e3.
	type chStruct struct {
		Hashes MerkleRootSet
	}
	var chs chStruct
	for i := 0; i < 1e3; i++ {
		var ch crypto.Hash
		fastrand.Read(ch[:])
		chs.Hashes = append(chs.Hashes, ch)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := json.Marshal(chs)
		if err != nil {
			b.Fatal(err)
		}
	}
}

// BenchmarkSliceCryptoHashEncode clocks how fast large []crypto.Hash values
// can be encoded.
func BenchmarkSliceCryptoHashEncode(b *testing.B) {
	// Create a []crypto.Hash of length 1e3.
	type chStruct struct {
		Hashes []crypto.Hash
	}
	var chs chStruct
	for i := 0; i < 1e3; i++ {
		var ch crypto.Hash
		fastrand.Read(ch[:])
		chs.Hashes = append(chs.Hashes, ch)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := json.Marshal(chs)
		if err != nil {
			b.Fatal(err)
		}
	}
}

// BenchmarkMerkleRootSetSave clocks how fast large MerkleRootSets can be
// encoded and written to disk.
func BenchmarkMerkleRootSetSave(b *testing.B) {
	// Create some fake headers for the files.
	meta := persist.Metadata{
		Header:  "Bench Header",
		Version: "1.1.1",
	}

	// Create a MerkleRootSet of length 1e3.
	type chStruct struct {
		Hashes MerkleRootSet
	}
	var chs chStruct
	for i := 0; i < 1e3; i++ {
		var ch crypto.Hash
		fastrand.Read(ch[:])
		chs.Hashes = append(chs.Hashes, ch)
	}

	// Save through the persist package.
	dir := build.TempDir("modules", "BenchmarkMerkleRootSetSave")
	err := os.MkdirAll(dir, 0700)
	if err != nil {
		b.Fatal(err)
	}
	filename := filepath.Join(dir, "file")

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err = persist.SaveJSON(meta, chs, filename)
		if err != nil {
			b.Fatal(err)
		}
	}
}

// BenchmarkSliceCryptoHashSave clocks how fast large []crypto.Hash values can
// be encoded and written to disk.
func BenchmarkSliceCryptoHashSave(b *testing.B) {
	// Create some fake headers for the files.
	meta := persist.Metadata{
		Header:  "Bench Header",
		Version: "1.1.1",
	}

	// Create a []crypto.Hash of length 1e3.
	type chStruct struct {
		Hashes []crypto.Hash
	}
	var chs chStruct
	for i := 0; i < 1e3; i++ {
		var ch crypto.Hash
		fastrand.Read(ch[:])
		chs.Hashes = append(chs.Hashes, ch)
	}

	// Save through the persist package.
	dir := build.TempDir("modules", "BenchmarkSliceCryptoHashSave")
	err := os.MkdirAll(dir, 0700)
	if err != nil {
		b.Fatal(err)
	}
	filename := filepath.Join(dir, "file")

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err = persist.SaveJSON(meta, chs, filename)
		if err != nil {
			b.Fatal(err)
		}
	}
}