// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package stream

import (
	"context"
	"fmt"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

//constants for random file generation
const (
	// minFileSize and maxFileSize bound the size of the randomly generated
	// test files (units are determined by the file generator defined
	// elsewhere in this package — NOTE(review): presumably kilobytes, confirm
	// against uploadFilesToNodes).
	minFileSize = 2
	maxFileSize = 40
)

//This test is a retrieval test for nodes.
//A configurable number of nodes can be
//provided to the test.
//Files are uploaded to nodes, other nodes try to retrieve the file
//Number of nodes can be provided via commandline too.
func TestFileRetrieval(t *testing.T) {
	// If a node count was supplied on the command line (the package-level
	// `nodes` flag), run exactly that configuration; otherwise run the
	// default set of node counts.
	if *nodes != 0 {
		err := runFileRetrievalTest(*nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		nodeCnt := []int{16}
		//if the `longrunning` flag has been provided
		//run more test combinations
		if *longrunning {
			nodeCnt = append(nodeCnt, 32, 64, 128)
		}
		for _, n := range nodeCnt {
			err := runFileRetrievalTest(n)
			if err != nil {
				t.Fatal(err)
			}
		}
	}
}

//This test is a retrieval test for nodes.
//One node is randomly selected to be the pivot node.
//A configurable number of chunks and nodes can be
//provided to the test, the number of chunks is uploaded
//to the pivot node and other nodes try to retrieve the chunk(s).
//Number of chunks and nodes can be provided via commandline too.
func TestRetrieval(t *testing.T) {
	//if nodes/chunks have been provided via commandline,
	//run the tests with these values
	if *nodes != 0 && *chunks != 0 {
		err := runRetrievalTest(*chunks, *nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		var nodeCnt []int
		var chnkCnt []int
		//if the `longrunning` flag has been provided
		//run more test combinations
		if *longrunning {
			nodeCnt = []int{16, 32, 128}
			chnkCnt = []int{4, 32, 256}
		} else {
			//default test
			nodeCnt = []int{16}
			chnkCnt = []int{32}
		}
		// Run every (node count, chunk count) combination.
		for _, n := range nodeCnt {
			for _, c := range chnkCnt {
				err := runRetrievalTest(c, n)
				if err != nil {
					t.Fatal(err)
				}
			}
		}
	}
}

// retrievalSimServiceMap registers the per-node service constructor used by
// the simulation framework for the retrieval tests; the snapshot files loaded
// by these tests must list "streamer" in their service list.
var retrievalSimServiceMap = map[string]simulation.ServiceFunc{
	"streamer": retrievalStreamerFunc,
}

// retrievalStreamerFunc constructs the full streamer service stack for one
// simulated node: local storage on a temp datadir, a NetStore layered on top,
// Kademlia + Delivery for routing/retrieval, and the stream Registry with
// syncing and retrieval enabled. The store and FileStore are published into
// the shared bucket so the test body can reach into each node, and the
// returned cleanup tears everything down (datadir, netstore, registry).
func retrievalStreamerFunc(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
	n := ctx.Config.Node()
	addr := network.NewAddr(n)
	store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
	if err != nil {
		return nil, nil, err
	}
	// Expose the raw store so tests can upload directly into a node.
	bucket.Store(bucketKeyStore, store)

	localStore := store.(*storage.LocalStore)
	netStore, err := storage.NewNetStore(localStore, nil)
	if err != nil {
		return nil, nil, err
	}
	kad := network.NewKademlia(addr.Over(), network.NewKadParams())
	delivery := NewDelivery(kad, netStore)
	// Wire network fetching so missing chunks are requested from peers.
	netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

	// Registry with both syncing and retrieval (client and server side)
	// enabled — retrieval tests need nodes to serve as well as request.
	r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
		DoSync:          true,
		SyncUpdateDelay: 3 * time.Second,
		DoRetrieve:      true,
		DoServeRetrieve: true,
	})

	fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
	// Expose the FileStore so tests can retrieve uploaded content per node.
	bucket.Store(bucketKeyFileStore, fileStore)

	cleanup = func() {
		os.RemoveAll(datadir)
		netStore.Close()
		r.Close()
	}

	return r, cleanup, nil
}

/*
The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. Nevertheless a health check runs in the
simulation's `action` function.

The snapshot should have 'streamer' in its service list.
*/
// runFileRetrievalTest uploads random files to nodes of a snapshot-built
// network of nodeCount nodes, then polls every node's FileStore until each
// uploaded file can be retrieved from every node (or the 1-minute simulation
// timeout fires). Returns a non-nil error on setup failure or timeout.
func runFileRetrievalTest(nodeCount int) error {
	sim := simulation.New(retrievalSimServiceMap)
	defer sim.Close()

	log.Info("Initializing test config")

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	// Snapshot files are keyed by node count; the snapshot is assumed to
	// describe an already-healthy kademlia (see comment block above).
	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	// Overall deadline for the whole simulation run, including the retry
	// loop below.
	ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancelSimRun()

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//an array for the random files
		var randomFiles []string
		//channel to signal when the upload has finished
		//uploadFinished := make(chan struct{})
		//channel to trigger new node checks

		// Note: assigns to the outer `err` captured by this closure.
		conf.hashes, randomFiles, err = uploadFilesToNodes(sim)
		if err != nil {
			return err
		}
		// Wait until kademlia is healthy before checking retrieval.
		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
		// or until the timeout is reached.
	REPEAT:
		for {
			for _, id := range nodeIDs {
				//for each expected file, check if it is in the local store
				item, ok := sim.NodeItem(id, bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("No filestore")
				}
				fileStore := item.(*storage.FileStore)
				//check all chunks
				for i, hash := range conf.hashes {
					// Retrieve's error is deliberately ignored: the
					// Size call below surfaces any failure, which just
					// triggers another retry round.
					reader, _ := fileStore.Retrieve(context.TODO(), hash)
					//check that we can read the file size and that it corresponds to the generated file size
					if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) {
						log.Debug("Retrieve error", "err", err, "hash", hash, "nodeId", id)
						// Back off briefly, then restart the check over
						// all nodes from scratch.
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
				}
			}
			// All files retrievable from all nodes: success.
			return nil
		}
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}

/*
The test generates the given number of chunks.

The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. Nevertheless a health check runs in the
simulation's `action` function.

The snapshot should have 'streamer' in its service list.
*/
// runRetrievalTest uploads chunkCount chunks to one randomly selected node of
// a snapshot-built network of nodeCount nodes, then polls every node's
// FileStore until each chunk can be retrieved from every node. Unlike
// runFileRetrievalTest there is no explicit timeout on the context here, so
// the simulation framework's own limits apply — NOTE(review): confirm
// sim.Run enforces a deadline with a background context.
func runRetrievalTest(chunkCount int, nodeCount int) error {
	sim := simulation.New(retrievalSimServiceMap)
	defer sim.Close()

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	// Snapshot files are keyed by node count (see comment block above).
	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//this is the node selected for upload
		node := sim.RandomUpNode()
		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
		if !ok {
			return fmt.Errorf("No localstore")
		}
		lstore := item.(*storage.LocalStore)
		// Upload directly into the pivot node's local store; assigns to the
		// outer `err` captured by this closure.
		conf.hashes, err = uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
		if err != nil {
			return err
		}
		// Wait until kademlia is healthy before checking retrieval.
		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
		// or until the timeout is reached.
	REPEAT:
		for {
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				//check on the node's FileStore (netstore)
				item, ok := sim.NodeItem(id, bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("No filestore")
				}
				fileStore := item.(*storage.FileStore)
				//check all chunks
				for _, hash := range conf.hashes {
					// Retrieve's error is deliberately ignored: the Size
					// call below surfaces any failure, which just triggers
					// another retry round.
					reader, _ := fileStore.Retrieve(context.TODO(), hash)
					//check that we can read the chunk size and that it corresponds to the generated chunk size
					if s, err := reader.Size(ctx, nil); err != nil || s != int64(chunkSize) {
						log.Debug("Retrieve error", "err", err, "hash", hash, "nodeId", id, "size", s)
						// Back off briefly, then restart the check over all
						// nodes from scratch.
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					log.Debug(fmt.Sprintf("Chunk with root hash %x successfully retrieved", hash))
				}
			}
			// all nodes and files found, exit loop and return without error
			return nil
		}
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}