github.com/letterj/go-ethereum@v1.8.22-0.20190204142846-520024dfd689/swarm/network/stream/snapshot_retrieval_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"bytes" // used by the illustrative sketches below
	"context"
	crand "crypto/rand" // used by the illustrative sketches below
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

// constants for random file generation
const (
	minFileSize = 2
	maxFileSize = 40
)

// TestFileRetrieval is a retrieval test for nodes.
// A configurable number of nodes can be provided to the test.
// Files are uploaded to nodes; other nodes then try to retrieve them.
// The number of nodes can also be provided on the command line.
func TestFileRetrieval(t *testing.T) {
	if *nodes != 0 {
		err := runFileRetrievalTest(*nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		nodeCnt := []int{16}
		// if the `longrunning` flag has been provided,
		// run more test combinations
		if *longrunning {
			nodeCnt = append(nodeCnt, 32, 64, 128)
		}
		for _, n := range nodeCnt {
			err := runFileRetrievalTest(n)
			if err != nil {
				t.Fatal(err)
			}
		}
	}
}
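// Example invocations for the retrieval tests in this file (a sketch; it
// assumes the -nodes, -chunks and -longrunning flags referenced above are
// registered elsewhere in this package, e.g. in its common test setup):
//
//	go test -run TestFileRetrieval -nodes 32
//	go test -run TestRetrieval -nodes 16 -chunks 32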
// TestRetrieval is a retrieval test for nodes.
// One node is randomly selected to be the pivot node.
// A configurable number of chunks and nodes can be provided to the test;
// the chunks are uploaded to the pivot node and other nodes try to retrieve them.
// The number of chunks and nodes can also be provided on the command line.
func TestRetrieval(t *testing.T) {
	// if nodes/chunks have been provided via the command line,
	// run the tests with these values
	if *nodes != 0 && *chunks != 0 {
		err := runRetrievalTest(*chunks, *nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		var nodeCnt []int
		var chnkCnt []int
		// if the `longrunning` flag has been provided,
		// run more test combinations
		if *longrunning {
			nodeCnt = []int{16, 32, 128}
			chnkCnt = []int{4, 32, 256}
		} else {
			// default test
			nodeCnt = []int{16}
			chnkCnt = []int{32}
		}
		for _, n := range nodeCnt {
			for _, c := range chnkCnt {
				err := runRetrievalTest(c, n)
				if err != nil {
					t.Fatal(err)
				}
			}
		}
	}
}

var retrievalSimServiceMap = map[string]simulation.ServiceFunc{
	"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
		addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
		if err != nil {
			return nil, nil, err
		}

		r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
			Retrieval:       RetrievalEnabled,
			Syncing:         SyncingAutoSubscribe,
			SyncUpdateDelay: 3 * time.Second,
		}, nil)

		cleanup = func() {
			r.Close()
			clean()
		}

		return r, cleanup, nil
	},
}

/*
The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. Nevertheless, a health check runs in the
simulation's `action` function.

The snapshot should have 'streamer' in its service list.
*/
func runFileRetrievalTest(nodeCount int) error {
	sim := simulation.New(retrievalSimServiceMap)
	defer sim.Close()

	log.Info("Initializing test config")

	conf := &synctestConfig{}
	// map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	// map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	// slice where the generated file root hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancelSimRun()

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			// get the kademlia overlay address from this ID
			a := n.Bytes()
			// append it to the slice of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			// the proximity calculation is on the overlay addr,
			// the p2p/simulations check func triggers on enode.ID,
			// so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		// a slice for the random files
		var randomFiles []string

		conf.hashes, randomFiles, err = uploadFilesToNodes(sim)
		if err != nil {
			return err
		}
		if _, err := sim.WaitTillHealthy(ctx); err != nil {
			return err
		}

		// The file retrieval check is repeated until all uploaded files are retrieved from all nodes
		// or until the timeout is reached.
	REPEAT:
		for {
			for _, id := range nodeIDs {
				// for each expected file, check if it is in the local store
				item, ok := sim.NodeItem(id, bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("no filestore")
				}
				fileStore := item.(*storage.FileStore)
				// check all files
				for i, hash := range conf.hashes {
					reader, _ := fileStore.Retrieve(context.TODO(), hash)
					// check that we can read the file size and that it corresponds to the generated file size
					if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) {
						log.Debug("Retrieve error", "err", err, "hash", hash, "nodeId", id)
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
				}
			}
			return nil
		}
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}
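// uploadRandomFile is a minimal, hypothetical sketch of the kind of upload
// that uploadFilesToNodes (defined elsewhere in this package) performs per
// node. It illustrates the storage.FileStore API relied on above: Store
// returns the content's root hash plus a wait function that blocks until all
// chunks are persisted. The function name and shape are illustrative
// assumptions, not the actual helper; "bytes" and "crypto/rand" were added
// to the import block for these sketches.
func uploadRandomFile(ctx context.Context, fileStore *storage.FileStore, size int) (storage.Address, string, error) {
	// generate a random payload of the requested size
	data := make([]byte, size)
	if _, err := crand.Read(data); err != nil {
		return nil, "", err
	}
	// store the payload unencrypted and wait for chunking to complete
	hash, wait, err := fileStore.Store(ctx, bytes.NewReader(data), int64(size), false)
	if err != nil {
		return nil, "", err
	}
	if err := wait(ctx); err != nil {
		return nil, "", err
	}
	return hash, string(data), nil
}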
/*
The test generates the given number of chunks.

The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. Nevertheless, a health check runs in the
simulation's `action` function.

The snapshot should have 'streamer' in its service list.
*/
func runRetrievalTest(chunkCount int, nodeCount int) error {
	sim := simulation.New(retrievalSimServiceMap)
	defer sim.Close()

	conf := &synctestConfig{}
	// map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	// map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	// slice where the generated chunk root hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			// get the kademlia overlay address from this ID
			a := n.Bytes()
			// append it to the slice of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			// the proximity calculation is on the overlay addr,
			// the p2p/simulations check func triggers on enode.ID,
			// so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		// this is the node selected for upload
		node := sim.Net.GetRandomUpNode()
		item, ok := sim.NodeItem(node.ID(), bucketKeyStore)
		if !ok {
			return fmt.Errorf("no localstore")
		}
		lstore := item.(*storage.LocalStore)
		conf.hashes, err = uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
		if err != nil {
			return err
		}
		if _, err := sim.WaitTillHealthy(ctx); err != nil {
			return err
		}

		// The chunk retrieval check is repeated until all uploaded chunks are retrieved from all nodes
		// or until the timeout is reached.
	REPEAT:
		for {
			for _, id := range nodeIDs {
				// for each expected chunk, check if it is in the local store;
				// check on the node's FileStore (netstore)
				item, ok := sim.NodeItem(id, bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("no filestore")
				}
				fileStore := item.(*storage.FileStore)
				// check all chunks
				for _, hash := range conf.hashes {
					reader, _ := fileStore.Retrieve(context.TODO(), hash)
					// check that we can read the chunk size and that it corresponds to the generated chunk size
					if s, err := reader.Size(ctx, nil); err != nil || s != int64(chunkSize) {
						log.Debug("Retrieve error", "err", err, "hash", hash, "nodeId", id, "size", s)
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					log.Debug(fmt.Sprintf("Chunk with root hash %x successfully retrieved", hash))
				}
			}
			// all nodes and chunks found, exit the loop and return without error
			return nil
		}
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}
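// storeChunksOnNode is a hypothetical sketch of what uploadFileToSingleNodeStore
// (defined elsewhere in this package) plausibly does: wrap the pivot node's
// LocalStore in a FileStore and store chunkCount random payloads of chunkSize
// bytes each, collecting the resulting root hashes. Names and exact behavior
// are illustrative assumptions, not the actual helper.
func storeChunksOnNode(ctx context.Context, lstore *storage.LocalStore, chunkCount int) ([]storage.Address, error) {
	// a FileStore layered over the node's LocalStore chunks and stores the data
	fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
	hashes := make([]storage.Address, 0, chunkCount)
	for i := 0; i < chunkCount; i++ {
		data := make([]byte, chunkSize)
		if _, err := crand.Read(data); err != nil {
			return nil, err
		}
		hash, wait, err := fileStore.Store(ctx, bytes.NewReader(data), int64(chunkSize), false)
		if err != nil {
			return nil, err
		}
		// block until the chunks have been flushed to the store
		if err := wait(ctx); err != nil {
			return nil, err
		}
		hashes = append(hashes, hash)
	}
	return hashes, nil
}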