github.com/pslzym/go-ethereum@v1.8.17-0.20180926104442-4b6824e07b1b/swarm/network/stream/snapshot_retrieval_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package stream

import (
	"context"
	"fmt"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

//constants for random file generation
const (
	minFileSize = 2
	maxFileSize = 40
)

//This test is a retrieval test for nodes.
//A configurable number of nodes can be
//provided to the test.
//Files are uploaded to nodes, other nodes try to retrieve the files.
//Number of nodes can be provided via commandline too.
func TestFileRetrieval(t *testing.T) {
	if *nodes != 0 {
		err := runFileRetrievalTest(*nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		nodeCnt := []int{16}
		//if the `longrunning` flag has been provided
		//run more test combinations
		if *longrunning {
			nodeCnt = append(nodeCnt, 32, 64, 128)
		}
		for _, n := range nodeCnt {
			err := runFileRetrievalTest(n)
			if err != nil {
				t.Fatal(err)
			}
		}
	}
}

//This test is a retrieval test for nodes.
//One node is randomly selected to be the pivot node.
//A configurable number of chunks and nodes can be
//provided to the test, the number of chunks is uploaded
//to the pivot node and other nodes try to retrieve the chunk(s).
//Number of chunks and nodes can be provided via commandline too.
func TestRetrieval(t *testing.T) {
	//if nodes/chunks have been provided via commandline,
	//run the tests with these values
	if *nodes != 0 && *chunks != 0 {
		err := runRetrievalTest(*chunks, *nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		var nodeCnt []int
		var chnkCnt []int
		//if the `longrunning` flag has been provided
		//run more test combinations
		if *longrunning {
			nodeCnt = []int{16, 32, 128}
			chnkCnt = []int{4, 32, 256}
		} else {
			//default test
			nodeCnt = []int{16}
			chnkCnt = []int{32}
		}
		for _, n := range nodeCnt {
			for _, c := range chnkCnt {
				err := runRetrievalTest(c, n)
				if err != nil {
					t.Fatal(err)
				}
			}
		}
	}
}

/*

The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network.
Nevertheless a health check runs in the
simulation's `action` function.

The snapshot should have 'streamer' in its service list.
*/
func runFileRetrievalTest(nodeCount int) error {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			node := ctx.Config.Node()
			addr := network.NewAddr(node)
			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)

			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				DoSync:          true,
				SyncUpdateDelay: 3 * time.Second,
			})

			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			cleanup = func() {
				os.RemoveAll(datadir)
				netStore.Close()
				r.Close()
			}

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	log.Info("Initializing test config")

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancelSimRun()

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//an array for the random files
		var randomFiles []string

		conf.hashes, randomFiles, err = uploadFilesToNodes(sim)
		if err != nil {
			return err
		}
		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
		// or until the timeout is reached.
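		// Each pass polls every node's FileStore and repeats the check if any file
		// cannot yet be read back at its expected size; the 1-minute simulation
		// context created above bounds the whole run, so the simulation is aborted
		// with a timeout error if retrieval never completes.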
		allSuccess := false
		for !allSuccess {
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				localChunks := conf.idToChunksMap[id]
				localSuccess := true
				for _, ch := range localChunks {
					//get the real chunk by the index in the index array
					chunk := conf.hashes[ch]
					log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
					//check if the expected chunk is indeed in the localstore
					var err error
					//check on the node's FileStore (netstore)
					item, ok := sim.NodeItem(id, bucketKeyFileStore)
					if !ok {
						return fmt.Errorf("No registry")
					}
					fileStore := item.(*storage.FileStore)
					//check all chunks
					for i, hash := range conf.hashes {
						reader, _ := fileStore.Retrieve(context.TODO(), hash)
						//check that we can read the file size and that it corresponds to the generated file size
						if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) {
							allSuccess = false
							log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id)
						} else {
							log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
						}
					}
					if err != nil {
						log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
						localSuccess = false
					} else {
						log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
					}
				}
				allSuccess = localSuccess
			}
		}
		if !allSuccess {
			return fmt.Errorf("Not all chunks succeeded!")
		}
		return nil
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}

/*
The test generates the given number of chunks.

The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. Nevertheless a health check runs in the
simulation's `action` function.

The snapshot should have 'streamer' in its service list.
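The chunks are uploaded to the local store of a single, randomly selected
node; every node in the snapshot is then expected to be able to retrieve
them through its FileStore.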
*/
func runRetrievalTest(chunkCount int, nodeCount int) error {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			node := ctx.Config.Node()
			addr := network.NewAddr(node)
			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)

			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				DoSync:          true,
				SyncUpdateDelay: 0,
			})

			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
			bucketKeyFileStore = simulation.BucketKey("filestore")
			bucket.Store(bucketKeyFileStore, fileStore)

			cleanup = func() {
				os.RemoveAll(datadir)
				netStore.Close()
				r.Close()
			}

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//this is the node selected for upload
		node := sim.RandomUpNode()
		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
		if !ok {
			return fmt.Errorf("No localstore")
		}
		lstore := item.(*storage.LocalStore)
		conf.hashes, err = uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
		if err != nil {
			return err
		}
		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		// Chunk retrieval check is repeated until all uploaded chunks are retrieved from all nodes.
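		// Note that this run uses context.Background() without a timeout (unlike
		// runFileRetrievalTest above), so the loop below relies on syncing and
		// retrieval eventually succeeding for every chunk.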
		allSuccess := false
		for !allSuccess {
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				localChunks := conf.idToChunksMap[id]
				localSuccess := true
				for _, ch := range localChunks {
					//get the real chunk by the index in the index array
					chunk := conf.hashes[ch]
					log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
					//check if the expected chunk is indeed in the localstore
					var err error
					//check on the node's FileStore (netstore)
					item, ok := sim.NodeItem(id, bucketKeyFileStore)
					if !ok {
						return fmt.Errorf("No registry")
					}
					fileStore := item.(*storage.FileStore)
					//check all chunks
					for _, hash := range conf.hashes {
						reader, _ := fileStore.Retrieve(context.TODO(), hash)
						//check that we can read the chunk size and that it corresponds to the generated chunk size;
						//chunkSize is assumed to be the per-upload size used by uploadFileToSingleNodeStore
						//(randomFiles is not populated in this test, so it cannot be used for the comparison)
						if s, err := reader.Size(ctx, nil); err != nil || s != int64(chunkSize) {
							allSuccess = false
							log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id)
						} else {
							log.Debug(fmt.Sprintf("Chunk with root hash %x successfully retrieved", hash))
						}
					}
					if err != nil {
						log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
						localSuccess = false
					} else {
						log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
					}
				}
				allSuccess = localSuccess
			}
		}
		if !allSuccess {
			return fmt.Errorf("Not all chunks succeeded!")
		}
		return nil
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}