github.com/daragao/go-ethereum@v1.8.14-0.20180809141559-45eaef243198/swarm/network/stream/snapshot_retrieval_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package stream

import (
	"context"
	"fmt"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

//constants for random file generation
const (
	minFileSize = 2
	maxFileSize = 40
)

//This is a file retrieval test for nodes.
//A configurable number of nodes can be provided to the test.
//Files are uploaded to nodes, and other nodes try to retrieve them.
//The number of nodes can also be provided on the command line.
func TestFileRetrieval(t *testing.T) {
	if *nodes != 0 {
		err := runFileRetrievalTest(*nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		nodeCnt := []int{16}
		//if the `longrunning` flag has been provided,
		//run more test combinations
		if *longrunning {
			nodeCnt = append(nodeCnt, 32, 64, 128)
		}
		for _, n := range nodeCnt {
			err := runFileRetrievalTest(n)
			if err != nil {
				t.Fatal(err)
			}
		}
	}
}

//This is a chunk retrieval test for nodes.
//One node is randomly selected to be the pivot node.
//A configurable number of chunks and nodes can be provided to the test:
//the chunks are uploaded to the pivot node, and other nodes try to retrieve them.
//The number of chunks and nodes can also be provided on the command line.
func TestRetrieval(t *testing.T) {
	//if nodes/chunks have been provided via commandline,
	//run the tests with these values
	if *nodes != 0 && *chunks != 0 {
		err := runRetrievalTest(*chunks, *nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		var nodeCnt []int
		var chnkCnt []int
		//if the `longrunning` flag has been provided,
		//run more test combinations
		if *longrunning {
			nodeCnt = []int{16, 32, 128}
			chnkCnt = []int{4, 32, 256}
		} else {
			//default test
			nodeCnt = []int{16}
			chnkCnt = []int{32}
		}
		for _, n := range nodeCnt {
			for _, c := range chnkCnt {
				err := runRetrievalTest(c, n)
				if err != nil {
					t.Fatal(err)
				}
			}
		}
	}
}

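// The *nodes, *chunks and *longrunning flags used above are not declared in
// this file; they are assumed to be registered in a shared test setup file of
// this package. A sketch of the assumed declarations (names and defaults are
// assumptions, not the authoritative definitions):
//
//	nodes       = flag.Int("nodes", 0, "number of nodes")
//	chunks      = flag.Int("chunks", 0, "number of chunks")
//	longrunning = flag.Bool("longrunning", false, "run more test combinations")
//
// With those flags, a typical invocation could look like:
//
//	go test -run TestFileRetrieval -nodes=32
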
/*
The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. Nevertheless, a health check runs in the
simulation's `action` function.

The snapshot should have 'streamer' in its service list.
*/
func runFileRetrievalTest(nodeCount int) error {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {

			id := ctx.Config.ID
			addr := network.NewAddrFromNodeID(id)
			store, datadir, err := createTestLocalStorageForID(id, addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			db := storage.NewDBAPI(localStore)
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, db)

			r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
				DoSync:          true,
				SyncUpdateDelay: 3 * time.Second,
			})

			fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	log.Info("Initializing test config")

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[discover.NodeID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]discover.NodeID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancelSimRun()

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := network.ToOverlayAddr(n.Bytes())
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on discover.NodeID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//an array for the random files
		var randomFiles []string

		conf.hashes, randomFiles, err = uploadFilesToNodes(sim)
		if err != nil {
			return err
		}
		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		// The file retrieval check is repeated until all uploaded files
		// can be retrieved from all nodes, or until the timeout is reached.
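		// Syncing happens asynchronously in the background, so a file that
		// is not yet retrievable may become retrievable once its chunks have
		// propagated; the loop below therefore polls, bounded by the
		// one-minute simulation timeout set above.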
		allSuccess := false
		for !allSuccess {
			//stop polling when the simulation context times out
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
			}
			allSuccess = true
			for _, id := range nodeIDs {
				//get the FileStore (netstore) registered for this node
				item, ok := sim.NodeItem(id, bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("no filestore for node %s", id)
				}
				fileStore := item.(*storage.FileStore)
				//try to retrieve every uploaded file through this node
				for i, hash := range conf.hashes {
					reader, _ := fileStore.Retrieve(context.TODO(), hash)
					//check that the file size can be read and that it
					//corresponds to the generated file size
					if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) {
						allSuccess = false
						log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id)
					} else {
						log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
					}
				}
			}
		}
		return nil
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}

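// synctestConfig is not defined in this file; it is assumed to live in a
// sibling test file of this package. A sketch of the fields used here, for
// reference only (not the authoritative definition):
//
//	type synctestConfig struct {
//		addrs         [][]byte
//		hashes        []storage.Address
//		idToChunksMap map[discover.NodeID][]int
//		addrToIDMap   map[string]discover.NodeID
//	}
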
/*
The test generates the given number of chunks.

The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. Nevertheless, a health check runs in the
simulation's `action` function.

The snapshot should have 'streamer' in its service list.
*/
func runRetrievalTest(chunkCount int, nodeCount int) error {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {

			id := ctx.Config.ID
			addr := network.NewAddrFromNodeID(id)
			store, datadir, err := createTestLocalStorageForID(id, addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			db := storage.NewDBAPI(localStore)
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, db)

			r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
				DoSync:          true,
				SyncUpdateDelay: 0,
			})

			fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[discover.NodeID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]discover.NodeID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	//bound the simulation, so the retrieval check below cannot poll forever
	ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancelSimRun()

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := network.ToOverlayAddr(n.Bytes())
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on discover.NodeID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//this is the node selected for upload
		node := sim.RandomUpNode()
		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
		if !ok {
			return fmt.Errorf("no localstore for node %s", node.ID)
		}
		lstore := item.(*storage.LocalStore)
		conf.hashes, err = uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
		if err != nil {
			return err
		}
		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		// The chunk retrieval check is repeated until all uploaded chunks
		// can be retrieved from all nodes, or until the timeout is reached.
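		// Note: uploadFileToSingleNodeStore is defined elsewhere in this
		// package; the check below assumes it uploads chunkCount files of
		// chunkSize bytes each (chunkSize being a package-level constant),
		// so the retrieved size is compared against chunkSize.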
		allSuccess := false
		for !allSuccess {
			//stop polling when the simulation context times out
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
			}
			allSuccess = true
			for _, id := range nodeIDs {
				//get the FileStore (netstore) registered for this node
				item, ok := sim.NodeItem(id, bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("no filestore for node %s", id)
				}
				fileStore := item.(*storage.FileStore)
				//try to retrieve every uploaded chunk through this node
				for _, hash := range conf.hashes {
					reader, _ := fileStore.Retrieve(context.TODO(), hash)
					//check that the size can be read and that it corresponds
					//to the generated chunk size
					if s, err := reader.Size(ctx, nil); err != nil || s != int64(chunkSize) {
						allSuccess = false
						log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id)
					} else {
						log.Debug(fmt.Sprintf("Chunk with root hash %x successfully retrieved", hash))
					}
				}
			}
		}
		return nil
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}
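
// createTestLocalStorageForID is also defined elsewhere in this package; a
// minimal sketch of the assumed behaviour (a temp-dir backed local store
// keyed by the node's overlay address), for reference only:
//
//	func createTestLocalStorageForID(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, string, error) {
//		datadir, err := ioutil.TempDir("", fmt.Sprintf("syncer-test-%s", id.TerminalString()))
//		if err != nil {
//			return nil, "", err
//		}
//		params := storage.NewDefaultLocalStoreParams()
//		params.ChunkDbPath = datadir
//		params.BaseKey = addr.Over()
//		store, err := storage.NewTestLocalStoreForAddr(params)
//		if err != nil {
//			os.RemoveAll(datadir)
//			return nil, "", err
//		}
//		return store, datadir, nil
//	}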