github.com/codingfuture/orig-energi3@v0.8.4/swarm/network/stream/snapshot_retrieval_test.go

// Copyright 2018 The Energi Core Authors
// Copyright 2018 The go-ethereum Authors
// This file is part of the Energi Core library.
//
// The Energi Core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Energi Core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Energi Core library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

//constants for random file generation
const (
	minFileSize = 2
	maxFileSize = 40
)

//TestFileRetrieval is a retrieval test for nodes.
//A configurable number of nodes can be provided to the test.
//Files are uploaded to the nodes, and the other nodes then try
//to retrieve them. The number of nodes can also be provided
//via the commandline.
func TestFileRetrieval(t *testing.T) {
	if *nodes != 0 {
		err := runFileRetrievalTest(*nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		nodeCnt := []int{16}
		//if the `longrunning` flag has been provided,
		//run more test combinations
		if *longrunning {
			nodeCnt = append(nodeCnt, 32, 64, 128)
		}
		for _, n := range nodeCnt {
			err := runFileRetrievalTest(n)
			if err != nil {
				t.Fatal(err)
			}
		}
	}
}
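//As a rough usage sketch, both retrieval tests can be driven from the
//commandline (assuming the `nodes`, `chunks` and `longrunning` flags
//dereferenced above are registered under these names in this package's
//shared test setup):
//
//	go test ./swarm/network/stream -run TestFileRetrieval -nodes 32
//	go test ./swarm/network/stream -run TestRetrieval -nodes 16 -chunks 64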
//TestRetrieval is a retrieval test for nodes.
//One node is randomly selected to be the pivot node.
//A configurable number of chunks and nodes can be provided to the test;
//the given number of chunks is uploaded to the pivot node and the other
//nodes try to retrieve the chunk(s). The number of chunks and nodes can
//also be provided via the commandline.
func TestRetrieval(t *testing.T) {
	//if nodes/chunks have been provided via commandline,
	//run the tests with these values
	if *nodes != 0 && *chunks != 0 {
		err := runRetrievalTest(t, *chunks, *nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		var nodeCnt []int
		var chnkCnt []int
		//if the `longrunning` flag has been provided,
		//run more test combinations
		if *longrunning {
			nodeCnt = []int{16, 32, 128}
			chnkCnt = []int{4, 32, 256}
		} else {
			//default test
			nodeCnt = []int{16}
			chnkCnt = []int{32}
		}
		for _, n := range nodeCnt {
			for _, c := range chnkCnt {
				t.Run(fmt.Sprintf("TestRetrieval_%d_%d", n, c), func(t *testing.T) {
					err := runRetrievalTest(t, c, n)
					if err != nil {
						t.Fatal(err)
					}
				})
			}
		}
	}
}

var retrievalSimServiceMap = map[string]simulation.ServiceFunc{
	"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
		addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
		if err != nil {
			return nil, nil, err
		}

		r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
			Retrieval:       RetrievalEnabled,
			Syncing:         SyncingAutoSubscribe,
			SyncUpdateDelay: 3 * time.Second,
		}, nil)

		cleanup = func() {
			r.Close()
			clean()
		}

		return r, cleanup, nil
	},
}

/*
The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. Nevertheless, a health check runs in the
simulation's `action` function.

The snapshot should have 'streamer' in its service list.
*/
func runFileRetrievalTest(nodeCount int) error {
	sim := simulation.New(retrievalSimServiceMap)
	defer sim.Close()

	log.Info("Initializing test config")

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 3*time.Minute)
	defer cancelSimRun()

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on the overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//an array for the random files
		var randomFiles []string

		conf.hashes, randomFiles, err = uploadFilesToNodes(sim)
		if err != nil {
			return err
		}
		if _, err := sim.WaitTillHealthy(ctx); err != nil {
			return err
		}

		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
		// or until the timeout is reached.
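		// The labeled `continue REPEAT` below restarts the sweep over all
		// nodes from scratch as soon as any single file fails to come back,
		// so the loop only returns once every node can serve every file;
		// the 3-minute simulation context above bounds the retries.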
	REPEAT:
		for {
			for _, id := range nodeIDs {
				//for each expected file, check if it is in the local store
				item, ok := sim.NodeItem(id, bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("No filestore")
				}
				fileStore := item.(*storage.FileStore)
				//check all chunks
				for i, hash := range conf.hashes {
					reader, _ := fileStore.Retrieve(context.TODO(), hash)
					//check that we can read the file size and that it corresponds to the generated file size
					if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) {
						log.Debug("Retrieve error", "err", err, "hash", hash, "nodeId", id)
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
				}
			}
			return nil
		}
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}

/*
The test generates the given number of chunks.

The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. Nevertheless, a health check runs in the
simulation's `action` function.

The snapshot should have 'streamer' in its service list.
*/
func runRetrievalTest(t *testing.T, chunkCount int, nodeCount int) error {
	t.Helper()
	sim := simulation.New(retrievalSimServiceMap)
	defer sim.Close()

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on the overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//this is the node selected for upload
		node := sim.Net.GetRandomUpNode()
		item, ok := sim.NodeItem(node.ID(), bucketKeyStore)
		if !ok {
			return fmt.Errorf("No localstore")
		}
		lstore := item.(*storage.LocalStore)
		conf.hashes, err = uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
		if err != nil {
			return err
		}
		if _, err := sim.WaitTillHealthy(ctx); err != nil {
			return err
		}

		// Chunk retrieval check is repeated until all uploaded chunks are retrieved from all nodes
		// or until the timeout is reached.
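		// Note that, unlike runFileRetrievalTest above, this simulation runs
		// with context.Background() and no explicit deadline, so the retry
		// loop below is bounded only by the overall `go test` timeout.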
	REPEAT:
		for {
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				//check on the node's FileStore (netstore)
				item, ok := sim.NodeItem(id, bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("No filestore")
				}
				fileStore := item.(*storage.FileStore)
				//check all chunks
				for _, hash := range conf.hashes {
					reader, _ := fileStore.Retrieve(context.TODO(), hash)
					//check that we can read the chunk size and that it corresponds to the generated chunk size
					if s, err := reader.Size(ctx, nil); err != nil || s != int64(chunkSize) {
						log.Debug("Retrieve error", "err", err, "hash", hash, "nodeId", id, "size", s)
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					log.Debug(fmt.Sprintf("Chunk with root hash %x successfully retrieved", hash))
				}
			}
			// all nodes and files found, exit loop and return without error
			return nil
		}
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}
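// Both tests above expect a snapshot fixture at testing/snapshot_<nodeCount>.json
// for every node count they exercise. As a rough sketch of its shape (an
// assumption based on the p2p/simulations snapshot encoding, not a verbatim
// fixture), such a file lists the nodes and their connections:
//
//	{
//	  "nodes": [{"node": {"config": {"id": "...", "services": ["streamer"]}, "up": true}}],
//	  "conns": [{"one": "<enode.ID>", "other": "<enode.ID>", "up": true}]
//	}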