github.com/dotlike13/wemix30_go@v1.8.23/swarm/network/stream/snapshot_retrieval_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package stream

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

//constants for random file generation
const (
	minFileSize = 2
	maxFileSize = 40
)

//This test is a retrieval test for nodes.
//A configurable number of nodes can be
//provided to the test.
//Files are uploaded to nodes; other nodes try to retrieve the files.
//The number of nodes can be provided via commandline too.
func TestFileRetrieval(t *testing.T) {
	if *nodes != 0 {
		err := runFileRetrievalTest(*nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		nodeCnt := []int{16}
		//if the `longrunning` flag has been provided
		//run more test combinations
		if *longrunning {
			nodeCnt = append(nodeCnt, 32, 64, 128)
		}
		for _, n := range nodeCnt {
			err := runFileRetrievalTest(n)
			if err != nil {
				t.Fatal(err)
			}
		}
	}
}

//This test is a retrieval test for nodes.
//One node is randomly selected to be the pivot node.
//A configurable number of chunks and nodes can be
//provided to the test; the given number of chunks is uploaded
//to the pivot node and other nodes try to retrieve the chunk(s).
//The number of chunks and nodes can be provided via commandline too.
func TestRetrieval(t *testing.T) {
	//if nodes/chunks have been provided via commandline,
	//run the tests with these values
	if *nodes != 0 && *chunks != 0 {
		err := runRetrievalTest(t, *chunks, *nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		var nodeCnt []int
		var chnkCnt []int
		//if the `longrunning` flag has been provided
		//run more test combinations
		if *longrunning {
			nodeCnt = []int{16, 32, 128}
			chnkCnt = []int{4, 32, 256}
		} else {
			//default test
			nodeCnt = []int{16}
			chnkCnt = []int{32}
		}
		for _, n := range nodeCnt {
			for _, c := range chnkCnt {
				t.Run(fmt.Sprintf("TestRetrieval_%d_%d", n, c), func(t *testing.T) {
					err := runRetrievalTest(t, c, n)
					if err != nil {
						t.Fatal(err)
					}
				})
			}
		}
	}
}

var retrievalSimServiceMap = map[string]simulation.ServiceFunc{
	"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
		addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
		if err != nil {
			return nil, nil, err
		}

		r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
			Retrieval:       RetrievalEnabled,
			Syncing:         SyncingAutoSubscribe,
			SyncUpdateDelay: 3 * time.Second,
		}, nil)

		cleanup = func() {
			r.Close()
			clean()
		}

		return r, cleanup, nil
	},
}

/*
The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. Nevertheless a health check runs in the
simulation's `action` function.

The snapshot should have 'streamer' in its service list.
*/
func runFileRetrievalTest(nodeCount int) error {
	sim := simulation.New(retrievalSimServiceMap)
	defer sim.Close()

	log.Info("Initializing test config")

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 3*time.Minute)
	defer cancelSimRun()

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//an array for the random files
		var randomFiles []string

		conf.hashes, randomFiles, err = uploadFilesToNodes(sim)
		if err != nil {
			return err
		}
		if _, err := sim.WaitTillHealthy(ctx); err != nil {
			return err
		}

		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
		// or until the timeout is reached.
	REPEAT:
		for {
			for _, id := range nodeIDs {
				//for each expected file, check if it is in the local store
				item, ok := sim.NodeItem(id, bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("No filestore")
				}
				fileStore := item.(*storage.FileStore)
				//check all chunks
				for i, hash := range conf.hashes {
					reader, _ := fileStore.Retrieve(context.TODO(), hash)
					//check that we can read the file size and that it corresponds to the generated file size
					if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) {
						log.Debug("Retrieve error", "err", err, "hash", hash, "nodeId", id)
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
				}
			}
			return nil
		}
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}

/*
The test generates the given number of chunks.

The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. Nevertheless a health check runs in the
simulation's `action` function.

The snapshot should have 'streamer' in its service list.
*/
func runRetrievalTest(t *testing.T, chunkCount int, nodeCount int) error {
	t.Helper()
	sim := simulation.New(retrievalSimServiceMap)
	defer sim.Close()

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//this is the node selected for upload
		node := sim.Net.GetRandomUpNode()
		item, ok := sim.NodeItem(node.ID(), bucketKeyStore)
		if !ok {
			return fmt.Errorf("No localstore")
		}
		lstore := item.(*storage.LocalStore)
		conf.hashes, err = uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
		if err != nil {
			return err
		}
		if _, err := sim.WaitTillHealthy(ctx); err != nil {
			return err
		}

		// Chunk retrieval check is repeated until all uploaded chunks are retrieved from all nodes
		// or until the timeout is reached.
	REPEAT:
		for {
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				//check on the node's FileStore (netstore)
				item, ok := sim.NodeItem(id, bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("No filestore")
				}
				fileStore := item.(*storage.FileStore)
				//check all chunks
				for _, hash := range conf.hashes {
					reader, _ := fileStore.Retrieve(context.TODO(), hash)
					//check that we can read the chunk size and that it corresponds to the generated chunk size
					if s, err := reader.Size(ctx, nil); err != nil || s != int64(chunkSize) {
						log.Debug("Retrieve error", "err", err, "hash", hash, "nodeId", id, "size", s)
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					log.Debug(fmt.Sprintf("Chunk with root hash %x successfully retrieved", hash))
				}
			}
			// all nodes and chunks found, exit the loop and return without error
			return nil
		}
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}
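
// Note: the *nodes, *chunks and *longrunning flag pointers dereferenced above are
// defined elsewhere in this test package, not in this file. A minimal sketch of
// what such definitions could look like (names and defaults here are assumptions,
// not taken from this file):
//
//	nodes       = flag.Int("nodes", 0, "number of nodes in the simulation")
//	chunks      = flag.Int("chunks", 0, "number of chunks to upload")
//	longrunning = flag.Bool("longrunning", false, "run more test combinations")
//
// With flags of that shape in place, a single combination could be run as, for example:
//
//	go test ./swarm/network/stream -run TestRetrieval -nodes=16 -chunks=32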