github.com/jincm/wesharechain@v0.0.0-20210122032815-1537409ce26a/chain/swarm/network/stream/snapshot_sync_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package stream

import (
	"context"
	"errors"
	"fmt"
	"os"
	"runtime"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/pot"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/storage/mock"
	mockmem "github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
	"github.com/ethereum/go-ethereum/swarm/testutil"
)

type synctestConfig struct {
	addrs         [][]byte
	hashes        []storage.Address
	idToChunksMap map[enode.ID][]int
	//chunksToNodesMap map[string][]int
	addrToIDMap map[string]enode.ID
}

const (
	// Simulation event types emitted as chunks are created, offered,
	// requested and delivered during syncing.
	EventTypeChunkCreated   simulations.EventType = "chunkCreated"
	EventTypeChunkOffered   simulations.EventType = "chunkOffered"
	EventTypeChunkWanted    simulations.EventType = "chunkWanted"
	EventTypeChunkDelivered simulations.EventType = "chunkDelivered"
	EventTypeChunkArrived   simulations.EventType = "chunkArrived"
	EventTypeSimTerminated  simulations.EventType = "simTerminated"
)

// Tests in this file should not request chunks from peers.
// This function panics to signal a problem if such a request has been made.
func dummyRequestFromPeers(_ context.Context, req *network.Request) (*enode.ID, chan struct{}, error) {
	panic(fmt.Sprintf("unexpected request: address %s, source %s", req.Addr.String(), req.Source.String()))
}

//This test is a syncing test for nodes.
//One node is randomly selected to be the pivot node.
//A configurable number of chunks and nodes can be
//provided to the test; the given number of chunks is uploaded
//to the pivot node, and we check that nodes get the chunks
//they are expected to store based on the syncing protocol.
//The number of chunks and nodes can also be provided via the command line.
func TestSyncingViaGlobalSync(t *testing.T) {
	if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
		t.Skip("Flaky on mac on travis")
	}

	if testutil.RaceEnabled {
		t.Skip("Segfaults on Travis with -race")
	}

	//if nodes/chunks have been provided via commandline,
	//run the tests with these values
	if *nodes != 0 && *chunks != 0 {
		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
		testSyncingViaGlobalSync(t, *chunks, *nodes)
	} else {
		chunkCounts := []int{4, 32}
		nodeCounts := []int{32, 16}

		//if the `longrunning` flag has been provided
		//run more test combinations
		if *longrunning {
			chunkCounts = []int{64, 128}
			nodeCounts = []int{32, 64}
		}

		for _, chunkCount := range chunkCounts {
			for _, n := range nodeCounts {
				log.Info(fmt.Sprintf("Long running test with %d chunks and %d nodes...", chunkCount, n))
				testSyncingViaGlobalSync(t, chunkCount, n)
			}
		}
	}
}

var simServiceMap = map[string]simulation.ServiceFunc{
	"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
		addr, netStore, delivery, clean, err := newNetStoreAndDeliveryWithRequestFunc(ctx, bucket, dummyRequestFromPeers)
		if err != nil {
			return nil, nil, err
		}

		store := state.NewInmemoryStore()

		r := NewRegistry(addr.ID(), delivery, netStore, store, &RegistryOptions{
			Retrieval:       RetrievalDisabled,
			Syncing:         SyncingAutoSubscribe,
			SyncUpdateDelay: 3 * time.Second,
		}, nil)

		bucket.Store(bucketKeyRegistry, r)

		cleanup = func() {
			r.Close()
			clean()
		}

		return r, cleanup, nil
	},
}

func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
	sim := simulation.New(simServiceMap)
	defer sim.Close()

	log.Info("Initializing test config")

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		t.Fatal(err)
	}

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancelSimRun()

	if _, err := sim.WaitTillHealthy(ctx); err != nil {
		t.Fatal(err)
	}

	result := runSim(conf, ctx, sim, chunkCount)

	if result.Error != nil {
		t.Fatal(result.Error)
	}
	log.Info("Simulation ended")
}

func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulation, chunkCount int) simulation.Result {

	return sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
		disconnected := watchDisconnections(ctx, sim)
		defer func() {
			if err != nil && disconnected.bool() {
				err = errors.New("disconnect events received")
			}
		}()

		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//get a random up node
		//this is the node selected for upload
		node := sim.Net.GetRandomUpNode()
		item, ok := sim.NodeItem(node.ID(), bucketKeyStore)
		if !ok {
			return fmt.Errorf("no localstore")
		}
		lstore := item.(*storage.LocalStore)
		hashes, err := uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
		if err != nil {
			return err
		}
		for _, h := range hashes {
			evt := &simulations.Event{
				Type: EventTypeChunkCreated,
				Node: sim.Net.GetNode(node.ID()),
				Data: h.String(),
			}
			sim.Net.Events().Send(evt)
		}
		conf.hashes = append(conf.hashes, hashes...)
		mapKeysToNodes(conf)

		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
		// or until the timeout is reached.
		var globalStore mock.GlobalStorer
		if *useMockStore {
			globalStore = mockmem.NewGlobalStore()
		}
	REPEAT:
		for {
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				localChunks := conf.idToChunksMap[id]
				for _, ch := range localChunks {
					//get the real chunk by the index in the index array
					chunk := conf.hashes[ch]
					log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
					//check if the expected chunk is indeed in the localstore
					var err error
					if *useMockStore {
						//use the globalStore if the mockStore should be used; in that case,
						//the complete localStore stack is bypassed for getting the chunk
						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
					} else {
						//use the actual localstore
						item, ok := sim.NodeItem(id, bucketKeyStore)
						if !ok {
							return fmt.Errorf("error accessing localstore")
						}
						lstore := item.(*storage.LocalStore)
						_, err = lstore.Get(ctx, chunk)
					}
					if err != nil {
						log.Debug(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
						// do not spam the log; wait a bit before checking again
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					evt := &simulations.Event{
						Type: EventTypeChunkArrived,
						Node: sim.Net.GetNode(id),
						Data: chunk.String(),
					}
					sim.Net.Events().Send(evt)
					log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
				}
			}
			return nil
		}
	})
}

//map chunk keys to the addresses (nodes) that are responsible for them
func mapKeysToNodes(conf *synctestConfig) {
	nodemap := make(map[string][]int)
	//build a pot of the nodes' overlay addresses
	np := pot.NewPot(nil, 0)
	indexmap := make(map[string]int)
	for i, a := range conf.addrs {
		indexmap[string(a)] = i
		np, _, _ = pot.Add(np, a, pof)
	}

	ppmap := network.NewPeerPotMap(network.NewKadParams().NeighbourhoodSize, conf.addrs)

	//for each chunk hash, run EachNeighbour on the address pot to identify the closest node
	log.Trace(fmt.Sprintf("Generated hash chunk(s): %v", conf.hashes))
	for i := 0; i < len(conf.hashes); i++ {
		var a []byte
		np.EachNeighbour([]byte(conf.hashes[i]), pof, func(val pot.Val, po int) bool {
			// take the first address
			a = val.([]byte)
			return false
		})

		nns := ppmap[common.Bytes2Hex(a)].NNSet
		nns = append(nns, a)

		for _, p := range nns {
			nodemap[string(p)] = append(nodemap[string(p)], i)
		}
	}
	for addr, chunks := range nodemap {
		//this selects which chunks are expected to be found with the given node
		conf.idToChunksMap[conf.addrToIDMap[addr]] = chunks
	}
	log.Debug(fmt.Sprintf("Map of expected chunks by ID: %v", conf.idToChunksMap))
}

//upload a file (chunks) to a single local node store
func uploadFileToSingleNodeStore(id enode.ID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) {
	log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
	fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
	size := chunkSize
	var rootAddrs []storage.Address
	for i := 0; i < chunkCount; i++ {
		rk, wait, err := fileStore.Store(context.TODO(), testutil.RandomReader(i, size), int64(size), false)
		if err != nil {
			return nil, err
		}
		err = wait(context.TODO())
		if err != nil {
			return nil, err
		}
		rootAddrs = append(rootAddrs, rk)
	}

	return rootAddrs, nil
}
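
// Illustrative note (not part of the original file): TestSyncingViaGlobalSync reads its
// chunk and node counts from package-level flags (*nodes, *chunks, *longrunning) that are
// assumed to be registered elsewhere in this package's shared test setup, roughly as:
//
//	var (
//		nodes       = flag.Int("nodes", 0, "number of nodes in the simulation")
//		chunks      = flag.Int("chunks", 0, "number of chunks to upload to the pivot node")
//		longrunning = flag.Bool("longrunning", false, "run additional chunk/node combinations")
//	)
//
// Under that assumption, a custom combination could be exercised with, for example:
//
//	go test -run TestSyncingViaGlobalSync -nodes 32 -chunks 64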