// Copyright 2018 The susy-graviton Authors
// This file is part of the susy-graviton library.
//
// The susy-graviton library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The susy-graviton library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the susy-graviton library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"runtime"
	"sync"
	"testing"
	"time"

	"github.com/susy-go/susy-graviton/common"
	"github.com/susy-go/susy-graviton/log"
	"github.com/susy-go/susy-graviton/node"
	"github.com/susy-go/susy-graviton/p2p/enode"
	"github.com/susy-go/susy-graviton/p2p/simulations"
	"github.com/susy-go/susy-graviton/p2p/simulations/adapters"
	"github.com/susy-go/susy-graviton/swarm/network"
	"github.com/susy-go/susy-graviton/swarm/network/simulation"
	"github.com/susy-go/susy-graviton/swarm/pot"
	"github.com/susy-go/susy-graviton/swarm/state"
	"github.com/susy-go/susy-graviton/swarm/storage"
	"github.com/susy-go/susy-graviton/swarm/storage/mock"
	mockmem "github.com/susy-go/susy-graviton/swarm/storage/mock/mem"
	"github.com/susy-go/susy-graviton/swarm/testutil"
)

const MaxTimeout = 600

type synctestConfig struct {
	addrs         [][]byte
	hashes        []storage.Address
	idToChunksMap map[enode.ID][]int
	//chunksToNodesMap map[string][]int
	addrToIDMap map[string]enode.ID
}

const (
	// Simulation event types emitted while chunks propagate through the
	// network, from creation on the upload node to arrival at a syncing peer.
	EventTypeChunkCreated   simulations.EventType = "chunkCreated"
	EventTypeChunkOffered   simulations.EventType = "chunkOffered"
	EventTypeChunkWanted    simulations.EventType = "chunkWanted"
	EventTypeChunkDelivered simulations.EventType = "chunkDelivered"
	EventTypeChunkArrived   simulations.EventType = "chunkArrived"
	EventTypeSimTerminated  simulations.EventType = "simTerminated"
)

// Tests in this file should not request chunks from peers.
// This function panics to signal a problem if such a request is made.
func dummyRequestFromPeers(_ context.Context, req *network.Request) (*enode.ID, chan struct{}, error) {
	panic(fmt.Sprintf("unexpected request: address %s, source %s", req.Addr.String(), req.Source.String()))
}

//TestSyncingViaGlobalSync is a syncing test for nodes.
//One node is randomly selected as the pivot node.
//A configurable number of chunks and nodes can be
//provided to the test; the chunks are uploaded
//to the pivot node, and we check that the other nodes
//get the chunks they are expected to store based on
//the syncing protocol.
//The number of chunks and nodes can also be provided via the command line.
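//For example, to run a single combination directly (flag names inferred from
//the package-level variables used below; they are defined in this package's
//shared test setup):
//
//  go test ./swarm/network/stream -run TestSyncingViaGlobalSync -nodes 16 -chunks 32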
func TestSyncingViaGlobalSync(t *testing.T) {
	if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
		t.Skip("Flaky on mac on travis")
	}
	//if nodes/chunks have been provided via the command line,
	//run the test with those values
	if *nodes != 0 && *chunks != 0 {
		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
		testSyncingViaGlobalSync(t, *chunks, *nodes)
	} else {
		var nodeCnt []int
		var chnkCnt []int
		//if the `longrunning` flag has been provided,
		//run more test combinations
		if *longrunning {
			chnkCnt = []int{1, 8, 32, 256, 1024}
			nodeCnt = []int{16, 32, 64, 128, 256}
		} else if raceTest {
			// TestSyncingViaGlobalSync allocates a lot of memory
			// with the race detector. By reducing the number of
			// chunks and nodes, memory consumption is lower and
			// data races are still checked, while correctness of
			// syncing is tested with more chunks and nodes in
			// regular (!race) tests.
			chnkCnt = []int{4}
			nodeCnt = []int{16}
		} else {
			//default test
			chnkCnt = []int{4, 32}
			nodeCnt = []int{32, 16}
		}
		for _, chnk := range chnkCnt {
			for _, n := range nodeCnt {
				log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", chnk, n))
				testSyncingViaGlobalSync(t, chnk, n)
			}
		}
	}
}

var simServiceMap = map[string]simulation.ServiceFunc{
	"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
		addr, netStore, delivery, clean, err := newNetStoreAndDeliveryWithRequestFunc(ctx, bucket, dummyRequestFromPeers)
		if err != nil {
			return nil, nil, err
		}

		var dir string
		var store *state.DBStore
		if raceTest {
			// Use an on-disk DBStore to reduce memory consumption in race tests.
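			// (The race detector keeps shadow state for every memory access,
			// which multiplies the footprint of a purely in-memory store;
			// trading that memory for disk I/O keeps race builds manageable.)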
			dir, err = ioutil.TempDir("", "swarm-stream-")
			if err != nil {
				return nil, nil, err
			}
			store, err = state.NewDBStore(dir)
			if err != nil {
				return nil, nil, err
			}
		} else {
			store = state.NewInmemoryStore()
		}

		r := NewRegistry(addr.ID(), delivery, netStore, store, &RegistryOptions{
			Retrieval:       RetrievalDisabled,
			Syncing:         SyncingAutoSubscribe,
			SyncUpdateDelay: 3 * time.Second,
		}, nil)

		bucket.Store(bucketKeyRegistry, r)

		cleanup = func() {
			r.Close()
			clean()
			// Remove the temporary on-disk store, if one was created above.
			if dir != "" {
				os.RemoveAll(dir)
			}
		}

		return r, cleanup, nil
	},
}

func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
	sim := simulation.New(simServiceMap)
	defer sim.Close()

	log.Info("Initializing test config")

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		t.Fatal(err)
	}

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancelSimRun()

	if _, err := sim.WaitTillHealthy(ctx); err != nil {
		t.Fatal(err)
	}

	result := runSim(conf, ctx, sim, chunkCount)

	if result.Error != nil {
		t.Fatal(result.Error)
	}
	log.Info("Simulation ended")
}

func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulation, chunkCount int) simulation.Result {
	return sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
		disconnected := watchDisconnections(ctx, sim)
		defer func() {
			if err != nil && disconnected.bool() {
				err = errors.New("disconnect events received")
			}
		}()

		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address for this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on the overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//select a random up node; this is the node used for the upload
		node := sim.Net.GetRandomUpNode()
		item, ok := sim.NodeItem(node.ID(), bucketKeyStore)
		if !ok {
			return fmt.Errorf("no localstore")
		}
		lstore := item.(*storage.LocalStore)
		hashes, err := uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
		if err != nil {
			return err
		}
		for _, h := range hashes {
			evt := &simulations.Event{
				Type: EventTypeChunkCreated,
				Node: sim.Net.GetNode(node.ID()),
				Data: h.String(),
			}
			sim.Net.Events().Send(evt)
		}
		conf.hashes = append(conf.hashes, hashes...)
		mapKeysToNodes(conf)

		// The chunk retrieval check is repeated until all uploaded chunks are
		// found on all responsible nodes, or until the timeout is reached.
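		// The check below is a labelled polling loop: as soon as one expected
		// chunk is missing, it sleeps briefly and restarts the full scan via
		// `continue REPEAT`. It only returns nil once every node holds every
		// chunk it is responsible for; otherwise the surrounding context
		// (2 minutes, set in testSyncingViaGlobalSync) aborts the run.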
		var globalStore mock.GlobalStorer
		if *useMockStore {
			globalStore = mockmem.NewGlobalStore()
		}
	REPEAT:
		for {
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				localChunks := conf.idToChunksMap[id]
				for _, ch := range localChunks {
					//get the real chunk by its index in the hashes array
					chunk := conf.hashes[ch]
					log.Trace(fmt.Sprintf("checking node for chunk %s", chunk))
					//check if the expected chunk is indeed in the localstore
					var err error
					if *useMockStore {
						//use the globalStore if the mockStore should be used; in that case,
						//the complete localStore stack is bypassed for getting the chunk
						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
					} else {
						//use the actual localstore
						item, ok := sim.NodeItem(id, bucketKeyStore)
						if !ok {
							return fmt.Errorf("error accessing localstore")
						}
						lstore := item.(*storage.LocalStore)
						_, err = lstore.Get(ctx, chunk)
					}
					if err != nil {
						log.Debug(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
						// Back off briefly so the log is not flooded while waiting.
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					evt := &simulations.Event{
						Type: EventTypeChunkArrived,
						Node: sim.Net.GetNode(id),
						Data: chunk.String(),
					}
					sim.Net.Events().Send(evt)
					log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
				}
			}
			return nil
		}
	})
}

//mapKeysToNodes maps each chunk key to the overlay addresses responsible for storing it
func mapKeysToNodes(conf *synctestConfig) {
	nodemap := make(map[string][]int)
	//build a pot of the node overlay addresses
	np := pot.NewPot(nil, 0)
	indexmap := make(map[string]int)
	for i, a := range conf.addrs {
		indexmap[string(a)] = i
		np, _, _ = pot.Add(np, a, pof)
	}

	ppmap := network.NewPeerPotMap(network.NewKadParams().NeighbourhoodSize, conf.addrs)

	//for each chunk hash, run EachNeighbour on the address pot to identify the closest nodes
	log.Trace(fmt.Sprintf("Generated hash chunk(s): %v", conf.hashes))
	for i := 0; i < len(conf.hashes); i++ {
		var a []byte
		np.EachNeighbour([]byte(conf.hashes[i]), pof, func(val pot.Val, po int) bool {
			// take the first (closest) address
			a = val.([]byte)
			return false
		})

		nns := ppmap[common.Bytes2Hex(a)].NNSet
		nns = append(nns, a)

		for _, p := range nns {
			nodemap[string(p)] = append(nodemap[string(p)], i)
		}
	}
	for addr, chunks := range nodemap {
		//this selects which chunks are expected to be found with the given node
		conf.idToChunksMap[conf.addrToIDMap[addr]] = chunks
	}
	log.Debug(fmt.Sprintf("Map of expected chunks by ID: %v", conf.idToChunksMap))
}

//uploadFileToSingleNodeStore uploads chunkCount files of chunkSize bytes each
//to a single local node store and returns their root addresses
func uploadFileToSingleNodeStore(id enode.ID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) {
	log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
	fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
	size := chunkSize
	var rootAddrs []storage.Address
	for i := 0; i < chunkCount; i++ {
		rk, wait, err := fileStore.Store(context.TODO(), testutil.RandomReader(i, size), int64(size), false)
		if err != nil {
			return nil, err
		}
		err = wait(context.TODO())
		if err != nil {
			return nil, err
		}
		rootAddrs = append(rootAddrs, rk)
	}

	return rootAddrs, nil
}
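// potNearestExample is a minimal, illustrative sketch (not used by the tests
// above) of the pot lookup pattern that mapKeysToNodes relies on: build a pot
// of overlay addresses, then ask for the address closest to a target key.
// The function name is hypothetical; pot.NewPot, pot.Add, EachNeighbour and
// pof are exactly the calls used in mapKeysToNodes.
func potNearestExample(addrs [][]byte, target []byte) []byte {
	np := pot.NewPot(nil, 0)
	for _, a := range addrs {
		np, _, _ = pot.Add(np, a, pof)
	}
	var nearest []byte
	// EachNeighbour visits addresses in order of proximity to target;
	// returning false stops the iteration after the closest one.
	np.EachNeighbour(target, pof, func(val pot.Val, po int) bool {
		nearest = val.([]byte)
		return false
	})
	return nearest
}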