github.com/insight-chain/inb-go@v1.1.3-0.20191221022159-da049980ae38/swarm/network/stream/syncer_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"fmt"
	"io/ioutil"
	"math"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/insight-chain/inb-go/common"
	"github.com/insight-chain/inb-go/node"
	"github.com/insight-chain/inb-go/p2p/enode"
	"github.com/insight-chain/inb-go/p2p/simulations/adapters"
	"github.com/insight-chain/inb-go/swarm/log"
	"github.com/insight-chain/inb-go/swarm/network"
	"github.com/insight-chain/inb-go/swarm/network/simulation"
	"github.com/insight-chain/inb-go/swarm/state"
	"github.com/insight-chain/inb-go/swarm/storage"
	"github.com/insight-chain/inb-go/swarm/storage/mock"
	mockmem "github.com/insight-chain/inb-go/swarm/storage/mock/mem"
	"github.com/insight-chain/inb-go/swarm/testutil"
)

const dataChunkCount = 200

func TestSyncerSimulation(t *testing.T) {
	testSyncBetweenNodes(t, 2, 1, dataChunkCount, true, 1)
	testSyncBetweenNodes(t, 4, 1, dataChunkCount, true, 1)
	testSyncBetweenNodes(t, 8, 1, dataChunkCount, true, 1)
	testSyncBetweenNodes(t, 16, 1, dataChunkCount, true, 1)
}

func createMockStore(globalStore mock.GlobalStorer, id enode.ID, addr *network.BzzAddr) (lstore storage.ChunkStore, datadir string, err error) {
	address := common.BytesToAddress(id.Bytes())
	mockStore := globalStore.NewNodeStore(address)
	params := storage.NewDefaultLocalStoreParams()

	datadir, err = ioutil.TempDir("", "localMockStore-"+id.TerminalString())
	if err != nil {
		return nil, "", err
	}
	params.Init(datadir)
	params.BaseKey = addr.Over()
	lstore, err = storage.NewLocalStore(params, mockStore)
	if err != nil {
		return nil, "", err
	}
	return lstore, datadir, nil
}
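// testSyncBetweenNodes builds a chain of `nodes` simulation nodes running the
// streamer service, subscribes each node to the SYNC stream of its successor,
// stores random data along the chain, and then checks that every chunk hash
// collected from proximity-order bin `po` can be retrieved. `conns` is
// accepted for symmetry with other tests but is not used in the body;
// `skipCheck` is forwarded to the registry's RegistryOptions.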
func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool, po uint8) {

	t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			var store storage.ChunkStore
			var datadir string

			node := ctx.Config.Node()
			addr := network.NewAddr(node)
			// hack to put addresses in the same space
			addr.OAddr[0] = byte(0)

			if *useMockStore {
				store, datadir, err = createMockStore(mockmem.NewGlobalStore(), node.ID(), addr)
			} else {
				store, datadir, err = createTestLocalStorageForID(node.ID(), addr)
			}
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				store.Close()
				os.RemoveAll(datadir)
			}
			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyDB, netStore)
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

			bucket.Store(bucketKeyDelivery, delivery)

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				Retrieval: RetrievalDisabled,
				Syncing:   SyncingAutoSubscribe,
				SkipCheck: skipCheck,
			}, nil)

			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	// create context for simulation run
	timeout := 30 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	// defer cancel should come before defer simulation teardown
	defer cancel()

	_, err := sim.AddNodesAndConnectChain(nodes)
	if err != nil {
		t.Fatal(err)
	}
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()

		nodeIndex := make(map[enode.ID]int)
		for i, id := range nodeIDs {
			nodeIndex[id] = i
		}

		disconnections := sim.PeerEvents(
			context.Background(),
			sim.NodeIDs(),
			simulation.NewPeerEventsFilter().Drop(),
		)

		go func() {
			for d := range disconnections {
				if d.Error != nil {
					log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
					// t.Fatal must not be called from a goroutine other than
					// the test's own, so report the failure with t.Error
					t.Error(d.Error)
				}
			}
		}()

		// each node subscribes to each other's swarmChunkServerStreamName
		for j := 0; j < nodes-1; j++ {
			id := nodeIDs[j]
			client, err := sim.Net.GetNode(id).Client()
			if err != nil {
				t.Fatal(err)
			}
			sid := nodeIDs[j+1]
			err = client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream("SYNC", FormatSyncBinKey(1), false), NewRange(0, 0), Top)
			if err != nil {
				return err
			}
			if j > 0 || nodes == 2 {
				item, ok := sim.NodeItem(nodeIDs[j], bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("no filestore")
				}
				fileStore := item.(*storage.FileStore)
				size := chunkCount * chunkSize
				_, wait, err := fileStore.Store(ctx, testutil.RandomReader(j, size), int64(size), false)
				if err != nil {
					t.Fatal(err.Error())
				}
				if err := wait(ctx); err != nil {
					t.Fatal(err)
				}
			}
		}
		// here we distribute chunks of a random file into stores 1...nodes
		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		// collect hashes in po 1 bin for each node
		hashes := make([][]storage.Address, nodes)
		totalHashes := 0
		hashCounts := make([]int, nodes)
		for i := nodes - 1; i >= 0; i-- {
			if i < nodes-1 {
				hashCounts[i] = hashCounts[i+1]
			}
			item, ok := sim.NodeItem(nodeIDs[i], bucketKeyDB)
			if !ok {
				return fmt.Errorf("no DB")
			}
			netStore := item.(*storage.NetStore)
			netStore.Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool {
				hashes[i] = append(hashes[i], addr)
				totalHashes++
				hashCounts[i]++
				return true
			})
		}
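		// Verification pass: for every node, walk the hashes collected from
		// its own bin and the bins of all nodes after it in the chain, and
		// count how many of them resolve against the corresponding NetStore.
		// The test only passes if every collected hash (total) was found.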
		var total, found int
		for _, node := range nodeIDs {
			i := nodeIndex[node]

			for j := i; j < nodes; j++ {
				total += len(hashes[j])
				for _, key := range hashes[j] {
					item, ok := sim.NodeItem(nodeIDs[j], bucketKeyDB)
					if !ok {
						return fmt.Errorf("no DB")
					}
					db := item.(*storage.NetStore)
					_, err := db.Get(ctx, key)
					if err == nil {
						found++
					}
				}
			}
			log.Debug("sync check", "node", node, "index", i, "bin", po, "found", found, "total", total)
		}
		if total == found && total > 0 {
			return nil
		}
		return fmt.Errorf("total %d does not equal found %d", total, found)
	})

	if result.Error != nil {
		t.Fatal(result.Error)
	}
}

// TestSameVersionID just checks that if the version is not changed,
// then streamer peers see each other
func TestSameVersionID(t *testing.T) {
	// test version ID
	v := uint(1)
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			var store storage.ChunkStore
			var datadir string

			node := ctx.Config.Node()
			addr := network.NewAddr(node)

			store, datadir, err = createTestLocalStorageForID(node.ID(), addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				store.Close()
				os.RemoveAll(datadir)
			}
			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyDB, netStore)
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

			bucket.Store(bucketKeyDelivery, delivery)

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				Retrieval: RetrievalDisabled,
				Syncing:   SyncingAutoSubscribe,
			}, nil)
			// assign the same version ID to each node
			r.spec.Version = v

			bucket.Store(bucketKeyRegistry, r)

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	// connect just two nodes
	log.Info("Adding nodes to simulation")
	_, err := sim.AddNodesAndConnectChain(2)
	if err != nil {
		t.Fatal(err)
	}

	log.Info("Starting simulation")
	ctx := context.Background()
	// make sure they have time to connect
	time.Sleep(200 * time.Millisecond)
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		// get the pivot node's registry
		nodes := sim.UpNodeIDs()

		item, ok := sim.NodeItem(nodes[0], bucketKeyRegistry)
		if !ok {
			return fmt.Errorf("no registry")
		}
		registry := item.(*Registry)

		// the peers should connect, thus getting the peer should not return nil
		if registry.getPeer(nodes[1]) == nil {
			t.Fatal("Expected the peer to not be nil, but it is")
		}
		return nil
	})
	if result.Error != nil {
		t.Fatal(result.Error)
	}
	log.Info("Simulation ended")
}
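// TestSameVersionID above and TestDifferentVersionID below both rely on the
// devp2p protocol handshake: streamer peers are registered with the Registry
// only once the `stream` protocol actually runs between two nodes, and a
// protocol is negotiated only when both sides advertise the same name and
// version (r.spec.Version). Equal versions therefore yield a visible peer,
// while differing versions leave getPeer returning nil.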
// TestDifferentVersionID proves that if the streamer protocol version doesn't match,
// then the peers are not connected at streamer level
func TestDifferentVersionID(t *testing.T) {
	// create a variable to hold the version ID
	v := uint(0)
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			var store storage.ChunkStore
			var datadir string

			node := ctx.Config.Node()
			addr := network.NewAddr(node)

			store, datadir, err = createTestLocalStorageForID(node.ID(), addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				store.Close()
				os.RemoveAll(datadir)
			}
			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyDB, netStore)
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

			bucket.Store(bucketKeyDelivery, delivery)

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				Retrieval: RetrievalDisabled,
				Syncing:   SyncingAutoSubscribe,
			}, nil)

			// increase the version ID for each node
			v++
			r.spec.Version = v

			bucket.Store(bucketKeyRegistry, r)

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	// connect the nodes
	log.Info("Adding nodes to simulation")
	_, err := sim.AddNodesAndConnectChain(2)
	if err != nil {
		t.Fatal(err)
	}

	log.Info("Starting simulation")
	ctx := context.Background()
	// make sure they have time to connect
	time.Sleep(200 * time.Millisecond)
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		// get the pivot node's registry
		nodes := sim.UpNodeIDs()

		item, ok := sim.NodeItem(nodes[0], bucketKeyRegistry)
		if !ok {
			return fmt.Errorf("no registry")
		}
		registry := item.(*Registry)

		// getting the other peer should fail due to the different version numbers
		if registry.getPeer(nodes[1]) != nil {
			t.Fatal("Expected the peer to be nil, but it is not")
		}
		return nil
	})
	if result.Error != nil {
		t.Fatal(result.Error)
	}
	log.Info("Simulation ended")
}