// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"math"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/storage/mock"
	"github.com/ethereum/go-ethereum/swarm/testutil"
)

const dataChunkCount = 200

func TestSyncerSimulation(t *testing.T) {
	testSyncBetweenNodes(t, 2, dataChunkCount, true, 1)
	// This test uses much more memory when running with
	// race detector. Allow it to finish successfully by
	// reducing its scope, and still check for data races
	// with the smallest number of nodes.
	if !testutil.RaceEnabled {
		testSyncBetweenNodes(t, 4, dataChunkCount, true, 1)
		testSyncBetweenNodes(t, 8, dataChunkCount, true, 1)
		testSyncBetweenNodes(t, 16, dataChunkCount, true, 1)
	}
}

func createMockStore(globalStore mock.GlobalStorer, id enode.ID, addr *network.BzzAddr) (lstore storage.ChunkStore, datadir string, err error) {
	address := common.BytesToAddress(id.Bytes())
	mockStore := globalStore.NewNodeStore(address)
	params := storage.NewDefaultLocalStoreParams()

	datadir, err = ioutil.TempDir("", "localMockStore-"+id.TerminalString())
	if err != nil {
		return nil, "", err
	}
	params.Init(datadir)
	params.BaseKey = addr.Over()
	lstore, err = storage.NewLocalStore(params, mockStore)
	if err != nil {
		return nil, "", err
	}
	return lstore, datadir, nil
}
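
// createMockStore is not called from this file directly; the sketch below is
// an illustrative wiring (not part of the original test flow) using the
// mem-backed GlobalStore from swarm/storage/mock/mem. nodeID and bzzAddr are
// hypothetical placeholders for a node's enode.ID and *network.BzzAddr.
//
//	import mockmem "github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
//
//	globalStore := mockmem.NewGlobalStore()
//	lstore, datadir, err := createMockStore(globalStore, nodeID, bzzAddr)
//	if err != nil {
//		t.Fatal(err)
//	}
//	defer os.RemoveAll(datadir)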

func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, po uint8) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			addr := network.NewAddr(ctx.Config.Node())
			// hack to put addresses in the same space
			addr.OAddr[0] = byte(0)

			netStore, delivery, clean, err := newNetStoreAndDeliveryWithBzzAddr(ctx, bucket, addr)
			if err != nil {
				return nil, nil, err
			}

			var dir string
			var store *state.DBStore
			if testutil.RaceEnabled {
				// Use on-disk DBStore to reduce memory consumption in race tests.
				dir, err = ioutil.TempDir("", "swarm-stream-")
				if err != nil {
					return nil, nil, err
				}
				store, err = state.NewDBStore(dir)
				if err != nil {
					return nil, nil, err
				}
			} else {
				store = state.NewInmemoryStore()
			}

			r := NewRegistry(addr.ID(), delivery, netStore, store, &RegistryOptions{
				Retrieval: RetrievalDisabled,
				Syncing:   SyncingAutoSubscribe,
				SkipCheck: skipCheck,
			}, nil)

			cleanup = func() {
				r.Close()
				clean()
				if dir != "" {
					os.RemoveAll(dir)
				}
			}

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	// create context for simulation run
	timeout := 30 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	// defer cancel should come before defer simulation teardown
	defer cancel()

	_, err := sim.AddNodesAndConnectChain(nodes)
	if err != nil {
		t.Fatal(err)
	}
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
		nodeIDs := sim.UpNodeIDs()

		nodeIndex := make(map[enode.ID]int)
		for i, id := range nodeIDs {
			nodeIndex[id] = i
		}

		disconnected := watchDisconnections(ctx, sim)
		defer func() {
			if err != nil && disconnected.bool() {
				err = errors.New("disconnect events received")
			}
		}()

		// each node subscribes to each other's swarmChunkServerStreamName
		for j := 0; j < nodes-1; j++ {
			id := nodeIDs[j]
			client, err := sim.Net.GetNode(id).Client()
			if err != nil {
				return fmt.Errorf("node %s client: %v", id, err)
			}
			sid := nodeIDs[j+1]
			err = client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream("SYNC", FormatSyncBinKey(1), false), NewRange(0, 0), Top)
			if err != nil {
				return err
			}
			if j > 0 || nodes == 2 {
				item, ok := sim.NodeItem(nodeIDs[j], bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("no filestore")
				}
				fileStore := item.(*storage.FileStore)
				size := chunkCount * chunkSize
				_, wait, err := fileStore.Store(ctx, testutil.RandomReader(j, size), int64(size), false)
				if err != nil {
					return fmt.Errorf("fileStore.Store: %v", err)
				}
				if err := wait(ctx); err != nil {
					return fmt.Errorf("fileStore.Store wait: %v", err)
				}
			}
		}
		// here we distribute chunks of a random file into stores 1...nodes
		// and collect hashes in the po 1 bin for each node
		hashes := make([][]storage.Address, nodes)
		totalHashes := 0
		hashCounts := make([]int, nodes)
		for i := nodes - 1; i >= 0; i-- {
			if i < nodes-1 {
				hashCounts[i] = hashCounts[i+1]
			}
			item, ok := sim.NodeItem(nodeIDs[i], bucketKeyDB)
			if !ok {
				return fmt.Errorf("no DB")
			}
			netStore := item.(*storage.NetStore)
			netStore.Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool {
				hashes[i] = append(hashes[i], addr)
				totalHashes++
				hashCounts[i]++
				return true
			})
		}
		var total, found int
		for _, node := range nodeIDs {
			i := nodeIndex[node]

			for j := i; j < nodes; j++ {
				total += len(hashes[j])
				for _, key := range hashes[j] {
					item, ok := sim.NodeItem(nodeIDs[j], bucketKeyDB)
					if !ok {
						return fmt.Errorf("no DB")
					}
					db := item.(*storage.NetStore)
					_, err := db.Get(ctx, key)
					if err == nil {
						found++
					}
				}
			}
			log.Debug("sync check", "node", node, "index", i, "bin", po, "found", found, "total", total)
		}
		if total == found && total > 0 {
			return nil
		}
		return fmt.Errorf("total %d does not equal found %d", total, found)
	})

	if result.Error != nil {
		t.Fatal(result.Error)
	}
}
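
// The two tests below exercise the streamer protocol version check performed
// during the devp2p handshake. Each Registry carries a protocols.Spec, and
// peers only complete the handshake when their Version fields match; the
// shape is sketched here with illustrative values (the real Spec lives in
// this package's registry code):
//
//	spec := &protocols.Spec{
//		Name:    "stream",
//		Version: 8, // bumping this on one side makes the handshake fail
//	}
//
// When the versions differ, the streamer protocol is never established for
// that peer, so the peer never appears in the registry's peer map, which is
// what getPeer inspects below.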

// TestSameVersionID just checks that if the version is not changed,
// then streamer peers see each other
func TestSameVersionID(t *testing.T) {
	// test version ID
	v := uint(1)
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
			if err != nil {
				return nil, nil, err
			}

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				Retrieval: RetrievalDisabled,
				Syncing:   SyncingAutoSubscribe,
			}, nil)
			bucket.Store(bucketKeyRegistry, r)

			// assign the same version ID to each node
			r.spec.Version = v

			cleanup = func() {
				r.Close()
				clean()
			}

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	// connect just two nodes
	log.Info("Adding nodes to simulation")
	_, err := sim.AddNodesAndConnectChain(2)
	if err != nil {
		t.Fatal(err)
	}

	log.Info("Starting simulation")
	ctx := context.Background()
	// make sure they have time to connect
	time.Sleep(200 * time.Millisecond)
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		// get the pivot node's registry
		nodes := sim.UpNodeIDs()

		item, ok := sim.NodeItem(nodes[0], bucketKeyRegistry)
		if !ok {
			return fmt.Errorf("no registry")
		}
		registry := item.(*Registry)

		// the peers should connect, thus getting the peer should not return nil
		if registry.getPeer(nodes[1]) == nil {
			return errors.New("expected the peer to not be nil, but it is")
		}
		return nil
	})
	if result.Error != nil {
		t.Fatal(result.Error)
	}
	log.Info("Simulation ended")
}
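
// Note (an assumption based on this package's registry internals): getPeer
// reads from the peer map that is populated only once the streamer protocol
// is running, so a nil result below means the protocol never came up between
// the two nodes, not merely that the connection was slow.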

// TestDifferentVersionID proves that if the streamer protocol version doesn't match,
// then the peers are not connected at streamer level
func TestDifferentVersionID(t *testing.T) {
	// create a variable to hold the version ID
	v := uint(0)
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
			if err != nil {
				return nil, nil, err
			}

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				Retrieval: RetrievalDisabled,
				Syncing:   SyncingAutoSubscribe,
			}, nil)
			bucket.Store(bucketKeyRegistry, r)

			// increase the version ID for each node
			v++
			r.spec.Version = v

			cleanup = func() {
				r.Close()
				clean()
			}

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	// connect the nodes
	log.Info("Adding nodes to simulation")
	_, err := sim.AddNodesAndConnectChain(2)
	if err != nil {
		t.Fatal(err)
	}

	log.Info("Starting simulation")
	ctx := context.Background()
	// make sure they have time to connect
	time.Sleep(200 * time.Millisecond)
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		// get the pivot node's registry
		nodes := sim.UpNodeIDs()

		item, ok := sim.NodeItem(nodes[0], bucketKeyRegistry)
		if !ok {
			return fmt.Errorf("no registry")
		}
		registry := item.(*Registry)

		// getting the other peer should fail due to the different version numbers
		if registry.getPeer(nodes[1]) != nil {
			return errors.New("expected the peer to be nil, but it is not")
		}
		return nil
	})
	if result.Error != nil {
		t.Fatal(result.Error)
	}
	log.Info("Simulation ended")
}
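
// Running note: testutil.RaceEnabled reports whether the test binary was
// compiled with the race detector (in go-ethereum it is a constant selected
// by race/norace build tags), which is how TestSyncerSimulation picks its
// reduced scope. An illustrative invocation:
//
//	go test -race -run 'TestSyncerSimulation|TestSameVersionID|TestDifferentVersionID' ./swarm/network/stream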