github.com/hyperion-hyn/go-ethereum@v2.4.0+incompatible/swarm/network/stream/syncer_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
    "context"
    "fmt"
    "io/ioutil"
    "math"
    "os"
    "sync"
    "testing"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/node"
    "github.com/ethereum/go-ethereum/p2p"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
    "github.com/ethereum/go-ethereum/swarm/log"
    "github.com/ethereum/go-ethereum/swarm/network"
    "github.com/ethereum/go-ethereum/swarm/network/simulation"
    "github.com/ethereum/go-ethereum/swarm/state"
    "github.com/ethereum/go-ethereum/swarm/storage"
    mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
    "github.com/ethereum/go-ethereum/swarm/testutil"
)

const dataChunkCount = 200

func TestSyncerSimulation(t *testing.T) {
    testSyncBetweenNodes(t, 2, 1, dataChunkCount, true, 1)
    testSyncBetweenNodes(t, 4, 1, dataChunkCount, true, 1)
    testSyncBetweenNodes(t, 8, 1, dataChunkCount, true, 1)
    testSyncBetweenNodes(t, 16, 1, dataChunkCount, true, 1)
}

// createMockStore creates a LocalStore for the given node, backed by its
// per-node store within the shared global mock store.
func createMockStore(globalStore *mockdb.GlobalStore, id enode.ID, addr *network.BzzAddr) (lstore storage.ChunkStore, datadir string, err error) {
    address := common.BytesToAddress(id.Bytes())
    mockStore := globalStore.NewNodeStore(address)
    params := storage.NewDefaultLocalStoreParams()

    datadir, err = ioutil.TempDir("", "localMockStore-"+id.TerminalString())
    if err != nil {
        return nil, "", err
    }
    params.Init(datadir)
    params.BaseKey = addr.Over()
    lstore, err = storage.NewLocalStore(params, mockStore)
    if err != nil {
        return nil, "", err
    }
    return lstore, datadir, nil
}

// testSyncBetweenNodes brings up a chain of `nodes` simulated nodes, has each
// node subscribe to its successor's SYNC stream, uploads random data, and
// verifies that the chunks in proximity-order bin `po` are synced between them.
func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool, po uint8) {
    sim := simulation.New(map[string]simulation.ServiceFunc{
        "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
            var store storage.ChunkStore
            var globalStore *mockdb.GlobalStore
            var gDir, datadir string

            node := ctx.Config.Node()
            addr := network.NewAddr(node)
            // hack to put addresses in the same space
            addr.OAddr[0] = byte(0)

            if *useMockStore {
                gDir, globalStore, err = createGlobalStore()
                if err != nil {
                    return nil, nil, fmt.Errorf("mock store enabled but creating global store failed: %v", err)
                }
                store, datadir, err = createMockStore(globalStore, node.ID(), addr)
            } else {
                store, datadir, err = createTestLocalStorageForID(node.ID(), addr)
            }
            if err != nil {
                return nil, nil, err
            }
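            // share the node's chunk store with the test body via the bucket,
            // so the assertions below can look it up by node ID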
            bucket.Store(bucketKeyStore, store)
            cleanup = func() {
                store.Close()
                os.RemoveAll(datadir)
                if *useMockStore {
                    err := globalStore.Close()
                    if err != nil {
                        log.Error("error closing global store", "err", err)
                    }
                    os.RemoveAll(gDir)
                }
            }
            localStore := store.(*storage.LocalStore)
            netStore, err := storage.NewNetStore(localStore, nil)
            if err != nil {
                return nil, nil, err
            }
            bucket.Store(bucketKeyDB, netStore)
            kad := network.NewKademlia(addr.Over(), network.NewKadParams())
            delivery := NewDelivery(kad, netStore)
            netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

            bucket.Store(bucketKeyDelivery, delivery)

            r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
                Retrieval: RetrievalDisabled,
                Syncing:   SyncingAutoSubscribe,
                SkipCheck: skipCheck,
            })

            fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
            bucket.Store(bucketKeyFileStore, fileStore)

            return r, cleanup, nil
        },
    })
    defer sim.Close()

    // create context for simulation run
    timeout := 30 * time.Second
    ctx, cancel := context.WithTimeout(context.Background(), timeout)
    // defer cancel should come before defer simulation teardown
    defer cancel()

    _, err := sim.AddNodesAndConnectChain(nodes)
    if err != nil {
        t.Fatal(err)
    }
    result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
        nodeIDs := sim.UpNodeIDs()

        nodeIndex := make(map[enode.ID]int)
        for i, id := range nodeIDs {
            nodeIndex[id] = i
        }

        disconnections := sim.PeerEvents(
            context.Background(),
            sim.NodeIDs(),
            simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
        )

        go func() {
            for d := range disconnections {
                if d.Error != nil {
                    log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
                    // t.Fatal must not be called from a goroutine other than
                    // the one running the test; t.Error is safe here.
                    t.Error(d.Error)
                }
            }
        }()

        // each node subscribes to the SYNC stream of the next node in the chain
        for j := 0; j < nodes-1; j++ {
            id := nodeIDs[j]
            client, err := sim.Net.GetNode(id).Client()
            if err != nil {
                t.Fatal(err)
            }
            sid := nodeIDs[j+1]
            err = client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream("SYNC", FormatSyncBinKey(1), false), NewRange(0, 0), Top)
            if err != nil {
                return err
            }
            if j > 0 || nodes == 2 {
                item, ok := sim.NodeItem(nodeIDs[j], bucketKeyFileStore)
                if !ok {
                    return fmt.Errorf("no filestore")
                }
                fileStore := item.(*storage.FileStore)
                size := chunkCount * chunkSize
                _, wait, err := fileStore.Store(ctx, testutil.RandomReader(j, size), int64(size), false)
                if err != nil {
                    t.Fatal(err.Error())
                }
                if err := wait(ctx); err != nil {
                    t.Fatal(err)
                }
            }
        }
        // here we distribute chunks of a random file into stores 1...nodes
        if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
            return err
        }

        // collect hashes in po 1 bin for each node
        hashes := make([][]storage.Address, nodes)
        totalHashes := 0
        hashCounts := make([]int, nodes)
        for i := nodes - 1; i >= 0; i-- {
            if i < nodes-1 {
                hashCounts[i] = hashCounts[i+1]
            }
            item, ok := sim.NodeItem(nodeIDs[i], bucketKeyDB)
            if !ok {
                return fmt.Errorf("no DB")
            }
            netStore := item.(*storage.NetStore)
            netStore.Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool {
                hashes[i] = append(hashes[i], addr)
                totalHashes++
                hashCounts[i]++
                return true
            })
        }
        var total, found int
        for _, node := range nodeIDs {
            i := nodeIndex[node]

            for j := i; j < nodes; j++ {
                total += len(hashes[j])
                for _, key := range hashes[j] {
                    item, ok := sim.NodeItem(nodeIDs[j], bucketKeyDB)
                    if !ok {
                        return fmt.Errorf("no DB")
                    }
                    db := item.(*storage.NetStore)
                    _, err := db.Get(ctx, key)
                    if err == nil {
                        found++
                    }
                }
            }
            log.Debug("sync check", "node", node, "index", i, "bin", po, "found", found, "total", total)
        }
        if total == found && total > 0 {
            return nil
        }
        return fmt.Errorf("found %d hashes out of %d total", found, total)
    })

    if result.Error != nil {
        t.Fatal(result.Error)
    }
}