github.com/yinchengtsinghua/golang-Eos-dpos-Ethereum@v0.0.0-20190121132951-92cc4225ed8e/swarm/network/stream/syncer_test.go

//This source code was translated, analyzed, and modified by Yin Cheng of Tsinghua University.
//Yin Cheng's QQ: 77025077
//Yin Cheng's WeChat: 18510341407
//Yin Cheng's QQ group: 721929980
//Yin Cheng's email: yinc13@mails.tsinghua.edu.cn
//Yin Cheng graduated from Tsinghua University and is a Microsoft Most Valuable Professional (MVP) in the blockchain field.
//https://mvp.microsoft.com/zh-cn/PublicProfile/4033620

package stream

import (
	"context"
	crand "crypto/rand"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
	mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
)

const dataChunkCount = 200

func TestSyncerSimulation(t *testing.T) {
	testSyncBetweenNodes(t, 2, 1, dataChunkCount, true, 1)
	testSyncBetweenNodes(t, 4, 1, dataChunkCount, true, 1)
	testSyncBetweenNodes(t, 8, 1, dataChunkCount, true, 1)
	testSyncBetweenNodes(t, 16, 1, dataChunkCount, true, 1)
}

func createMockStore(globalStore *mockdb.GlobalStore, id discover.NodeID, addr *network.BzzAddr) (lstore storage.ChunkStore, datadir string, err error) {
	address := common.BytesToAddress(id.Bytes())
	mockStore := globalStore.NewNodeStore(address)
	params := storage.NewDefaultLocalStoreParams()

	datadir, err = ioutil.TempDir("", "localMockStore-"+id.TerminalString())
	if err != nil {
		return nil, "", err
	}
	params.Init(datadir)
	params.BaseKey = addr.Over()
	lstore, err = storage.NewLocalStore(params, mockStore)
	return lstore, datadir, err
}

func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool, po uint8) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			var store storage.ChunkStore
			var globalStore *mockdb.GlobalStore
			var gDir, datadir string

			id := ctx.Config.ID
			addr := network.NewAddrFromNodeID(id)
			// hack to put all node addresses in the same address space
			addr.OAddr[0] = byte(0)

			if *useMockStore {
				gDir, globalStore, err = createGlobalStore()
				if err != nil {
					return nil, nil, fmt.Errorf("mock store requested but the global store could not be created: %v", err)
				}
				store, datadir, err = createMockStore(globalStore, id, addr)
			} else {
				store, datadir, err = createTestLocalStorageForID(id, addr)
			}
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				store.Close()
				os.RemoveAll(datadir)
				if *useMockStore {
					err := globalStore.Close()
					if err != nil {
						log.Error("Error closing global store!", "err", err)
%v", "err", err) 108 } 109 os.RemoveAll(gDir) 110 } 111 } 112 localStore := store.(*storage.LocalStore) 113 db := storage.NewDBAPI(localStore) 114 bucket.Store(bucketKeyDB, db) 115 kad := network.NewKademlia(addr.Over(), network.NewKadParams()) 116 delivery := NewDelivery(kad, db) 117 bucket.Store(bucketKeyDelivery, delivery) 118 119 r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{ 120 SkipCheck: skipCheck, 121 }) 122 123 fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams()) 124 bucket.Store(bucketKeyFileStore, fileStore) 125 126 return r, cleanup, nil 127 128 }, 129 }) 130 defer sim.Close() 131 132 // 133 timeout := 30 * time.Second 134 ctx, cancel := context.WithTimeout(context.Background(), timeout) 135 // 136 defer cancel() 137 138 _, err := sim.AddNodesAndConnectChain(nodes) 139 if err != nil { 140 t.Fatal(err) 141 } 142 result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error { 143 nodeIDs := sim.UpNodeIDs() 144 145 nodeIndex := make(map[discover.NodeID]int) 146 for i, id := range nodeIDs { 147 nodeIndex[id] = i 148 } 149 150 disconnections := sim.PeerEvents( 151 context.Background(), 152 sim.NodeIDs(), 153 simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop), 154 ) 155 156 go func() { 157 for d := range disconnections { 158 if d.Error != nil { 159 log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer) 160 t.Fatal(d.Error) 161 } 162 } 163 }() 164 165 // 166 for j := 0; j < nodes-1; j++ { 167 id := nodeIDs[j] 168 client, err := sim.Net.GetNode(id).Client() 169 if err != nil { 170 t.Fatal(err) 171 } 172 sid := nodeIDs[j+1] 173 client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream("SYNC", FormatSyncBinKey(1), false), NewRange(0, 0), Top) 174 if err != nil { 175 return err 176 } 177 if j > 0 || nodes == 2 { 178 item, ok := sim.NodeItem(nodeIDs[j], bucketKeyFileStore) 179 if !ok { 180 return fmt.Errorf("No filestore") 181 } 182 fileStore := item.(*storage.FileStore) 183 size := chunkCount * chunkSize 184 _, wait, err := fileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false) 185 if err != nil { 186 t.Fatal(err.Error()) 187 } 188 wait(ctx) 189 } 190 } 191 // 192 if _, err := sim.WaitTillHealthy(ctx, 2); err != nil { 193 return err 194 } 195 196 // 197 hashes := make([][]storage.Address, nodes) 198 totalHashes := 0 199 hashCounts := make([]int, nodes) 200 for i := nodes - 1; i >= 0; i-- { 201 if i < nodes-1 { 202 hashCounts[i] = hashCounts[i+1] 203 } 204 item, ok := sim.NodeItem(nodeIDs[i], bucketKeyDB) 205 if !ok { 206 return fmt.Errorf("No DB") 207 } 208 db := item.(*storage.DBAPI) 209 db.Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool { 210 hashes[i] = append(hashes[i], addr) 211 totalHashes++ 212 hashCounts[i]++ 213 return true 214 }) 215 } 216 var total, found int 217 for _, node := range nodeIDs { 218 i := nodeIndex[node] 219 220 for j := i; j < nodes; j++ { 221 total += len(hashes[j]) 222 for _, key := range hashes[j] { 223 item, ok := sim.NodeItem(nodeIDs[j], bucketKeyDB) 224 if !ok { 225 return fmt.Errorf("No DB") 226 } 227 db := item.(*storage.DBAPI) 228 chunk, err := db.Get(ctx, key) 229 if err == storage.ErrFetching { 230 <-chunk.ReqC 231 } else if err != nil { 232 continue 233 } 234 // 235 // 236 found++ 237 } 238 } 239 log.Debug("sync check", "node", node, "index", i, "bin", po, "found", found, "total", total) 240 } 241 if total == found && total > 0 { 242 return nil 243 } 244 return 
fmt.Errorf("Total not equallying found: total is %d", total) 245 }) 246 247 if result.Error != nil { 248 t.Fatal(result.Error) 249 } 250 }