//<developer>
//    <name>linapex 曹一峰</name>
//    <email>linapex@163.com</email>
//    <wx>superexc</wx>
//    <qqgroup>128148617</qqgroup>
//    <url>https://jsq.ink</url>
//    <role>pku engineer</role>
//    <date>2019-03-16 19:16:44</date>
//</624450115716780032>

package stream

import (
	"context"
	"fmt"
	"os"
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/pot"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/storage/mock"
	mockmem "github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
	"github.com/ethereum/go-ethereum/swarm/testutil"
)

// MaxTimeout is a timeout bound expressed in seconds.
// NOTE(review): it is not referenced within this file's visible code;
// presumably consumed by sibling files of this package — confirm before removing.
const MaxTimeout = 600

// synctestConfig holds the bookkeeping state shared by the syncing tests:
// the overlay addresses of all simulated nodes, the hashes of all uploaded
// chunks, and the mappings needed to decide which node should end up
// storing which chunk.
type synctestConfig struct {
	// Kademlia overlay addresses of all simulated nodes.
	addrs [][]byte
	// Hashes of all chunks uploaded during the test.
	hashes []storage.Address
	// Mapping of discovery (enode) ID -> indexes (into hashes) of the
	// chunks expected to be stored at that node.
	idToChunksMap map[enode.ID][]int
	//chunkstonodesmap map[string][]int
	// Mapping of overlay address (as string) -> discovery (enode) ID.
	addrToIDMap map[string]enode.ID
}

// Custom simulation event types emitted to trace the life cycle of chunks
// (created, offered, wanted, delivered, arrived) and the end of the
// simulation. Only Created and Arrived are sent from this file; the others
// are presumably used by sibling files — confirm before removing.
const (
	EventTypeChunkCreated   simulations.EventType = "chunkCreated"
	EventTypeChunkOffered   simulations.EventType = "chunkOffered"
	EventTypeChunkWanted    simulations.EventType = "chunkWanted"
	EventTypeChunkDelivered simulations.EventType = "chunkDelivered"
	EventTypeChunkArrived   simulations.EventType = "chunkArrived"
	EventTypeSimTerminated  simulations.EventType = "simTerminated"
)

// The tests in this file should never request chunks from peers (syncing
// alone must move the data). This function panics to signal that something
// is wrong if a retrieval request is ever issued.
func dummyRequestFromPeers(_ context.Context, req *network.Request) (*enode.ID, chan struct{}, error) {
	panic(fmt.Sprintf("unexpected request: address %s, source %s", req.Addr.String(), req.Source.String()))
}

// TestSyncingViaGlobalSync is a syncing test for nodes.
// One node is randomly selected to be the pivot node.
// A configurable number of chunks and nodes can be provided to the test:
// the given number of chunks is uploaded to the pivot node, and we then
// check that every node receives the chunks it should store according to
// the syncing protocol.
// The number of chunks and nodes can also be provided via the command line.
func TestSyncingViaGlobalSync(t *testing.T) {
	if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
		t.Skip("Flaky on mac on travis")
	}
	// If nodes/chunks have been provided via the command line,
	// run the test with those values.
	if *nodes != 0 && *chunks != 0 {
		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
		testSyncingViaGlobalSync(t, *chunks, *nodes)
	} else {
		var nodeCnt []int
		var chnkCnt []int
		// If the "longrunning" flag has been provided,
		// run more test combinations.
		if *longrunning {
			chnkCnt = []int{1, 8, 32, 256, 1024}
			nodeCnt = []int{16, 32, 64, 128, 256}
		} else {
			// Default test combinations.
			chnkCnt = []int{4, 32}
			nodeCnt = []int{32, 16}
		}
		for _, chnk := range chnkCnt {
			for _, n := range nodeCnt {
				log.Info(fmt.Sprintf("Long running test with %d chunks and %d nodes...", chnk, n))
				testSyncingViaGlobalSync(t, chnk, n)
			}
		}
	}
}

// simServiceMap registers the single "streamer" service used by every
// simulated node in this test.
var simServiceMap = map[string]simulation.ServiceFunc{
	"streamer": streamerFunc,
}

// streamerFunc builds the streamer service for one simulated node: a local
// store plus net store wired to a panicking fetcher (see
// dummyRequestFromPeers), and a stream Registry with retrieval disabled and
// auto-subscribing syncing. The store and registry are published into the
// shared bucket for later inspection; cleanup removes the node's datadir
// and closes the net store and registry.
func streamerFunc(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
	n := ctx.Config.Node()
	addr := network.NewAddr(n)
	store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
	if err != nil {
		return nil, nil, err
	}
	bucket.Store(bucketKeyStore, store)
	localStore := store.(*storage.LocalStore)
	netStore, err := storage.NewNetStore(localStore, nil)
	if err != nil {
		return nil, nil, err
	}
	kad := network.NewKademlia(addr.Over(), network.NewKadParams())
	delivery := NewDelivery(kad, netStore)
	// Any attempt to fetch a chunk from peers is a test failure.
	netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New

	r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
		Retrieval:       RetrievalDisabled,
		Syncing:         SyncingAutoSubscribe,
		SyncUpdateDelay: 3 * time.Second,
	}, nil)

	bucket.Store(bucketKeyRegistry, r)

	cleanup = func() {
		os.RemoveAll(datadir)
		netStore.Close()
		r.Close()
	}

	return r, cleanup, nil

}

// testSyncingViaGlobalSync runs one syncing scenario: it boots a snapshot of
// nodeCount nodes, waits for the network to become healthy, watches for peer
// drops in the background, and then runs the upload/verification simulation
// (runSim) with chunkCount chunks. Any peer disconnect observed during the
// run fails the test.
func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
	sim := simulation.New(simServiceMap)
	defer sim.Close()

	log.Info("Initializing test config")

	conf := &synctestConfig{}
	// Map of discovery ID to the indexes of chunks expected at that ID.
	conf.idToChunksMap = make(map[enode.ID][]int)
	// Map of overlay address to discovery ID.
	conf.addrToIDMap = make(map[string]enode.ID)
	// Array where the generated chunk hashes will be stored.
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		t.Fatal(err)
	}

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancelSimRun()

	if _, err := sim.WaitTillHealthy(ctx); err != nil {
		t.Fatal(err)
	}

	// Subscribe to drop events on all nodes; any drop marks the run as
	// disconnected and fails the test after the simulation finishes.
	disconnections := sim.PeerEvents(
		context.Background(),
		sim.NodeIDs(),
		simulation.NewPeerEventsFilter().Drop(),
	)

	var disconnected atomic.Value
	go func() {
		for d := range disconnections {
			if d.Error != nil {
				log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
				disconnected.Store(true)
			}
		}
	}()

	result := runSim(conf, ctx, sim, chunkCount)

	if result.Error != nil {
		t.Fatal(result.Error)
	}
	if yes, ok := disconnected.Load().(bool); ok && yes {
		t.Fatal("disconnect events received")
	}
	log.Info("Simulation ended")
}

// runSim uploads chunkCount chunks to a randomly chosen up node and then
// repeatedly polls every node's store until each node holds all the chunks
// it is responsible for (per mapKeysToNodes), or until ctx times out.
func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulation, chunkCount int) simulation.Result {

	return sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			// Get the Kademlia overlay address from this ID.
			a := n.Bytes()
			// Append it to the array of all overlay addresses.
			conf.addrs = append(conf.addrs, a)
			// Proximity is calculated on overlay addresses,
			// but the p2p/simulations check func triggers on enode.ID,
			// so we need to know which overlay addr maps to which node ID.
			conf.addrToIDMap[string(a)] = n
		}

		// Pick a random up node;
		// this is the node selected for the upload.
		node := sim.Net.GetRandomUpNode()
		item, ok := sim.NodeItem(node.ID(), bucketKeyStore)
		if !ok {
			return fmt.Errorf("No localstore")
		}
		lstore := item.(*storage.LocalStore)
		hashes, err := uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
		if err != nil {
			return err
		}
		// Announce every uploaded chunk as a "created" simulation event.
		for _, h := range hashes {
			evt := &simulations.Event{
				Type: EventTypeChunkCreated,
				Node: sim.Net.GetNode(node.ID()),
				Data: h.String(),
			}
			sim.Net.Events().Send(evt)
		}
		conf.hashes = append(conf.hashes, hashes...)
		mapKeysToNodes(conf)

		// Repeat the retrieval check until all uploaded chunks are found on
		// all responsible nodes, or until the (ctx) timeout aborts the run.
		var globalStore mock.GlobalStorer
		if *useMockStore {
			globalStore = mockmem.NewGlobalStore()
		}
	REPEAT:
		for {
			for _, id := range nodeIDs {
				// For each expected chunk, check if it is in the local store.
				localChunks := conf.idToChunksMap[id]
				for _, ch := range localChunks {
					// Get the actual chunk hash by its index in the hashes array.
					chunk := conf.hashes[ch]
					log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
					// Check that the expected chunk is indeed in the localstore.
					var err error
					if *useMockStore {
						// If the mock store should be used, query the globalStore;
						// in that case the full localstore stack is bypassed.
						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
					} else {
						// Use the actual localstore.
						item, ok := sim.NodeItem(id, bucketKeyStore)
						if !ok {
							return fmt.Errorf("Error accessing localstore")
						}
						lstore := item.(*storage.LocalStore)
						_, err = lstore.Get(ctx, chunk)
					}
					if err != nil {
						log.Debug(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
						// Back off so we don't spam the log with warnings;
						// then restart the whole check from the first node.
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					evt := &simulations.Event{
						Type: EventTypeChunkArrived,
						Node: sim.Net.GetNode(id),
						Data: chunk.String(),
					}
					sim.Net.Events().Send(evt)
					log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
				}
			}
			// Every node holds every chunk it is responsible for.
			return nil
		}
	})
}

// mapKeysToNodes maps each chunk key to the overlay addresses responsible
// for storing it, filling conf.idToChunksMap. Responsibility is derived
// from the nearest-neighbour set of the node closest to the chunk.
func mapKeysToNodes(conf *synctestConfig) {
	nodemap := make(map[string][]int)
	// Build a pot of all node overlay addresses.
	np := pot.NewPot(nil, 0)
	indexmap := make(map[string]int)
	for i, a := range conf.addrs {
		indexmap[string(a)] = i
		np, _, _ = pot.Add(np, a, pof)
	}

	ppmap := network.NewPeerPotMap(network.NewKadParams().NeighbourhoodSize, conf.addrs)

	// For each chunk hash, run EachNeighbour on the address pot to identify
	// the closest node, then spread responsibility over its NN set.
	log.Trace(fmt.Sprintf("Generated hash chunk(s): %v", conf.hashes))
	for i := 0; i < len(conf.hashes); i++ {
		var a []byte
		np.EachNeighbour([]byte(conf.hashes[i]), pof, func(val pot.Val, po int) bool {
			// Take only the first (closest) address.
			a = val.([]byte)
			return false
		})

		nns := ppmap[common.Bytes2Hex(a)].NNSet
		nns = append(nns, a)

		for _, p := range nns {
			nodemap[string(p)] = append(nodemap[string(p)], i)
		}
	}
	for addr, chunks := range nodemap {
		// This selects which chunks are expected to be found at the given node.
		conf.idToChunksMap[conf.addrToIDMap[addr]] = chunks
	}
	log.Debug(fmt.Sprintf("Map of expected chunks by ID: %v", conf.idToChunksMap))
}

// uploadFileToSingleNodeStore uploads chunkCount random chunks of chunkSize
// bytes directly into a single node's local store and returns the resulting
// root addresses.
func uploadFileToSingleNodeStore(id enode.ID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) {
	log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
	fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
	size := chunkSize
	var rootAddrs []storage.Address
	for i := 0; i < chunkCount; i++ {
		// RandomReader(i, size) is seeded by i, so each chunk's content is
		// deterministic per index.
		rk, wait, err := fileStore.Store(context.TODO(), testutil.RandomReader(i, size), int64(size), false)
		if err != nil {
			return nil, err
		}
		// Block until the chunk data is fully stored.
		err = wait(context.TODO())
		if err != nil {
			return nil, err
		}
		rootAddrs = append(rootAddrs, (rk))
	}

	return rootAddrs, nil
}