github.com/yinchengtsinghua/golang-Eos-dpos-Ethereum@v0.0.0-20190121132951-92cc4225ed8e/swarm/network/stream/snapshot_sync_test.go (about)

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package stream

import (
	"context"
	crand "crypto/rand"
	"fmt"
	"io"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/pot"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
	mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
)

//the minimum number of nearest neighbours a chunk is mapped to
const testMinProxBinSize = 2
const MaxTimeout = 600

type synctestConfig struct {
	addrs            [][]byte                   //overlay addresses of all simulation nodes
	hashes           []storage.Address          //hashes of the uploaded chunks
	idToChunksMap    map[discover.NodeID][]int  //maps a node ID to the indexes of the chunks expected at that node
	chunksToNodesMap map[string][]int           //maps a chunk hash to the indexes of its closest nodes
	addrToIDMap      map[string]discover.NodeID //maps an overlay address to the corresponding node ID
}

//This test is a syncing test for nodes.
//One node is randomly selected to be the pivot node.
//A configurable number of chunks and nodes can be
//provided to the test; the number of chunks is uploaded
//to the pivot node, and we check that nodes get the chunks
//they are expected to store based on the syncing protocol.
//Number of chunks and nodes can be provided via commandline too.
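//For example (a sketch; the flags are registered in this package's shared
//test setup, so the exact flag names -nodes and -chunks are assumed here):
//  go test -run TestSyncingViaGlobalSync -nodes=16 -chunks=32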
func TestSyncingViaGlobalSync(t *testing.T) {
	//if nodes/chunks have been provided via commandline,
	//run the test with those values
	if *nodes != 0 && *chunks != 0 {
		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
		testSyncingViaGlobalSync(t, *chunks, *nodes)
	} else {
		var nodeCnt []int
		var chnkCnt []int
		//if the `longrunning` flag has been provided,
		//run more test combinations
		if *longrunning {
			chnkCnt = []int{1, 8, 32, 256, 1024}
			nodeCnt = []int{16, 32, 64, 128, 256}
		} else {
			//default test
			chnkCnt = []int{4, 32}
			nodeCnt = []int{32, 16}
		}
		for _, chnk := range chnkCnt {
			for _, n := range nodeCnt {
				log.Info(fmt.Sprintf("Long running test with %d chunks and %d nodes...", chnk, n))
				testSyncingViaGlobalSync(t, chnk, n)
			}
		}
	}
}

func TestSyncingViaDirectSubscribe(t *testing.T) {
	//if nodes/chunks have been provided via commandline,
	//run the test with those values
	if *nodes != 0 && *chunks != 0 {
		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
		err := testSyncingViaDirectSubscribe(*chunks, *nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		var nodeCnt []int
		var chnkCnt []int
		//if the `longrunning` flag has been provided,
		//run more test combinations
		if *longrunning {
			chnkCnt = []int{1, 8, 32, 256, 1024}
			nodeCnt = []int{32, 16}
		} else {
			//default test
			chnkCnt = []int{4, 32}
			nodeCnt = []int{32, 16}
		}
		for _, chnk := range chnkCnt {
			for _, n := range nodeCnt {
				log.Info(fmt.Sprintf("Long running test with %d chunks and %d nodes...", chnk, n))
				err := testSyncingViaDirectSubscribe(chnk, n)
				if err != nil {
					t.Fatal(err)
				}
			}
		}
	}
}

func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {

			id := ctx.Config.ID
			addr := network.NewAddrFromNodeID(id)
			store, datadir, err := createTestLocalStorageForID(id, addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			db := storage.NewDBAPI(localStore)
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, db)

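			//global syncing is enabled here: with DoSync set, the registry
			//requests SYNC subscriptions from peers on its own (as the option
			//names suggest, SyncUpdateDelay is the delay before subscriptions
			//are re-evaluated after kademlia changes)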
			r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
				DoSync:          true,
				SyncUpdateDelay: 3 * time.Second,
			})
			bucket.Store(bucketKeyRegistry, r)

			return r, cleanup, nil

		},
	})
	defer sim.Close()

	log.Info("Initializing test config")

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[discover.NodeID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]discover.NodeID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		t.Fatal(err)
	}

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancelSimRun()

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := network.ToOverlayAddr(n.Bytes())
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on discover.NodeID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//get a random node to upload the file to;
		//it will be the pivot node for this test
		node := sim.RandomUpNode()
		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
		if !ok {
			return fmt.Errorf("No localstore")
		}
		lstore := item.(*storage.LocalStore)
		hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
		if err != nil {
			return err
		}
		conf.hashes = append(conf.hashes, hashes...)
		mapKeysToNodes(conf)

		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

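		//at this point the snapshot network is reported healthy: chunks
		//uploaded to the pivot should be syncing to the nodes closest to
		//their addresses, which the loop below verifies
		//(the argument 2 presumably mirrors testMinProxBinSize)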
		//The chunk retrieval check is repeated until all expected chunks are
		//found on all nodes, or until the simulation context times out.
		allSuccess := false
		var gDir string
		var globalStore *mockdb.GlobalStore
		if *useMockStore {
			gDir, globalStore, err = createGlobalStore()
			if err != nil {
				return fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil")
			}
			defer func() {
				os.RemoveAll(gDir)
				err := globalStore.Close()
				if err != nil {
					log.Error("Error closing global store", "err", err)
				}
			}()
		}
		for !allSuccess {
			allSuccess = true
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				localChunks := conf.idToChunksMap[id]
				localSuccess := true
				for _, ch := range localChunks {
					//get the real chunk by the index in the index array
					chunk := conf.hashes[ch]
					log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
					//check if the expected chunk is indeed in the localstore
					var err error
					if *useMockStore {
						//use the globalStore if the mockStore should be used; in that case,
						//the complete localStore stack is bypassed for getting the chunk
						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
					} else {
						//use the actual localstore
						item, ok := sim.NodeItem(id, bucketKeyStore)
						if !ok {
							return fmt.Errorf("Error accessing localstore")
						}
						lstore := item.(*storage.LocalStore)
						_, err = lstore.Get(ctx, chunk)
					}
					if err != nil {
						log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
						localSuccess = false
						//wait a bit before retrying, so the warn message is not spammed
						time.Sleep(500 * time.Millisecond)
					} else {
						log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
					}
				}
				//a single node missing chunks must fail the whole round
				allSuccess = allSuccess && localSuccess
			}
		}
		if !allSuccess {
			return fmt.Errorf("Not all chunks succeeded!")
		}
		return nil
	})

	if result.Error != nil {
		t.Fatal(result.Error)
	}
}

/*
The test generates the given number of chunks.

For every chunk generated, the nearest node addresses
are identified, and we verify that the nodes closer to the
chunk addresses actually do have the chunks in their local stores.

The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. The snapshot should have 'streamer' in its service list.
*/

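//In contrast to testSyncingViaGlobalSync above, the registry here is created
//with nil RegistryOptions, so syncing does not start on its own; instead,
//SYNC subscriptions are requested explicitly via startSyncing below.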
func testSyncingViaDirectSubscribe(chunkCount int, nodeCount int) error {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {

			id := ctx.Config.ID
			addr := network.NewAddrFromNodeID(id)
			store, datadir, err := createTestLocalStorageForID(id, addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			db := storage.NewDBAPI(localStore)
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, db)

			r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), nil)
			bucket.Store(bucketKeyRegistry, r)

			fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil

		},
	})
	defer sim.Close()

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancelSimRun()

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[discover.NodeID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]discover.NodeID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := network.ToOverlayAddr(n.Bytes())
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on discover.NodeID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		var subscriptionCount int

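		//count incoming subscription messages: message code 4 in the "stream"
		//protocol corresponds to SubscribeMsg in this package's Spec (an
		//assumption worth verifying against Spec.Messages), so each matched
		//event means one peer received a subscription request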
		filter := simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("stream").MsgCode(4)
		eventC := sim.PeerEvents(ctx, nodeIDs, filter)

		for j, node := range nodeIDs {
			log.Trace(fmt.Sprintf("Start syncing subscriptions: %d", j))
			//start syncing!
			item, ok := sim.NodeItem(node, bucketKeyRegistry)
			if !ok {
				return fmt.Errorf("No registry")
			}
			registry := item.(*Registry)

			var cnt int
			cnt, err = startSyncing(registry, conf)
			if err != nil {
				return err
			}
			//increment the number of subscriptions we need to wait for
			//by the count returned from startSyncing (SYNC subscriptions)
			subscriptionCount += cnt
		}

		for e := range eventC {
			if e.Error != nil {
				return e.Error
			}
			subscriptionCount--
			if subscriptionCount == 0 {
				break
			}
		}
		//select a random node for upload
		node := sim.RandomUpNode()
		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
		if !ok {
			return fmt.Errorf("No localstore")
		}
		lstore := item.(*storage.LocalStore)
		hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
		if err != nil {
			return err
		}
		conf.hashes = append(conf.hashes, hashes...)
		mapKeysToNodes(conf)

		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		var gDir string
		var globalStore *mockdb.GlobalStore
		if *useMockStore {
			gDir, globalStore, err = createGlobalStore()
			if err != nil {
				return fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil")
			}
			defer os.RemoveAll(gDir)
		}
		//The chunk retrieval check is repeated until all expected chunks are
		//found on all nodes, or until the simulation context times out.
		allSuccess := false
		for !allSuccess {
			allSuccess = true
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				localChunks := conf.idToChunksMap[id]
				localSuccess := true
				for _, ch := range localChunks {
					//get the real chunk by the index in the index array
					chunk := conf.hashes[ch]
					log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
					//check if the expected chunk is indeed in the localstore
					var err error
					if *useMockStore {
						//use the globalStore if the mockStore should be used; in that case,
						//the complete localStore stack is bypassed for getting the chunk
						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
					} else {
						//use the actual localstore
						item, ok := sim.NodeItem(id, bucketKeyStore)
						if !ok {
							return fmt.Errorf("Error accessing localstore")
						}
						lstore := item.(*storage.LocalStore)
						_, err = lstore.Get(ctx, chunk)
					}
					if err != nil {
						log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
						localSuccess = false
						//wait a bit before retrying, so the warn message is not spammed
						time.Sleep(500 * time.Millisecond)
					} else {
						log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
					}
				}
				//a single node missing chunks must fail the whole round
				allSuccess = allSuccess && localSuccess
			}
		}
		if !allSuccess {
			return fmt.Errorf("Not all chunks succeeded!")
		}
		return nil
	})

	if result.Error != nil {
		return result.Error
	}

	log.Info("Simulation terminated")
	return nil
}

//the server func to start syncing;
//issues `RequestSubscriptionMsg` to peers, based on po, by iterating over
//the kademlia connections and bins, calling `EachBin` on the kademlia
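//The returned count is the number of subscription requests issued, which the
//caller uses to know how many SubscribeMsg events to wait for.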
func startSyncing(r *Registry, conf *synctestConfig) (int, error) {
	var err error

	kad, ok := r.delivery.overlay.(*network.Kademlia)
	if !ok {
		return 0, fmt.Errorf("Not a Kademlia!")
	}

	subCnt := 0
	//iterate over each bin and solicit the needed subscriptions
	kad.EachBin(r.addr.Over(), pof, 0, func(conn network.OverlayConn, po int) bool {
		//an empty Range requests the full history for the bin
		histRange := &Range{}

		subCnt++
		err = r.RequestSubscription(conf.addrToIDMap[string(conn.Address())], NewStream("SYNC", FormatSyncBinKey(uint8(po)), true), histRange, Top)
		if err != nil {
			log.Error(fmt.Sprintf("Error in RequestSubscription! %v", err))
			return false
		}
		return true

	})
	return subCnt, nil
}

//map chunk keys to the node addresses which are responsible for them
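//(proximity order po, as computed by pof, is the number of leading bits two
//byte sequences share; e.g. 0b1100... and 0b1010... agree only on the first
//bit, so their po is 1)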
func mapKeysToNodes(conf *synctestConfig) {
	kmap := make(map[string][]int)
	nodemap := make(map[string][]int)
	//build a pot of the node overlay addresses
	np := pot.NewPot(nil, 0)
	indexmap := make(map[string]int)
	for i, a := range conf.addrs {
		indexmap[string(a)] = i
		np, _, _ = pot.Add(np, a, pof)
	}
	//for each chunk hash, find the nearest node addresses
	//by running EachNeighbour on the pot
	log.Trace(fmt.Sprintf("Generated hash chunk(s): %v", conf.hashes))
	for i := 0; i < len(conf.hashes); i++ {
		pl := 256 //highest possible proximity
		var nns []int
		np.EachNeighbour([]byte(conf.hashes[i]), pof, func(val pot.Val, po int) bool {
			a := val.([]byte)
			if pl < 256 && pl != po {
				return false
			}
			if pl == 256 || pl == po {
				log.Trace(fmt.Sprintf("appending %s", conf.addrToIDMap[string(a)]))
				nns = append(nns, indexmap[string(a)])
				nodemap[string(a)] = append(nodemap[string(a)], i)
			}
			if pl == 256 && len(nns) >= testMinProxBinSize {
				//the minimum proximity bin size has been reached at this po;
				//keep adding nodes in the same bin, then stop
				pl = po
			}
			return true
		})
		kmap[string(conf.hashes[i])] = nns
	}
	for addr, chunks := range nodemap {
		//this selects which chunks are expected to be found with the given node
		conf.idToChunksMap[conf.addrToIDMap[addr]] = chunks
	}
	log.Debug(fmt.Sprintf("Map of expected chunks by ID: %v", conf.idToChunksMap))
	conf.chunksToNodesMap = kmap
}

//upload a file (a number of chunks) to a single local node store
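//(each iteration stores one chunk-sized random file, so every returned root
//address should also be the address of the single chunk that backs it)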
func uploadFileToSingleNodeStore(id discover.NodeID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) {
	log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
	fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
	size := chunkSize
	var rootAddrs []storage.Address
	for i := 0; i < chunkCount; i++ {
		rk, wait, err := fileStore.Store(context.TODO(), io.LimitReader(crand.Reader, int64(size)), int64(size), false)
		if err != nil {
			return nil, err
		}
		err = wait(context.TODO())
		if err != nil {
			return nil, err
		}
		rootAddrs = append(rootAddrs, rk)
	}

	return rootAddrs, nil
}