github.com/linapex/ethereum-dpos-chinese@v0.0.0-20190316121959-b78b3a4a1ece/swarm/network/stream/snapshot_sync_test.go

     1  
     2  //<developer>
     3  //    <name>linapex 曹一峰</name>
     4  //    <email>linapex@163.com</email>
     5  //    <wx>superexc</wx>
     6  //    <qqgroup>128148617</qqgroup>
     7  //    <url>https://jsq.ink</url>
     8  //    <role>pku engineer</role>
     9  //    <date>2019-03-16 12:09:48</date>
    10  //</624342675956436992>
    11  
    27  package stream
    28  
    29  import (
    30  	"context"
    31  	crand "crypto/rand"
    32  	"fmt"
    33  	"io"
    34  	"os"
    35  	"sync"
    36  	"testing"
    37  	"time"
    38  
    39  	"github.com/ethereum/go-ethereum/common"
    40  	"github.com/ethereum/go-ethereum/log"
    41  	"github.com/ethereum/go-ethereum/node"
    42  	"github.com/ethereum/go-ethereum/p2p"
    43  	"github.com/ethereum/go-ethereum/p2p/discover"
    44  	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
    45  	"github.com/ethereum/go-ethereum/swarm/network"
    46  	"github.com/ethereum/go-ethereum/swarm/network/simulation"
    47  	"github.com/ethereum/go-ethereum/swarm/pot"
    48  	"github.com/ethereum/go-ethereum/swarm/state"
    49  	"github.com/ethereum/go-ethereum/swarm/storage"
    50  	mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
    51  )
    52  
    53  const testMinProxBinSize = 2
    54  const MaxTimeout = 600
    55  
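//synctestConfig holds the state shared by the sync tests below:
//the node overlay addresses, the generated chunk hashes, and the lookup
//maps relating node IDs, overlay addresses and expected chunk indexes.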
    56  type synctestConfig struct {
    57  	addrs            [][]byte
    58  	hashes           []storage.Address
    59  	idToChunksMap    map[discover.NodeID][]int
    60  	chunksToNodesMap map[string][]int
    61  	addrToIDMap      map[string]discover.NodeID
    62  }
    63  
//This test is a syncing test for nodes.
//One node is randomly selected as the pivot node.
//A number of chunks is uploaded to that pivot node's local store.
//The test then checks that every node ends up holding the chunks
//whose addresses are closest to its own overlay address.
//The number of chunks and nodes can also be provided via the
//command line flags read into *chunks and *nodes.
    71  func TestSyncingViaGlobalSync(t *testing.T) {
	//if nodes/chunks have been provided via the command line,
	//run the test with those values
    74  	if *nodes != 0 && *chunks != 0 {
    75  		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
    76  		testSyncingViaGlobalSync(t, *chunks, *nodes)
    77  	} else {
    78  		var nodeCnt []int
    79  		var chnkCnt []int
		//if the longrunning flag has been provided,
		//run the test with more combinations
    82  		if *longrunning {
    83  			chnkCnt = []int{1, 8, 32, 256, 1024}
    84  			nodeCnt = []int{16, 32, 64, 128, 256}
    85  		} else {
			//default test combinations
    87  			chnkCnt = []int{4, 32}
    88  			nodeCnt = []int{32, 16}
    89  		}
    90  		for _, chnk := range chnkCnt {
    91  			for _, n := range nodeCnt {
				log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", chnk, n))
    93  				testSyncingViaGlobalSync(t, chnk, n)
    94  			}
    95  		}
    96  	}
    97  }
    98  
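//TestSyncingViaDirectSubscribe covers the same scenario as the test above,
//but instead of relying on the registry's automatic syncing it requests the
//SYNC subscriptions explicitly (via startSyncing) before uploading the chunks.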
    99  func TestSyncingViaDirectSubscribe(t *testing.T) {
	//if nodes/chunks have been provided via the command line,
	//run the test with those values
   102  	if *nodes != 0 && *chunks != 0 {
   103  		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
   104  		err := testSyncingViaDirectSubscribe(*chunks, *nodes)
   105  		if err != nil {
   106  			t.Fatal(err)
   107  		}
   108  	} else {
   109  		var nodeCnt []int
   110  		var chnkCnt []int
		//if the longrunning flag has been provided,
		//run the test with more combinations
   113  		if *longrunning {
   114  			chnkCnt = []int{1, 8, 32, 256, 1024}
   115  			nodeCnt = []int{32, 16}
   116  		} else {
			//default test combinations
   118  			chnkCnt = []int{4, 32}
   119  			nodeCnt = []int{32, 16}
   120  		}
   121  		for _, chnk := range chnkCnt {
   122  			for _, n := range nodeCnt {
				log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", chnk, n))
   124  				err := testSyncingViaDirectSubscribe(chnk, n)
   125  				if err != nil {
   126  					t.Fatal(err)
   127  				}
   128  			}
   129  		}
   130  	}
   131  }
   132  
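//testSyncingViaGlobalSync runs one instance of the global-sync test:
//it builds a simulation whose "streamer" service has syncing enabled,
//loads the snapshot for the given node count, uploads chunkCount chunks
//to a random node and then polls until every node holds the chunks that
//map to it (as computed by mapKeysToNodes).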
   133  func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
   134  	sim := simulation.New(map[string]simulation.ServiceFunc{
   135  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   136  
   137  			id := ctx.Config.ID
   138  			addr := network.NewAddrFromNodeID(id)
   139  			store, datadir, err := createTestLocalStorageForID(id, addr)
   140  			if err != nil {
   141  				return nil, nil, err
   142  			}
   143  			bucket.Store(bucketKeyStore, store)
   144  			cleanup = func() {
   145  				os.RemoveAll(datadir)
   146  				store.Close()
   147  			}
   148  			localStore := store.(*storage.LocalStore)
   149  			db := storage.NewDBAPI(localStore)
   150  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   151  			delivery := NewDelivery(kad, db)
   152  
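			//syncing is enabled on the registry, so SYNC subscriptions are
			//established automatically after the configured update delay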
   153  			r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
   154  				DoSync:          true,
   155  				SyncUpdateDelay: 3 * time.Second,
   156  			})
   157  			bucket.Store(bucketKeyRegistry, r)
   158  
   159  			return r, cleanup, nil
   160  
   161  		},
   162  	})
   163  	defer sim.Close()
   164  
   165  	log.Info("Initializing test config")
   166  
   167  	conf := &synctestConfig{}
	//map of discover.NodeID to indexes of the chunks expected at that node
   169  	conf.idToChunksMap = make(map[discover.NodeID][]int)
	//map of overlay address to discover.NodeID
   171  	conf.addrToIDMap = make(map[string]discover.NodeID)
	//array where the generated chunk hashes are stored
   173  	conf.hashes = make([]storage.Address, 0)
   174  
   175  	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
   176  	if err != nil {
   177  		t.Fatal(err)
   178  	}
   179  
   180  	ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
   181  	defer cancelSimRun()
   182  
   183  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
   184  		nodeIDs := sim.UpNodeIDs()
   185  		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
   187  			a := network.ToOverlayAddr(n.Bytes())
			//append it to the array of all overlay addresses
   189  			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on the overlay address,
			//but the checks below are run per discover.NodeID,
			//so we need to know which overlay address maps to which NodeID
   193  			conf.addrToIDMap[string(a)] = n
   194  		}
   195  
		//select a random up node;
		//this is the node the chunks will be uploaded to
   198  		node := sim.RandomUpNode()
   199  		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
   200  		if !ok {
   201  			return fmt.Errorf("No localstore")
   202  		}
   203  		lstore := item.(*storage.LocalStore)
   204  		hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
   205  		if err != nil {
   206  			return err
   207  		}
   208  		conf.hashes = append(conf.hashes, hashes...)
   209  		mapKeysToNodes(conf)
   210  
   211  		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   212  			return err
   213  		}
   214  
		//the chunk check below is repeated until all expected chunks are found
		//on their nodes, or until the simulation run times out
   217  		allSuccess := false
   218  		var gDir string
   219  		var globalStore *mockdb.GlobalStore
   220  		if *useMockStore {
   221  			gDir, globalStore, err = createGlobalStore()
   222  			if err != nil {
				return fmt.Errorf("mock store requested but creating the global store failed: %v", err)
   224  			}
   225  			defer func() {
   226  				os.RemoveAll(gDir)
   227  				err := globalStore.Close()
   228  				if err != nil {
					log.Error("Error closing global store", "err", err)
   230  				}
   231  			}()
   232  		}
		for !allSuccess {
			allSuccess = true
			for _, id := range nodeIDs {
				//for each chunk expected at this node, check that it is in its local store
   236  				localChunks := conf.idToChunksMap[id]
   237  				localSuccess := true
   238  				for _, ch := range localChunks {
					//get the real chunk by the index in the index array
   240  					chunk := conf.hashes[ch]
   241  					log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
					//check if the expected chunk is indeed in the localstore
   243  					var err error
   244  					if *useMockStore {
						//if the mock global store is used, the localstore stack is bypassed
						//and the chunk is looked up in the global store instead
   247  						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
   248  					} else {
						//use the actual localstore
   250  						item, ok := sim.NodeItem(id, bucketKeyStore)
   251  						if !ok {
   252  							return fmt.Errorf("Error accessing localstore")
   253  						}
   254  						lstore := item.(*storage.LocalStore)
   255  						_, err = lstore.Get(ctx, chunk)
   256  					}
   257  					if err != nil {
   258  						log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
   259  						localSuccess = false
						//sleep a bit before retrying so the warning above is not spammed
   261  						time.Sleep(500 * time.Millisecond)
   262  					} else {
   263  						log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
   264  					}
   265  				}
				allSuccess = allSuccess && localSuccess
   267  			}
   268  		}
   269  		if !allSuccess {
   270  			return fmt.Errorf("Not all chunks succeeded!")
   271  		}
   272  		return nil
   273  	})
   274  
   275  	if result.Error != nil {
   276  		t.Fatal(result.Error)
   277  	}
   278  }
   279  
/*
The test generates the given number of chunks.

For every chunk generated, the nodes closest to the chunk address are
identified, and the test verifies that those nodes actually end up with
the chunk in their local stores.

The test loads a snapshot file to construct the swarm network, assuming
that the snapshot describes a healthy kademlia network with the
"streamer" service in its service list.
*/
   291  
   292  func testSyncingViaDirectSubscribe(chunkCount int, nodeCount int) error {
   293  	sim := simulation.New(map[string]simulation.ServiceFunc{
   294  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   295  
   296  			id := ctx.Config.ID
   297  			addr := network.NewAddrFromNodeID(id)
   298  			store, datadir, err := createTestLocalStorageForID(id, addr)
   299  			if err != nil {
   300  				return nil, nil, err
   301  			}
   302  			bucket.Store(bucketKeyStore, store)
   303  			cleanup = func() {
   304  				os.RemoveAll(datadir)
   305  				store.Close()
   306  			}
   307  			localStore := store.(*storage.LocalStore)
   308  			db := storage.NewDBAPI(localStore)
   309  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   310  			delivery := NewDelivery(kad, db)
   311  
   312  			r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), nil)
   313  			bucket.Store(bucketKeyRegistry, r)
   314  
   315  			fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams())
   316  			bucket.Store(bucketKeyFileStore, fileStore)
   317  
   318  			return r, cleanup, nil
   319  
   320  		},
   321  	})
   322  	defer sim.Close()
   323  
   324  	ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
   325  	defer cancelSimRun()
   326  
   327  	conf := &synctestConfig{}
	//map of discover.NodeID to indexes of the chunks expected at that node
   329  	conf.idToChunksMap = make(map[discover.NodeID][]int)
	//map of overlay address to discover.NodeID
   331  	conf.addrToIDMap = make(map[string]discover.NodeID)
	//array where the generated chunk hashes are stored
   333  	conf.hashes = make([]storage.Address, 0)
   334  
   335  	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
   336  	if err != nil {
   337  		return err
   338  	}
   339  
   340  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
   341  		nodeIDs := sim.UpNodeIDs()
   342  		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
   344  			a := network.ToOverlayAddr(n.Bytes())
			//append it to the array of all overlay addresses
   346  			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on the overlay address,
			//but the checks below are run per discover.NodeID,
			//so we need to know which overlay address maps to which NodeID
   350  			conf.addrToIDMap[string(a)] = n
   351  		}
   352  
   353  		var subscriptionCount int
   354  
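		//watch for incoming "stream" protocol messages with code 4 on all nodes;
		//each such event is counted against the subscriptions requested below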
   355  		filter := simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("stream").MsgCode(4)
   356  		eventC := sim.PeerEvents(ctx, nodeIDs, filter)
   357  
   358  		for j, node := range nodeIDs {
   359  			log.Trace(fmt.Sprintf("Start syncing subscriptions: %d", j))
			//grab this node's registry so we can request its SYNC subscriptions
   361  			item, ok := sim.NodeItem(node, bucketKeyRegistry)
   362  			if !ok {
   363  				return fmt.Errorf("No registry")
   364  			}
   365  			registry := item.(*Registry)
   366  
   367  			var cnt int
   368  			cnt, err = startSyncing(registry, conf)
   369  			if err != nil {
   370  				return err
   371  			}
			//increment the number of subscription events to wait for
			//by the number of subscriptions requested from this node
   374  			subscriptionCount += cnt
   375  		}
   376  
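		//wait until one event has been received for every requested subscription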
   377  		for e := range eventC {
   378  			if e.Error != nil {
   379  				return e.Error
   380  			}
   381  			subscriptionCount--
   382  			if subscriptionCount == 0 {
   383  				break
   384  			}
   385  		}
		//select a random up node; this is the node the chunks will be uploaded to
   387  		node := sim.RandomUpNode()
   388  		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
   389  		if !ok {
   390  			return fmt.Errorf("No localstore")
   391  		}
   392  		lstore := item.(*storage.LocalStore)
   393  		hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
   394  		if err != nil {
   395  			return err
   396  		}
   397  		conf.hashes = append(conf.hashes, hashes...)
   398  		mapKeysToNodes(conf)
   399  
   400  		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   401  			return err
   402  		}
   403  
   404  		var gDir string
   405  		var globalStore *mockdb.GlobalStore
   406  		if *useMockStore {
   407  			gDir, globalStore, err = createGlobalStore()
   408  			if err != nil {
				return fmt.Errorf("mock store requested but creating the global store failed: %v", err)
   410  			}
   411  			defer os.RemoveAll(gDir)
   412  		}
		//the chunk check below is repeated until all expected chunks are found
		//on their nodes, or until the simulation run times out
   415  		allSuccess := false
		for !allSuccess {
			allSuccess = true
			for _, id := range nodeIDs {
				//for each chunk expected at this node, check that it is in its local store
   419  				localChunks := conf.idToChunksMap[id]
   420  				localSuccess := true
   421  				for _, ch := range localChunks {
					//get the real chunk by the index in the index array
   423  					chunk := conf.hashes[ch]
   424  					log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
					//check if the expected chunk is indeed in the localstore
   426  					var err error
   427  					if *useMockStore {
						//if the mock global store is used, the localstore stack is bypassed
						//and the chunk is looked up in the global store instead
   430  						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
   431  					} else {
						//use the actual localstore
   433  						item, ok := sim.NodeItem(id, bucketKeyStore)
   434  						if !ok {
   435  							return fmt.Errorf("Error accessing localstore")
   436  						}
   437  						lstore := item.(*storage.LocalStore)
   438  						_, err = lstore.Get(ctx, chunk)
   439  					}
   440  					if err != nil {
   441  						log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
   442  						localSuccess = false
						//sleep a bit before retrying so the warning above is not spammed
   444  						time.Sleep(500 * time.Millisecond)
   445  					} else {
   446  						log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
   447  					}
   448  				}
				allSuccess = allSuccess && localSuccess
   450  			}
   451  		}
   452  		if !allSuccess {
   453  			return fmt.Errorf("Not all chunks succeeded!")
   454  		}
   455  		return nil
   456  	})
   457  
   458  	if result.Error != nil {
   459  		return result.Error
   460  	}
   461  
   462  	log.Info("Simulation terminated")
   463  	return nil
   464  }
   465  
//startSyncing requests syncing subscriptions for a node:
//it iterates the node's kademlia bins via EachBin and, for every bin,
//requests a SYNC stream subscription from the connected peer at that
//proximity order. It returns the number of subscriptions requested.
   470  func startSyncing(r *Registry, conf *synctestConfig) (int, error) {
   471  	var err error
   472  
   473  	kad, ok := r.delivery.overlay.(*network.Kademlia)
   474  	if !ok {
   475  		return 0, fmt.Errorf("Not a Kademlia!")
   476  	}
   477  
   478  	subCnt := 0
	//iterate over each bin and request the subscriptions needed for it
   480  	kad.EachBin(r.addr.Over(), pof, 0, func(conn network.OverlayConn, po int) bool {
		//the history range for the subscription request below
   482  		histRange := &Range{}
   483  
   484  		subCnt++
   485  		err = r.RequestSubscription(conf.addrToIDMap[string(conn.Address())], NewStream("SYNC", FormatSyncBinKey(uint8(po)), true), histRange, Top)
   486  		if err != nil {
			log.Error(fmt.Sprintf("Error in RequestSubscription! %v", err))
   488  			return false
   489  		}
   490  		return true
   491  
   492  	})
   493  	return subCnt, nil
   494  }
   495  
//mapKeysToNodes maps each generated chunk hash to the nodes expected to store it
   497  func mapKeysToNodes(conf *synctestConfig) {
   498  	kmap := make(map[string][]int)
   499  	nodemap := make(map[string][]int)
	//build a proximity order tree (pot) of all node overlay addresses
   501  	np := pot.NewPot(nil, 0)
   502  	indexmap := make(map[string]int)
   503  	for i, a := range conf.addrs {
   504  		indexmap[string(a)] = i
   505  		np, _, _ = pot.Add(np, a, pof)
   506  	}
	//for every chunk hash, find the closest node addresses in the pot
   508  	log.Trace(fmt.Sprintf("Generated hash chunk(s): %v", conf.hashes))
   509  	for i := 0; i < len(conf.hashes); i++ {
		pl := 256 //proximity limit; 256 means it has not been set yet
   511  		var nns []int
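		//walk node addresses in order of decreasing proximity to this chunk hash:
		//collect nodes until at least testMinProxBinSize have been found, then only
		//accept further nodes at the same proximity order and stop at the first more distant one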
   512  		np.EachNeighbour([]byte(conf.hashes[i]), pof, func(val pot.Val, po int) bool {
   513  			a := val.([]byte)
   514  			if pl < 256 && pl != po {
   515  				return false
   516  			}
   517  			if pl == 256 || pl == po {
   518  				log.Trace(fmt.Sprintf("appending %s", conf.addrToIDMap[string(a)]))
   519  				nns = append(nns, indexmap[string(a)])
   520  				nodemap[string(a)] = append(nodemap[string(a)], i)
   521  			}
   522  			if pl == 256 && len(nns) >= testMinProxBinSize {
				//once at least testMinProxBinSize nearest neighbours have been collected,
				//remember their proximity order; only nodes at the same po are added after this
   525  				pl = po
   526  			}
   527  			return true
   528  		})
   529  		kmap[string(conf.hashes[i])] = nns
   530  	}
   531  	for addr, chunks := range nodemap {
		//record which chunk indexes are expected to be found on the node with this address
   533  		conf.idToChunksMap[conf.addrToIDMap[addr]] = chunks
   534  	}
   535  	log.Debug(fmt.Sprintf("Map of expected chunks by ID: %v", conf.idToChunksMap))
   536  	conf.chunksToNodesMap = kmap
   537  }
   538  
//uploadFileToSingleNodeStore stores chunkCount chunk-sized random files in the given local store and returns their root addresses
   540  func uploadFileToSingleNodeStore(id discover.NodeID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) {
   541  	log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
   542  	fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
   543  	size := chunkSize
   544  	var rootAddrs []storage.Address
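	//each iteration stores one chunk-sized random "file"; the returned root
	//address is what the sync checks above look up on the individual nodes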
   545  	for i := 0; i < chunkCount; i++ {
   546  		rk, wait, err := fileStore.Store(context.TODO(), io.LimitReader(crand.Reader, int64(size)), int64(size), false)
   547  		if err != nil {
   548  			return nil, err
   549  		}
   550  		err = wait(context.TODO())
   551  		if err != nil {
   552  			return nil, err
   553  		}
		rootAddrs = append(rootAddrs, rk)
   555  	}
   556  
   557  	return rootAddrs, nil
   558  }
   559