github.com/aaa256/atlantis@v0.0.0-20210707112435-42ee889287a2/swarm/network/stream/snapshot_sync_test.go

     1  // Copyright 2018 The go-athereum Authors
     2  // This file is part of the go-athereum library.
     3  //
     4  // The go-athereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-athereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-athereum library. If not, see <http://www.gnu.org/licenses/>.
    16  package stream
    17  
    18  import (
    19  	"context"
    20  	crand "crypto/rand"
    21  	"encoding/json"
    22  	"flag"
    23  	"fmt"
    24  	"io"
    25  	"io/ioutil"
    26  	"math/rand"
    27  	"os"
    28  	"sync"
    29  	"testing"
    30  	"time"
    31  
    32  	"github.com/athereum/go-athereum/common"
    33  	"github.com/athereum/go-athereum/log"
    34  	"github.com/athereum/go-athereum/p2p"
    35  	"github.com/athereum/go-athereum/p2p/discover"
    36  	"github.com/athereum/go-athereum/p2p/simulations"
    37  	"github.com/athereum/go-athereum/p2p/simulations/adapters"
    38  	"github.com/athereum/go-athereum/rpc"
    39  	"github.com/athereum/go-athereum/swarm/network"
    40  	streamTesting "github.com/athereum/go-athereum/swarm/network/stream/testing"
    41  	"github.com/athereum/go-athereum/swarm/pot"
    42  	"github.com/athereum/go-athereum/swarm/storage"
    43  )
    44  
    45  const testMinProxBinSize = 2
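         //MaxTimeout is expressed in seconds; it bounds both the subscription watch and the simulation run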
    46  const MaxTimeout = 600
    47  
    48  var (
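         	//pof is the proximity order function over 256-bit addresses, used for mapping chunks to nodes and for iterating kademlia bins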
    49  	pof = pot.DefaultPof(256)
    50  
    51  	conf     *synctestConfig
    52  	ids      []discover.NodeID
    53  	datadirs map[discover.NodeID]string
    54  	ppmap    map[string]*network.PeerPot
    55  
    56  	live    bool
    57  	history bool
    58  
    59  	longrunning = flag.Bool("longrunning", false, "do run long-running tests")
    60  )
    61  
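         //synctestConfig collects the state shared by a single sync test run:
         //generated chunk hashes, node overlay addresses and the expected chunk-to-node assignments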
    62  type synctestConfig struct {
    63  	addrs            [][]byte
    64  	hashes           []storage.Address
    65  	idToChunksMap    map[discover.NodeID][]int
    66  	chunksToNodesMap map[string][]int
    67  	addrToIdMap      map[string]discover.NodeID
    68  }
    69  
    70  func init() {
    71  	rand.Seed(time.Now().Unix())
    72  }
    73  
     74  //common_test needs to initialize the test in an init() func
     75  //in order for adapters to register the NewStreamerService;
     76  //this service depends on some global variables
     77  //which we thus need to initialize first, in this init-like setup func.
    78  func initSyncTest() {
    79  	//assign the toAddr func so NewStreamerService can build the addr
    80  	toAddr = func(id discover.NodeID) *network.BzzAddr {
    81  		addr := network.NewAddrFromNodeID(id)
    82  		return addr
    83  	}
    84  	//global func to create local store
    85  	if *useMockStore {
    86  		createStoreFunc = createMockStore
    87  	} else {
    88  		createStoreFunc = createTestLocalStorageForId
    89  	}
    90  	//local stores
    91  	stores = make(map[discover.NodeID]storage.ChunkStore)
    92  	//data directories for each node and store
    93  	datadirs = make(map[discover.NodeID]string)
    94  	//deliveries for each node
    95  	deliveries = make(map[discover.NodeID]*Delivery)
    96  	//registries, map of discover.NodeID to its streamer
    97  	registries = make(map[discover.NodeID]*TestRegistry)
     98  	//not needed for this test but required from common_test for NewStreamerService
     99  	waitPeerErrC = make(chan error)
    100  	//also not needed for this test but required for NewStreamerService
   101  	peerCount = func(id discover.NodeID) int {
   102  		if ids[0] == id || ids[len(ids)-1] == id {
   103  			return 1
   104  		}
   105  		return 2
   106  	}
   107  	if *useMockStore {
   108  		createGlobalStore()
   109  	}
   110  }
   111  
   112  //This test is a syncing test for nodes.
   113  //One node is randomly selected to be the pivot node.
   114  //A configurable number of chunks and nodes can be
    115  //provided to the test; the chunks are uploaded
   116  //to the pivot node, and we check that nodes get the chunks
   117  //they are expected to store based on the syncing protocol.
   118  //Number of chunks and nodes can be provided via commandline too.
   119  func TestSyncing(t *testing.T) {
   120  	//if nodes/chunks have been provided via commandline,
   121  	//run the tests with these values
   122  	if *nodes != 0 && *chunks != 0 {
   123  		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
   124  		testSyncing(t, *chunks, *nodes)
   125  	} else {
   126  		var nodeCnt []int
   127  		var chnkCnt []int
   128  		//if the `longrunning` flag has been provided
   129  		//run more test combinations
   130  		if *longrunning {
   131  			chnkCnt = []int{1, 8, 32, 256, 1024}
   132  			nodeCnt = []int{16, 32, 64, 128, 256}
   133  		} else {
   134  			//default test
   135  			chnkCnt = []int{4, 32}
   136  			nodeCnt = []int{32, 16}
   137  		}
   138  		for _, chnk := range chnkCnt {
   139  			for _, n := range nodeCnt {
    140  				log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", chnk, n))
   141  				testSyncing(t, chnk, n)
   142  			}
   143  		}
   144  	}
   145  }
   146  
    147  //Run the tests
    148  //Every test runs 3 times: live only, history only, and live AND history
   149  func testSyncing(t *testing.T, chunkCount int, nodeCount int) {
   150  	//test live and NO history
   151  	log.Info("Testing live and no history")
   152  	live = true
   153  	history = false
   154  	err := runSyncTest(chunkCount, nodeCount, live, history)
   155  	if err != nil {
   156  		t.Fatal(err)
   157  	}
   158  	//test history only
   159  	log.Info("Testing history only")
   160  	live = false
   161  	history = true
   162  	err = runSyncTest(chunkCount, nodeCount, live, history)
   163  	if err != nil {
   164  		t.Fatal(err)
   165  	}
   166  	//finally test live and history
   167  	log.Info("Testing live and history")
   168  	live = true
   169  	err = runSyncTest(chunkCount, nodeCount, live, history)
   170  	if err != nil {
   171  		t.Fatal(err)
   172  	}
   173  }
   174  
   175  /*
   176  The test generates the given number of chunks
   177  
    178  How the upload is done depends on the global
    179  `live` and `history` variables;
    180  
    181  If `live` is set, stream subscriptions are established first, then
    182  chunks are uploaded to a random node.
    183  
    184  If `history` is enabled, the chunks are uploaded first, then subscriptions are built up.
    185  
    186  For every chunk generated, the nearest node addresses
    187  are identified, and we verify that the nodes closest to the
    188  chunk addresses actually do have the chunks in their local stores.
   189  
   190  The test loads a snapshot file to construct the swarm network,
   191  assuming that the snapshot file identifies a healthy
   192  kademlia network. The snapshot should have 'streamer' in its service list.
   193  
   194  For every test run, a series of three tests will be executed:
   195  - a LIVE test first, where first subscriptions are established,
   196    then a file (random chunks) is uploaded
   197  - a HISTORY test, where the file is uploaded first, and then
   198    the subscriptions are established
   199  - a crude LIVE AND HISTORY test last, where (different) chunks
   200    are uploaded twice, once before and once after subscriptions
   201  */
   202  func runSyncTest(chunkCount int, nodeCount int, live bool, history bool) error {
   203  	initSyncTest()
    204  	//the ids of the snapshot nodes, initialized only now as we need nodeCount
   205  	ids = make([]discover.NodeID, nodeCount)
   206  	//initialize the test struct
   207  	conf = &synctestConfig{}
   208  	//map of discover ID to indexes of chunks expected at that ID
   209  	conf.idToChunksMap = make(map[discover.NodeID][]int)
   210  	//map of overlay address to discover ID
   211  	conf.addrToIdMap = make(map[string]discover.NodeID)
   212  	//array where the generated chunk hashes will be stored
   213  	conf.hashes = make([]storage.Address, 0)
   214  	//channel to trigger node checks in the simulation
   215  	trigger := make(chan discover.NodeID)
   216  	//channel to check for disconnection errors
   217  	disconnectC := make(chan error)
   218  	//channel to close disconnection watcher routine
   219  	quitC := make(chan struct{})
   220  
   221  	//load nodes from the snapshot file
   222  	net, err := initNetWithSnapshot(nodeCount)
   223  	if err != nil {
   224  		return err
   225  	}
   226  	var rpcSubscriptionsWg sync.WaitGroup
   227  	//do cleanup after test is terminated
   228  	defer func() {
    229  		// close quitC channel to signal all goroutines to clean up
   230  		// before calling simulation network shutdown.
   231  		close(quitC)
   232  		//wait for all rpc subscriptions to unsubscribe
   233  		rpcSubscriptionsWg.Wait()
   234  		//shutdown the snapshot network
   235  		net.Shutdown()
   236  		//after the test, clean up local stores initialized with createLocalStoreForId
   237  		localStoreCleanup()
   238  		//finally clear all data directories
   239  		datadirsCleanup()
   240  	}()
   241  	//get the nodes of the network
   242  	nodes := net.GetNodes()
   243  	//select one index at random...
   244  	idx := rand.Intn(len(nodes))
    245  	//...and get the node at that index
   246  	//this is the node selected for upload
   247  	node := nodes[idx]
   248  
   249  	log.Info("Initializing test config")
   250  	//iterate over all nodes...
   251  	for c := 0; c < len(nodes); c++ {
   252  		//create an array of discovery node IDs
   253  		ids[c] = nodes[c].ID()
   254  		//get the kademlia overlay address from this ID
   255  		a := network.ToOverlayAddr(ids[c].Bytes())
   256  		//append it to the array of all overlay addresses
   257  		conf.addrs = append(conf.addrs, a)
   258  		//the proximity calculation is on overlay addr,
   259  		//the p2p/simulations check func triggers on discover.NodeID,
   260  		//so we need to know which overlay addr maps to which nodeID
   261  		conf.addrToIdMap[string(a)] = ids[c]
   262  	}
   263  	log.Info("Test config successfully initialized")
   264  
    265  	//peer pot map, needed for the kademlia health checks in the action below
   266  	ppmap = network.NewPeerPotMap(testMinProxBinSize, conf.addrs)
   267  
   268  	//define the action to be performed before the test checks: start syncing
   269  	action := func(ctx context.Context) error {
   270  		//first run the health check on all nodes,
   271  		//wait until nodes are all healthy
   272  		ticker := time.NewTicker(200 * time.Millisecond)
   273  		defer ticker.Stop()
   274  		for range ticker.C {
   275  			healthy := true
   276  			for _, id := range ids {
   277  				r := registries[id]
   278  				//PeerPot for this node
   279  				addr := common.Bytes2Hex(network.ToOverlayAddr(id.Bytes()))
   280  				pp := ppmap[addr]
   281  				//call Healthy RPC
   282  				h := r.delivery.overlay.Healthy(pp)
   283  				//print info
   284  				log.Debug(r.delivery.overlay.String())
   285  				log.Debug(fmt.Sprintf("IS HEALTHY: %t", h.GotNN && h.KnowNN && h.Full))
   286  				if !h.GotNN || !h.Full {
   287  					healthy = false
   288  					break
   289  				}
   290  			}
   291  			if healthy {
   292  				break
   293  			}
   294  		}
   295  
   296  		if history {
   297  			log.Info("Uploading for history")
   298  			//If testing only history, we upload the chunk(s) first
   299  			chunks, err := uploadFileToSingleNodeStore(node.ID(), chunkCount)
   300  			if err != nil {
   301  				return err
   302  			}
   303  			conf.hashes = append(conf.hashes, chunks...)
   304  			//finally map chunks to the closest addresses
   305  			mapKeysToNodes(conf)
   306  		}
   307  
   308  		//variables needed to wait for all subscriptions established before uploading
   309  		errc := make(chan error)
   310  
   311  		//now setup and start event watching in order to know when we can upload
   312  		ctx, watchCancel := context.WithTimeout(context.Background(), MaxTimeout*time.Second)
   313  		defer watchCancel()
   314  
   315  		log.Info("Setting up stream subscription")
   316  
   317  		//We need two iterations, one to subscribe to the subscription events
   318  		//(so we know when setup phase is finished), and one to
   319  		//actually run the stream subscriptions. We can't do it in the same iteration,
   320  		//because while the first nodes in the loop are setting up subscriptions,
   321  		//the latter ones have not subscribed to listen to peer events yet,
   322  		//and then we miss events.
   323  
   324  		//first iteration: setup disconnection watcher and subscribe to peer events
   325  		for j, id := range ids {
   326  			log.Trace(fmt.Sprintf("Subscribe to subscription events: %d", j))
   327  			client, err := net.GetNode(id).Client()
   328  			if err != nil {
   329  				return err
   330  			}
   331  
   332  			wsDoneC := watchSubscriptionEvents(ctx, id, client, errc, quitC)
    333  			// if wsDoneC is nil, an error occurred and has already been sent to the errc channel
   334  			if wsDoneC == nil {
   335  				continue
   336  			}
   337  			rpcSubscriptionsWg.Add(1)
   338  			go func() {
   339  				<-wsDoneC
   340  				rpcSubscriptionsWg.Done()
   341  			}()
   342  
   343  			//watch for peers disconnecting
   344  			wdDoneC, err := streamTesting.WatchDisconnections(id, client, disconnectC, quitC)
   345  			if err != nil {
   346  				return err
   347  			}
   348  			rpcSubscriptionsWg.Add(1)
   349  			go func() {
   350  				<-wdDoneC
   351  				rpcSubscriptionsWg.Done()
   352  			}()
   353  		}
   354  
   355  		//second iteration: start syncing
   356  		for j, id := range ids {
   357  			log.Trace(fmt.Sprintf("Start syncing subscriptions: %d", j))
   358  			client, err := net.GetNode(id).Client()
   359  			if err != nil {
   360  				return err
   361  			}
   362  			//start syncing!
   363  			var cnt int
   364  			err = client.CallContext(ctx, &cnt, "stream_startSyncing")
   365  			if err != nil {
   366  				return err
   367  			}
   368  			//increment the number of subscriptions we need to wait for
   369  			//by the count returned from startSyncing (SYNC subscriptions)
   370  			subscriptionCount += cnt
   371  		}
   372  
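         		//at this point subscriptionCount holds the number of SYNC subscription confirmations still to wait for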
    373  		//now wait until all expected subscriptions have been established
    374  		//`watchSubscriptionEvents` writes a `nil` value to errc for each one
   375  		for err := range errc {
   376  			if err != nil {
   377  				return err
   378  			}
   379  			//`nil` received, decrement count
   380  			subscriptionCount--
   381  			//all subscriptions received
   382  			if subscriptionCount == 0 {
   383  				break
   384  			}
   385  		}
   386  
   387  		log.Info("Stream subscriptions successfully requested")
   388  		if live {
   389  			//now upload the chunks to the selected random single node
   390  			hashes, err := uploadFileToSingleNodeStore(node.ID(), chunkCount)
   391  			if err != nil {
   392  				return err
   393  			}
   394  			conf.hashes = append(conf.hashes, hashes...)
   395  			//finally map chunks to the closest addresses
   396  			log.Debug(fmt.Sprintf("Uploaded chunks for live syncing: %v", conf.hashes))
   397  			mapKeysToNodes(conf)
   398  			log.Info(fmt.Sprintf("Uploaded %d chunks to random single node", chunkCount))
   399  		}
   400  
   401  		log.Info("Action terminated")
   402  
   403  		return nil
   404  	}
   405  
   406  	//check defines what will be checked during the test
   407  	check := func(ctx context.Context, id discover.NodeID) (bool, error) {
   408  		select {
   409  		case <-ctx.Done():
   410  			return false, ctx.Err()
   411  		case e := <-disconnectC:
   412  			log.Error(e.Error())
   413  			return false, fmt.Errorf("Disconnect event detected, network unhealthy")
   414  		default:
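         			//no disconnect event and the context is still alive, proceed with the check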
   415  		}
   416  		log.Trace(fmt.Sprintf("Checking node: %s", id))
   417  		//select the local store for the given node
    418  		//if there is more than one chunk, the test only succeeds if all expected chunks are found
   419  		allSuccess := true
   420  
   421  		//all the chunk indexes which are supposed to be found for this node
   422  		localChunks := conf.idToChunksMap[id]
   423  		//for each expected chunk, check if it is in the local store
   424  		for _, ch := range localChunks {
   425  			//get the real chunk by the index in the index array
   426  			chunk := conf.hashes[ch]
    427  			log.Trace(fmt.Sprintf("checking node for chunk: %s", chunk))
   428  			//check if the expected chunk is indeed in the localstore
   429  			var err error
   430  			if *useMockStore {
   431  				if globalStore == nil {
    432  					return false, fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil")
   433  				}
   434  				//use the globalStore if the mockStore should be used; in that case,
   435  				//the complete localStore stack is bypassed for getting the chunk
   436  				_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
   437  			} else {
   438  				//use the actual localstore
   439  				lstore := stores[id]
   440  				_, err = lstore.Get(chunk)
   441  			}
   442  			if err != nil {
   443  				log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
   444  				allSuccess = false
   445  			} else {
   446  				log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
   447  			}
   448  		}
   449  
   450  		return allSuccess, nil
   451  	}
   452  
   453  	//for each tick, run the checks on all nodes
   454  	timingTicker := time.NewTicker(time.Second * 1)
   455  	defer timingTicker.Stop()
   456  	go func() {
   457  		for range timingTicker.C {
   458  			for i := 0; i < len(ids); i++ {
   459  				log.Trace(fmt.Sprintf("triggering step %d, id %s", i, ids[i]))
   460  				trigger <- ids[i]
   461  			}
   462  		}
   463  	}()
   464  
   465  	log.Info("Starting simulation run...")
   466  
   467  	timeout := MaxTimeout * time.Second
   468  	ctx, cancel := context.WithTimeout(context.Background(), timeout)
   469  	defer cancel()
   470  
   471  	//run the simulation
   472  	result := simulations.NewSimulation(net).Run(ctx, &simulations.Step{
   473  		Action:  action,
   474  		Trigger: trigger,
   475  		Expect: &simulations.Expectation{
   476  			Nodes: ids,
   477  			Check: check,
   478  		},
   479  	})
   480  
   481  	if result.Error != nil {
   482  		return result.Error
   483  	}
   484  	log.Info("Simulation terminated")
   485  	return nil
   486  }
   487  
   488  //the server func to start syncing
   489  //issues `RequestSubscriptionMsg` to peers, based on po, by iterating over
   490  //the kademlia's `EachBin` function.
   491  //returns the number of subscriptions requested
   492  func (r *TestRegistry) StartSyncing(ctx context.Context) (int, error) {
   493  	var err error
   494  
   495  	if log.Lvl(*loglevel) == log.LvlDebug {
   496  		//PeerPot for this node
   497  		addr := common.Bytes2Hex(r.addr.OAddr)
   498  		pp := ppmap[addr]
   499  		//call Healthy RPC
   500  		h := r.delivery.overlay.Healthy(pp)
   501  		//print info
   502  		log.Debug(r.delivery.overlay.String())
   503  		log.Debug(fmt.Sprintf("IS HEALTHY: %t", h.GotNN && h.KnowNN && h.Full))
   504  	}
   505  
   506  	kad, ok := r.delivery.overlay.(*network.Kademlia)
   507  	if !ok {
   508  		return 0, fmt.Errorf("Not a Kademlia!")
   509  	}
   510  
   511  	subCnt := 0
   512  	//iterate over each bin and solicit needed subscription to bins
   513  	kad.EachBin(r.addr.Over(), pof, 0, func(conn network.OverlayConn, po int) bool {
    514  		//request a SYNC subscription for this bin (po) from the peer
   515  		log.Debug(fmt.Sprintf("Requesting subscription by: registry %s from peer %s for bin: %d", r.addr.ID(), conf.addrToIdMap[string(conn.Address())], po))
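         		//when testing history, pass a non-nil (empty) Range so existing chunks are synced as well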
   516  		var histRange *Range
   517  		if history {
   518  			histRange = &Range{}
   519  		}
   520  
   521  		subCnt++
   522  		err = r.RequestSubscription(conf.addrToIdMap[string(conn.Address())], NewStream("SYNC", FormatSyncBinKey(uint8(po)), live), histRange, Top)
   523  		if err != nil {
    524  			log.Error(fmt.Sprintf("Error in RequestSubscription! %v", err))
   525  			return false
   526  		}
   527  		return true
   528  
   529  	})
   530  	return subCnt, nil
   531  }
   532  
   533  //map chunk keys to addresses which are responsible
   534  func mapKeysToNodes(conf *synctestConfig) {
   535  	kmap := make(map[string][]int)
   536  	nodemap := make(map[string][]int)
    537  	//build a pot of the node overlay addresses
   538  	np := pot.NewPot(nil, 0)
   539  	indexmap := make(map[string]int)
   540  	for i, a := range conf.addrs {
   541  		indexmap[string(a)] = i
   542  		np, _, _ = pot.Add(np, a, pof)
   543  	}
    544  	//for each chunk hash, run EachNeighbour on the address pot to identify the closest nodes
    545  	log.Trace(fmt.Sprintf("Generated chunk hash(es): %v", conf.hashes))
   546  	for i := 0; i < len(conf.hashes); i++ {
   547  		pl := 256 //highest possible proximity
   548  		var nns []int
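         		//EachNeighbour walks the node addresses in order of decreasing proximity to the chunk;
         		//collect at least testMinProxBinSize of the closest nodes, keep adding nodes that share
         		//the same proximity order, and stop at the first node in a more distant bin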
   549  		np.EachNeighbour([]byte(conf.hashes[i]), pof, func(val pot.Val, po int) bool {
   550  			a := val.([]byte)
   551  			if pl < 256 && pl != po {
   552  				return false
   553  			}
   554  			if pl == 256 || pl == po {
   555  				log.Trace(fmt.Sprintf("appending %s", conf.addrToIdMap[string(a)]))
   556  				nns = append(nns, indexmap[string(a)])
   557  				nodemap[string(a)] = append(nodemap[string(a)], i)
   558  			}
   559  			if pl == 256 && len(nns) >= testMinProxBinSize {
    560  				//testMinProxBinSize has been reached at this po, so save it
   561  				//we will add all other nodes at the same po
   562  				pl = po
   563  			}
   564  			return true
   565  		})
   566  		kmap[string(conf.hashes[i])] = nns
   567  	}
   568  	for addr, chunks := range nodemap {
   569  		//this selects which chunks are expected to be found with the given node
   570  		conf.idToChunksMap[conf.addrToIdMap[addr]] = chunks
   571  	}
   572  	log.Debug(fmt.Sprintf("Map of expected chunks by ID: %v", conf.idToChunksMap))
   573  	conf.chunksToNodesMap = kmap
   574  }
   575  
   576  //upload a file(chunks) to a single local node store
   577  func uploadFileToSingleNodeStore(id discover.NodeID, chunkCount int) ([]storage.Address, error) {
   578  	log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
   579  	lstore := stores[id]
   580  	size := chunkSize
   581  	fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
   582  	var rootAddrs []storage.Address
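         	//each iteration stores one chunk-sized piece of random data, so each returned root address should correspond to a single chunk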
   583  	for i := 0; i < chunkCount; i++ {
    584  		rk, wait, err := fileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false)
    585  		if err != nil {
    586  			return nil, err
    587  		}
    588  		wait()
    589  		rootAddrs = append(rootAddrs, rk)
   590  	}
   591  
   592  	return rootAddrs, nil
   593  }
   594  
   595  //initialize a network from a snapshot
   596  func initNetWithSnapshot(nodeCount int) (*simulations.Network, error) {
   597  
   598  	var a adapters.NodeAdapter
   599  	//add the streamer service to the node adapter
   600  
   601  	if *adapter == "exec" {
   602  		dirname, err := ioutil.TempDir(".", "")
   603  		if err != nil {
   604  			return nil, err
   605  		}
   606  		a = adapters.NewExecAdapter(dirname)
   607  	} else if *adapter == "tcp" {
   608  		a = adapters.NewTCPAdapter(services)
   609  	} else if *adapter == "sim" {
   610  		a = adapters.NewSimAdapter(services)
   611  	}
   612  
   613  	log.Info("Setting up Snapshot network")
   614  
   615  	net := simulations.NewNetwork(a, &simulations.NetworkConfig{
   616  		ID:             "0",
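         		//"streamer" must match the name of the registered stream service (see the snapshot requirements above)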
   617  		DefaultService: "streamer",
   618  	})
   619  
   620  	f, err := os.Open(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
   621  	if err != nil {
   622  		return nil, err
   623  	}
   624  	defer f.Close()
   625  	jsonbyte, err := ioutil.ReadAll(f)
   626  	if err != nil {
   627  		return nil, err
   628  	}
   629  	var snap simulations.Snapshot
   630  	err = json.Unmarshal(jsonbyte, &snap)
   631  	if err != nil {
   632  		return nil, err
   633  	}
   634  
   635  	//the snapshot probably has the property EnableMsgEvents not set
   636  	//just in case, set it to true!
   637  	//(we need this to wait for messages before uploading)
   638  	for _, n := range snap.Nodes {
   639  		n.Node.Config.EnableMsgEvents = true
   640  	}
   641  
   642  	log.Info("Waiting for p2p connections to be established...")
   643  
   644  	//now we can load the snapshot
   645  	err = net.Load(&snap)
   646  	if err != nil {
   647  		return nil, err
   648  	}
   649  	log.Info("Snapshot loaded")
   650  	return net, nil
   651  }
   652  
   653  //we want to wait for subscriptions to be established before uploading to test
   654  //that live syncing is working correctly
   655  func watchSubscriptionEvents(ctx context.Context, id discover.NodeID, client *rpc.Client, errc chan error, quitC chan struct{}) (doneC <-chan struct{}) {
   656  	events := make(chan *p2p.PeerEvent)
   657  	sub, err := client.Subscribe(context.Background(), "admin", events, "peerEvents")
   658  	if err != nil {
   659  		log.Error(err.Error())
   660  		errc <- fmt.Errorf("error getting peer events for node %v: %s", id, err)
   661  		return
   662  	}
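         	//c is closed when the watcher goroutine exits, so callers can wait for cleanup via the returned doneC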
   663  	c := make(chan struct{})
   664  
   665  	go func() {
   666  		defer func() {
   667  			log.Trace("watch subscription events: unsubscribe", "id", id)
   668  			sub.Unsubscribe()
   669  			close(c)
   670  		}()
   671  
   672  		for {
   673  			select {
   674  			case <-quitC:
   675  				return
   676  			case <-ctx.Done():
   677  				select {
   678  				case errc <- ctx.Err():
   679  				case <-quitC:
   680  				}
   681  				return
   682  			case e := <-events:
   683  				//just catch SubscribeMsg
   684  				if e.Type == p2p.PeerEventTypeMsgRecv && e.Protocol == "stream" && e.MsgCode != nil && *e.MsgCode == 4 {
   685  					errc <- nil
   686  				}
   687  			case err := <-sub.Err():
   688  				if err != nil {
   689  					select {
   690  					case errc <- fmt.Errorf("error getting peer events for node %v: %v", id, err):
   691  					case <-quitC:
   692  					}
   693  					return
   694  				}
   695  			}
   696  		}
   697  	}()
   698  	return c
   699  }
   700  
   701  //create a local store for the given node
   702  func createTestLocalStorageForId(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error) {
   703  	var datadir string
   704  	var err error
   705  	datadir, err = ioutil.TempDir("", fmt.Sprintf("syncer-test-%s", id.TerminalString()))
   706  	if err != nil {
   707  		return nil, err
   708  	}
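         	//remember the data directory so datadirsCleanup can remove it after the test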
   709  	datadirs[id] = datadir
   710  	var store storage.ChunkStore
   711  	params := storage.NewDefaultLocalStoreParams()
   712  	params.ChunkDbPath = datadir
   713  	params.BaseKey = addr.Over()
   714  	store, err = storage.NewTestLocalStoreForAddr(params)
   715  	if err != nil {
   716  		return nil, err
   717  	}
   718  	return store, nil
   719  }