github.com/etsc3259/etsc@v0.0.0-20190109113336-a9c2c10f9c95/swarm/network/stream/snapshot_sync_test.go

// Copyright 2018 The go-etsc Authors
// This file is part of the go-etsc library.
//
// The go-etsc library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-etsc library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-etsc library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"fmt"
	"os"
	"runtime"
	"sync"
	"testing"
	"time"

	"github.com/ETSC3259/etsc/common"
	"github.com/ETSC3259/etsc/log"
	"github.com/ETSC3259/etsc/node"
	"github.com/ETSC3259/etsc/p2p"
	"github.com/ETSC3259/etsc/p2p/enode"
	"github.com/ETSC3259/etsc/p2p/simulations"
	"github.com/ETSC3259/etsc/p2p/simulations/adapters"
	"github.com/ETSC3259/etsc/swarm/network"
	"github.com/ETSC3259/etsc/swarm/network/simulation"
	"github.com/ETSC3259/etsc/swarm/pot"
	"github.com/ETSC3259/etsc/swarm/state"
	"github.com/ETSC3259/etsc/swarm/storage"
	mockdb "github.com/ETSC3259/etsc/swarm/storage/mock/db"
	"github.com/ETSC3259/etsc/swarm/testutil"
)

const MaxTimeout = 600

type synctestConfig struct {
	addrs         [][]byte
	hashes        []storage.Address
	idToChunksMap map[enode.ID][]int
	//chunksToNodesMap map[string][]int
	addrToIDMap map[string]enode.ID
}

const (
	// Simulation event types emitted as chunks are created, offered, wanted,
	// delivered and finally arrive on the expected nodes, and when the
	// simulation terminates.
	EventTypeChunkCreated   simulations.EventType = "chunkCreated"
	EventTypeChunkOffered   simulations.EventType = "chunkOffered"
	EventTypeChunkWanted    simulations.EventType = "chunkWanted"
	EventTypeChunkDelivered simulations.EventType = "chunkDelivered"
	EventTypeChunkArrived   simulations.EventType = "chunkArrived"
	EventTypeSimTerminated  simulations.EventType = "simTerminated"
)

// Tests in this file should not request chunks from peers.
// This function panics to indicate a problem if a request has been made.
func dummyRequestFromPeers(_ context.Context, req *network.Request) (*enode.ID, chan struct{}, error) {
	panic(fmt.Sprintf("unexpected request: address %s, source %s", req.Addr.String(), req.Source.String()))
}

//This test is a syncing test for nodes.
//One node is randomly selected to be the pivot node.
//A configurable number of chunks and nodes can be
//provided to the test; the chunks are uploaded
//to the pivot node, and we check that nodes get the chunks
//they are expected to store based on the syncing protocol.
//Number of chunks and nodes can be provided via commandline too.
func TestSyncingViaGlobalSync(t *testing.T) {
	if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
		t.Skip("Flaky on mac on travis")
	}
	//if nodes/chunks have been provided via commandline,
	//run the tests with these values
	if *nodes != 0 && *chunks != 0 {
		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
		testSyncingViaGlobalSync(t, *chunks, *nodes)
	} else {
		var nodeCnt []int
		var chnkCnt []int
		//if the `longrunning` flag has been provided
		//run more test combinations
		if *longrunning {
			chnkCnt = []int{1, 8, 32, 256, 1024}
			nodeCnt = []int{16, 32, 64, 128, 256}
		} else {
			//default test
			chnkCnt = []int{4, 32}
			nodeCnt = []int{32, 16}
		}
		for _, chnk := range chnkCnt {
			for _, n := range nodeCnt {
				log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", chnk, n))
				testSyncingViaGlobalSync(t, chnk, n)
			}
		}
	}
}

func TestSyncingViaDirectSubscribe(t *testing.T) {
	if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
		t.Skip("Flaky on mac on travis")
	}
	//if nodes/chunks have been provided via commandline,
	//run the tests with these values
	if *nodes != 0 && *chunks != 0 {
		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
		err := testSyncingViaDirectSubscribe(t, *chunks, *nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		var nodeCnt []int
		var chnkCnt []int
		//if the `longrunning` flag has been provided
		//run more test combinations
		if *longrunning {
			chnkCnt = []int{1, 8, 32, 256, 1024}
			nodeCnt = []int{32, 16}
		} else {
			//default test
			chnkCnt = []int{4, 32}
			nodeCnt = []int{32, 16}
		}
		for _, chnk := range chnkCnt {
			for _, n := range nodeCnt {
				log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", chnk, n))
				err := testSyncingViaDirectSubscribe(t, chnk, n)
				if err != nil {
					t.Fatal(err)
				}
			}
		}
	}
}

var simServiceMap = map[string]simulation.ServiceFunc{
	"streamer": streamerFunc,
}

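// streamerFunc constructs the stream service for a simulated node: it sets up
// a LocalStore/NetStore pair, a kademlia overlay and a Registry that
// auto-subscribes to SYNC streams, with retrieval disabled so chunks can only
// arrive through syncing.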
func streamerFunc(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
	n := ctx.Config.Node()
	addr := network.NewAddr(n)
	store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
	if err != nil {
		return nil, nil, err
	}
	bucket.Store(bucketKeyStore, store)
	localStore := store.(*storage.LocalStore)
	netStore, err := storage.NewNetStore(localStore, nil)
	if err != nil {
		return nil, nil, err
	}
	kad := network.NewKademlia(addr.Over(), network.NewKadParams())
	delivery := NewDelivery(kad, netStore)
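	//any attempt to fetch a chunk from the network would go through
	//dummyRequestFromPeers and panic, so chunks can only be obtained via syncing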
	netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New

	r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
		Retrieval:       RetrievalDisabled,
		Syncing:         SyncingAutoSubscribe,
		SyncUpdateDelay: 3 * time.Second,
	})

	bucket.Store(bucketKeyRegistry, r)

	cleanup = func() {
		os.RemoveAll(datadir)
		netStore.Close()
		r.Close()
	}

	return r, cleanup, nil
}

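// testSyncingViaGlobalSync loads the snapshot for the given node count, waits
// for the network to become healthy and then delegates the upload-and-check
// logic to runSim.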
func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
	sim := simulation.New(simServiceMap)
	defer sim.Close()

	log.Info("Initializing test config")

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		t.Fatal(err)
	}

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancelSimRun()

	if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
		t.Fatal(err)
	}

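	//any peer disconnection during the simulation is treated as a test failure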
	disconnections := sim.PeerEvents(
		context.Background(),
		sim.NodeIDs(),
		simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
	)

	go func() {
		for d := range disconnections {
			log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
			//report the failure and cancel the run; t.Fatal must not be
			//called from outside the test goroutine
			t.Error("unexpected disconnect")
			cancelSimRun()
		}
	}()

	result := runSim(conf, ctx, sim, chunkCount)

	if result.Error != nil {
		t.Fatal(result.Error)
	}
	log.Info("Simulation ended")
}

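// runSim uploads chunkCount chunks to a random up node and then repeatedly
// checks every node's store (or the global mock store) until all expected
// chunks are found or the simulation context expires.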
func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulation, chunkCount int) simulation.Result {

	return sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//select a random up node; this is the node used for the upload
		node := sim.RandomUpNode()
		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
		if !ok {
			return fmt.Errorf("No localstore")
		}
		lstore := item.(*storage.LocalStore)
		hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
		if err != nil {
			return err
		}
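		//emit a chunkCreated event for every uploaded root hash so the
		//simulation event feed reflects the upload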
		for _, h := range hashes {
			evt := &simulations.Event{
				Type: EventTypeChunkCreated,
				Node: sim.Net.GetNode(node.ID),
				Data: h.String(),
			}
			sim.Net.Events().Send(evt)
		}
		conf.hashes = append(conf.hashes, hashes...)
		mapKeysToNodes(conf)

		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
		// or until the timeout is reached.
		var gDir string
		var globalStore *mockdb.GlobalStore
		if *useMockStore {
			gDir, globalStore, err = createGlobalStore()
			if err != nil {
				return fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil")
			}
			defer func() {
				os.RemoveAll(gDir)
				err := globalStore.Close()
				if err != nil {
					log.Error("Error closing global store", "err", err)
				}
			}()
		}
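		//`continue REPEAT` below restarts the check over all nodes whenever a
		//chunk has not arrived yet; the enclosing context enforces the timeout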
	REPEAT:
		for {
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				localChunks := conf.idToChunksMap[id]
				for _, ch := range localChunks {
					//get the chunk address by its index in the hashes array
					chunk := conf.hashes[ch]
					log.Trace(fmt.Sprintf("checking for chunk %s", chunk))
					//check if the expected chunk is indeed in the localstore
					var err error
					if *useMockStore {
						//use the globalStore if the mockStore should be used; in that case,
						//the complete localStore stack is bypassed for getting the chunk
						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
					} else {
						//use the actual localstore
						item, ok := sim.NodeItem(id, bucketKeyStore)
						if !ok {
							return fmt.Errorf("Error accessing localstore")
						}
						lstore := item.(*storage.LocalStore)
						_, err = lstore.Get(ctx, chunk)
					}
					if err != nil {
						log.Debug(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
						//back off briefly so we do not spam the logs while waiting
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					evt := &simulations.Event{
						Type: EventTypeChunkArrived,
						Node: sim.Net.GetNode(id),
						Data: chunk.String(),
					}
					sim.Net.Events().Send(evt)
					log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
				}
			}
			return nil
		}
	})
}

/*
The test generates the given number of chunks.

For every chunk generated, the nearest node addresses
are identified, and we verify that the nodes closest to the
chunk addresses actually do have the chunks in their local stores.

The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. The snapshot should have 'streamer' in its service list.
*/
func testSyncingViaDirectSubscribe(t *testing.T, chunkCount int, nodeCount int) error {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			n := ctx.Config.Node()
			addr := network.NewAddr(n)
			store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				Retrieval: RetrievalDisabled,
				Syncing:   SyncingRegisterOnly,
			})
			bucket.Store(bucketKeyRegistry, r)

			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			cleanup = func() {
				os.RemoveAll(datadir)
				netStore.Close()
				r.Close()
			}

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancelSimRun()

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
		return err
	}

	disconnections := sim.PeerEvents(
		context.Background(),
		sim.NodeIDs(),
		simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
	)

	go func() {
		for d := range disconnections {
			log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
			//report the failure and cancel the run; t.Fatal must not be
			//called from outside the test goroutine
			t.Error("unexpected disconnect")
			cancelSimRun()
		}
	}()

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		var subscriptionCount int

		filter := simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("stream").MsgCode(4)
		eventC := sim.PeerEvents(ctx, nodeIDs, filter)

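		//ask each node's registry to request SYNC subscriptions from its peers;
		//startSyncing returns how many subscriptions were requested for that node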
		for j, node := range nodeIDs {
			log.Trace(fmt.Sprintf("Start syncing subscriptions: %d", j))
			//start syncing!
			item, ok := sim.NodeItem(node, bucketKeyRegistry)
			if !ok {
				return fmt.Errorf("No registry")
			}
			registry := item.(*Registry)

			var cnt int
			cnt, err = startSyncing(registry, conf)
			if err != nil {
				return err
			}
			//increment the number of subscriptions we need to wait for
			//by the count returned from startSyncing (SYNC subscriptions)
			subscriptionCount += cnt
		}

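		//wait until one matching stream protocol message (msg code 4, matched
		//by the filter above) has been received per requested subscription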
		for e := range eventC {
			if e.Error != nil {
				return e.Error
			}
			subscriptionCount--
			if subscriptionCount == 0 {
				break
			}
		}
		//select a random node for upload
		node := sim.RandomUpNode()
		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
		if !ok {
			return fmt.Errorf("No localstore")
		}
		lstore := item.(*storage.LocalStore)
		hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
		if err != nil {
			return err
		}
		conf.hashes = append(conf.hashes, hashes...)
		mapKeysToNodes(conf)

		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		var gDir string
		var globalStore *mockdb.GlobalStore
		if *useMockStore {
			gDir, globalStore, err = createGlobalStore()
			if err != nil {
				return fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil")
			}
			defer os.RemoveAll(gDir)
		}
		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
		// or until the timeout is reached.
	REPEAT:
		for {
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				localChunks := conf.idToChunksMap[id]
				for _, ch := range localChunks {
					//get the chunk address by its index in the hashes array
					chunk := conf.hashes[ch]
					log.Trace(fmt.Sprintf("checking for chunk %s", chunk))
					//check if the expected chunk is indeed in the localstore
					var err error
					if *useMockStore {
						//use the globalStore if the mockStore should be used; in that case,
						//the complete localStore stack is bypassed for getting the chunk
						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
					} else {
						//use the actual localstore
						item, ok := sim.NodeItem(id, bucketKeyStore)
						if !ok {
							return fmt.Errorf("Error accessing localstore")
						}
						lstore := item.(*storage.LocalStore)
						_, err = lstore.Get(ctx, chunk)
					}
					if err != nil {
						log.Debug(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
						//back off briefly so we do not spam the logs while waiting
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
				}
			}
			return nil
		}
	})

	if result.Error != nil {
		return result.Error
	}

	log.Info("Simulation ended")
	return nil
}

//the server func to start syncing
//issues `RequestSubscriptionMsg` to peers, based on po, by iterating over
//the kademlia's bins using its `EachBin` function.
//returns the number of subscriptions requested
func startSyncing(r *Registry, conf *synctestConfig) (int, error) {
	var err error
	kad := r.delivery.kad
	subCnt := 0
	//iterate over each bin and solicit the needed subscriptions
	kad.EachBin(r.addr[:], pof, 0, func(conn *network.Peer, po int) bool {
		//request a subscription to the SYNC stream for this bin's proximity order
		subCnt++
		err = r.RequestSubscription(conf.addrToIDMap[string(conn.Address())], NewStream("SYNC", FormatSyncBinKey(uint8(po)), true), NewRange(0, 0), High)
		if err != nil {
			log.Error(fmt.Sprintf("Error in RequestSubscription! %v", err))
			return false
		}
		return true
	})
	return subCnt, err
}

//map chunk keys to the nodes which are expected to store them
func mapKeysToNodes(conf *synctestConfig) {
	nodemap := make(map[string][]int)
	//build a pot of the node overlay addresses
	np := pot.NewPot(nil, 0)
	indexmap := make(map[string]int)
	for i, a := range conf.addrs {
		indexmap[string(a)] = i
		np, _, _ = pot.Add(np, a, pof)
	}

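	//the peer pot map yields, for every overlay address, its nearest-neighbour
	//set (NNSet); a chunk is expected on the closest node and on that node's
	//nearest neighbours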
	var kadMinProxSize = 2

	ppmap := network.NewPeerPotMap(kadMinProxSize, conf.addrs)

	//for each chunk hash, run EachNeighbour on the address pot to identify the closest node
	log.Trace(fmt.Sprintf("Generated hash chunk(s): %v", conf.hashes))
	for i := 0; i < len(conf.hashes); i++ {
		var a []byte
		np.EachNeighbour([]byte(conf.hashes[i]), pof, func(val pot.Val, po int) bool {
			// take the first address
			a = val.([]byte)
			return false
		})

		nns := ppmap[common.Bytes2Hex(a)].NNSet
		nns = append(nns, a)

		for _, p := range nns {
			nodemap[string(p)] = append(nodemap[string(p)], i)
		}
	}
	for addr, chunks := range nodemap {
		//this selects which chunks are expected to be found with the given node
		conf.idToChunksMap[conf.addrToIDMap[addr]] = chunks
	}
	log.Debug(fmt.Sprintf("Map of expected chunks by ID: %v", conf.idToChunksMap))
}

//upload chunk-sized files to a single local node store
func uploadFileToSingleNodeStore(id enode.ID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) {
	log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
	fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
	size := chunkSize
	var rootAddrs []storage.Address
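	//store chunkCount random files of exactly one chunk each and collect
	//their root addresses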
	for i := 0; i < chunkCount; i++ {
		rk, wait, err := fileStore.Store(context.TODO(), testutil.RandomReader(i, size), int64(size), false)
		if err != nil {
			return nil, err
		}
		err = wait(context.TODO())
		if err != nil {
			return nil, err
		}
		rootAddrs = append(rootAddrs, rk)
	}

	return rootAddrs, nil
}