github.com/oskarth/go-ethereum@v1.6.8-0.20191013093314-dac24a9d3494/swarm/network/stream/snapshot_sync_test.go

     1  // Copyright 2018 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  package stream
    17  
    18  import (
    19  	"context"
    20  	crand "crypto/rand"
    21  	"fmt"
    22  	"io"
    23  	"os"
    24  	"runtime"
    25  	"sync"
    26  	"testing"
    27  	"time"
    28  
    29  	"github.com/ethereum/go-ethereum/common"
    30  	"github.com/ethereum/go-ethereum/log"
    31  	"github.com/ethereum/go-ethereum/node"
    32  	"github.com/ethereum/go-ethereum/p2p"
    33  	"github.com/ethereum/go-ethereum/p2p/enode"
    34  	"github.com/ethereum/go-ethereum/p2p/simulations"
    35  	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
    36  	"github.com/ethereum/go-ethereum/swarm/network"
    37  	"github.com/ethereum/go-ethereum/swarm/network/simulation"
    38  	"github.com/ethereum/go-ethereum/swarm/pot"
    39  	"github.com/ethereum/go-ethereum/swarm/state"
    40  	"github.com/ethereum/go-ethereum/swarm/storage"
    41  	mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
    42  )
    43  
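        // MaxTimeout bounds how long the sync tests may run; it is interpreted as a
        // number of seconds where it is consumed (assumption: the constant is used by
        // other tests in this package, as it is not referenced in this file).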
    44  const MaxTimeout = 600
    45  
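        // synctestConfig collects the state shared by the syncing tests: the overlay
        // addresses of all nodes, the uploaded chunk hashes, and the mappings used to
        // decide which chunks each node is expected to store.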
    46  type synctestConfig struct {
    47  	addrs         [][]byte
    48  	hashes        []storage.Address
    49  	idToChunksMap map[enode.ID][]int
    50  	//chunksToNodesMap map[string][]int
    51  	addrToIDMap map[string]enode.ID
    52  }
    53  
    54  const (
    55  	// These simulation event types are emitted on the network's event feed so
    56  	// observers can follow chunk creation, offers, deliveries and arrivals, as well as simulation termination.
    57  	EventTypeChunkCreated   simulations.EventType = "chunkCreated"
    58  	EventTypeChunkOffered   simulations.EventType = "chunkOffered"
    59  	EventTypeChunkWanted    simulations.EventType = "chunkWanted"
    60  	EventTypeChunkDelivered simulations.EventType = "chunkDelivered"
    61  	EventTypeChunkArrived   simulations.EventType = "chunkArrived"
    62  	EventTypeSimTerminated  simulations.EventType = "simTerminated"
    63  )
    64  
    65  // Tests in this file should not request chunks from peers.
    66  // If a request is made nonetheless, this function panics to signal the problem.
    67  func dummyRequestFromPeers(_ context.Context, req *network.Request) (*enode.ID, chan struct{}, error) {
    68  	panic(fmt.Sprintf("unexpected request: address %s, source %s", req.Addr.String(), req.Source.String()))
    69  }
    70  
    71  // This is a syncing test for nodes.
    72  // One node is randomly selected to be the pivot node.
    73  // A configurable number of chunks and nodes can be
    74  // provided to the test; the chunks are uploaded to the
    75  // pivot node, and we check that the nodes get the chunks
    76  // they are expected to store based on the syncing protocol.
    77  // The number of chunks and nodes can also be provided via the command line.
    78  func TestSyncingViaGlobalSync(t *testing.T) {
    79  	if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
    80  		t.Skip("Flaky on mac on travis")
    81  	}
    82  	//if nodes/chunks have been provided via commandline,
    83  	//run the tests with these values
    84  	if *nodes != 0 && *chunks != 0 {
    85  		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
    86  		testSyncingViaGlobalSync(t, *chunks, *nodes)
    87  	} else {
    88  		var nodeCnt []int
    89  		var chnkCnt []int
    90  		//if the `longrunning` flag has been provided
    91  		//run more test combinations
    92  		if *longrunning {
    93  			chnkCnt = []int{1, 8, 32, 256, 1024}
    94  			nodeCnt = []int{16, 32, 64, 128, 256}
    95  		} else {
    96  			//default test
    97  			chnkCnt = []int{4, 32}
    98  			nodeCnt = []int{32, 16}
    99  		}
   100  		for _, chnk := range chnkCnt {
   101  			for _, n := range nodeCnt {
   102  				log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", chnk, n))
   103  				testSyncingViaGlobalSync(t, chnk, n)
   104  			}
   105  		}
   106  	}
   107  }
   108  
   109  func TestSyncingViaDirectSubscribe(t *testing.T) {
   110  	if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
   111  		t.Skip("Flaky on mac on travis")
   112  	}
   113  	//if nodes/chunks have been provided via commandline,
   114  	//run the tests with these values
   115  	if *nodes != 0 && *chunks != 0 {
   116  		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
   117  		err := testSyncingViaDirectSubscribe(t, *chunks, *nodes)
   118  		if err != nil {
   119  			t.Fatal(err)
   120  		}
   121  	} else {
   122  		var nodeCnt []int
   123  		var chnkCnt []int
   124  		//if the `longrunning` flag has been provided
   125  		//run more test combinations
   126  		if *longrunning {
   127  			chnkCnt = []int{1, 8, 32, 256, 1024}
   128  			nodeCnt = []int{32, 16}
   129  		} else {
   130  			//default test
   131  			chnkCnt = []int{4, 32}
   132  			nodeCnt = []int{32, 16}
   133  		}
   134  		for _, chnk := range chnkCnt {
   135  			for _, n := range nodeCnt {
   136  				log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", chnk, n))
   137  				err := testSyncingViaDirectSubscribe(t, chnk, n)
   138  				if err != nil {
   139  					t.Fatal(err)
   140  				}
   141  			}
   142  		}
   143  	}
   144  }
   145  
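        // simServiceMap maps the "streamer" service name to streamerFunc; it is the
        // service set used by the global-sync simulation.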
   146  var simServiceMap = map[string]simulation.ServiceFunc{
   147  	"streamer": streamerFunc,
   148  }
   149  
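        // streamerFunc builds the "streamer" service for a simulation node: it creates
        // a LocalStore and a NetStore backed by a temporary data directory, wires them
        // to a kademlia and a delivery layer, and returns a syncing Registry whose
        // fetcher panics if a chunk is ever requested from peers (see dummyRequestFromPeers).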
   150  func streamerFunc(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   151  	n := ctx.Config.Node()
   152  	addr := network.NewAddr(n)
   153  	store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
   154  	if err != nil {
   155  		return nil, nil, err
   156  	}
   157  	bucket.Store(bucketKeyStore, store)
   158  	localStore := store.(*storage.LocalStore)
   159  	netStore, err := storage.NewNetStore(localStore, nil)
   160  	if err != nil {
   161  		return nil, nil, err
   162  	}
   163  	kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   164  	delivery := NewDelivery(kad, netStore)
   165  	netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New
   166  
   167  	r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
   168  		DoSync:          true,
   169  		DoServeRetrieve: true,
   170  		SyncUpdateDelay: 3 * time.Second,
   171  	})
   172  
   173  	bucket.Store(bucketKeyRegistry, r)
   174  
   175  	cleanup = func() {
   176  		os.RemoveAll(datadir)
   177  		netStore.Close()
   178  		r.Close()
   179  	}
   180  
   181  	return r, cleanup, nil
   182  
   183  }
   184  
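        // testSyncingViaGlobalSync loads the snapshot for nodeCount nodes, waits for a
        // healthy kademlia, watches for unexpected peer drops and then delegates the
        // upload of chunkCount chunks and the store verification to runSim.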
   185  func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
   186  	sim := simulation.New(simServiceMap)
   187  	defer sim.Close()
   188  
   189  	log.Info("Initializing test config")
   190  
   191  	conf := &synctestConfig{}
   192  	//map of node ID to indexes of chunks expected at that ID
   193  	conf.idToChunksMap = make(map[enode.ID][]int)
   194  	//map of overlay address to node ID
   195  	conf.addrToIDMap = make(map[string]enode.ID)
   196  	//array where the generated chunk hashes will be stored
   197  	conf.hashes = make([]storage.Address, 0)
   198  
   199  	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
   200  	if err != nil {
   201  		t.Fatal(err)
   202  	}
   203  
   204  	ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
   205  	defer cancelSimRun()
   206  
   207  	if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   208  		t.Fatal(err)
   209  	}
   210  
   211  	disconnections := sim.PeerEvents(
   212  		context.Background(),
   213  		sim.NodeIDs(),
   214  		simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
   215  	)
   216  
   217  	go func() {
   218  		for d := range disconnections {
   219  			log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
        			// t.Fatal must not be called from a goroutine other than the test's own;
        			// record the failure and cancel the simulation run instead.
   220  			t.Error("unexpected disconnect")
   221  			cancelSimRun()
   222  		}
   223  	}()
   224  
   225  	result := runSim(conf, ctx, sim, chunkCount)
   226  
   227  	if result.Error != nil {
   228  		t.Fatal(result.Error)
   229  	}
   230  	log.Info("Simulation ended")
   231  }
   232  
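        // runSim records every up node's overlay address, uploads chunkCount chunks to
        // a randomly selected node, maps the chunk hashes to the nodes expected to
        // store them, and then polls each node's local store (or the mock global store)
        // until all expected chunks are found or the simulation context expires.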
   233  func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulation, chunkCount int) simulation.Result {
   234  
   235  	return sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
   236  		nodeIDs := sim.UpNodeIDs()
   237  		for _, n := range nodeIDs {
   238  			//get the kademlia overlay address from this ID
   239  			a := n.Bytes()
   240  			//append it to the array of all overlay addresses
   241  			conf.addrs = append(conf.addrs, a)
   242  			//the proximity calculation is on overlay addr,
   243  			//the p2p/simulations check func triggers on enode.ID,
   244  			//so we need to know which overlay addr maps to which nodeID
   245  			conf.addrToIDMap[string(a)] = n
   246  		}
   247  
   248  		//select a random node that is up;
   249  		//this is the node selected for upload
   250  		node := sim.RandomUpNode()
   251  		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
   252  		if !ok {
   253  			return fmt.Errorf("No localstore")
   254  		}
   255  		lstore := item.(*storage.LocalStore)
   256  		hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
   257  		if err != nil {
   258  			return err
   259  		}
   260  		for _, h := range hashes {
   261  			evt := &simulations.Event{
   262  				Type: EventTypeChunkCreated,
   263  				Node: sim.Net.GetNode(node.ID),
   264  				Data: h.String(),
   265  			}
   266  			sim.Net.Events().Send(evt)
   267  		}
   268  		conf.hashes = append(conf.hashes, hashes...)
   269  		mapKeysToNodes(conf)
   270  
   271  		// The check below is repeated until all expected chunks are found in the
   272  		// local stores of all nodes, or until the timeout is reached.
   273  		var gDir string
   274  		var globalStore *mockdb.GlobalStore
   275  		if *useMockStore {
   276  			gDir, globalStore, err = createGlobalStore()
   277  			if err != nil {
   278  				return fmt.Errorf("mockStore is enabled but the globalStore could not be created: %v", err)
   279  			}
   280  			defer func() {
   281  				os.RemoveAll(gDir)
   282  				err := globalStore.Close()
   283  				if err != nil {
   284  					log.Error("Error closing global store", "err", err)
   285  				}
   286  			}()
   287  		}
   288  	REPEAT:
   289  		for {
   290  			for _, id := range nodeIDs {
   291  				//for each expected chunk, check if it is in the local store
   292  				localChunks := conf.idToChunksMap[id]
   293  				for _, ch := range localChunks {
   294  					//get the real chunk by the index in the index array
   295  					chunk := conf.hashes[ch]
   296  					log.Trace(fmt.Sprintf("checking for chunk: %s", chunk))
   297  					//check if the expected chunk is indeed in the localstore
   298  					var err error
   299  					if *useMockStore {
   300  						//use the globalStore if the mockStore should be used; in that case,
   301  						//the complete localStore stack is bypassed for getting the chunk
   302  						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
   303  					} else {
   304  						//use the actual localstore
   305  						item, ok := sim.NodeItem(id, bucketKeyStore)
   306  						if !ok {
   307  							return fmt.Errorf("Error accessing localstore")
   308  						}
   309  						lstore := item.(*storage.LocalStore)
   310  						_, err = lstore.Get(ctx, chunk)
   311  					}
   312  					if err != nil {
   313  						log.Debug(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
   314  						// Back off briefly so this retry loop does not flood the logs
   315  						time.Sleep(500 * time.Millisecond)
   316  						continue REPEAT
   317  					}
   318  					evt := &simulations.Event{
   319  						Type: EventTypeChunkArrived,
   320  						Node: sim.Net.GetNode(id),
   321  						Data: chunk.String(),
   322  					}
   323  					sim.Net.Events().Send(evt)
   324  					log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
   325  				}
   326  			}
   327  			return nil
   328  		}
   329  	})
   330  }
   331  
   332  /*
   333  The test generates the given number of chunks.
   334  
   335  For every chunk generated, the nearest node addresses
   336  are identified, and we verify that the nodes closest to the
   337  chunk addresses actually do have the chunks in their local stores.
   338  
   339  The test loads a snapshot file to construct the swarm network,
   340  assuming that the snapshot file identifies a healthy
   341  kademlia network. The snapshot should have 'streamer' in its service list.
   342  */
   343  func testSyncingViaDirectSubscribe(t *testing.T, chunkCount int, nodeCount int) error {
   344  	sim := simulation.New(map[string]simulation.ServiceFunc{
   345  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   346  			n := ctx.Config.Node()
   347  			addr := network.NewAddr(n)
   348  			store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
   349  			if err != nil {
   350  				return nil, nil, err
   351  			}
   352  			bucket.Store(bucketKeyStore, store)
   353  			localStore := store.(*storage.LocalStore)
   354  			netStore, err := storage.NewNetStore(localStore, nil)
   355  			if err != nil {
   356  				return nil, nil, err
   357  			}
   358  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   359  			delivery := NewDelivery(kad, netStore)
   360  			netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New
   361  
   362  			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
   363  				DoServeRetrieve: true,
   364  				DoSync:          true,
   365  			})
   366  			bucket.Store(bucketKeyRegistry, r)
   367  
   368  			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
   369  			bucket.Store(bucketKeyFileStore, fileStore)
   370  
   371  			cleanup = func() {
   372  				os.RemoveAll(datadir)
   373  				netStore.Close()
   374  				r.Close()
   375  			}
   376  
   377  			return r, cleanup, nil
   378  
   379  		},
   380  	})
   381  	defer sim.Close()
   382  
   383  	ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
   384  	defer cancelSimRun()
   385  
   386  	conf := &synctestConfig{}
   387  	//map of node ID to indexes of chunks expected at that ID
   388  	conf.idToChunksMap = make(map[enode.ID][]int)
   389  	//map of overlay address to node ID
   390  	conf.addrToIDMap = make(map[string]enode.ID)
   391  	//array where the generated chunk hashes will be stored
   392  	conf.hashes = make([]storage.Address, 0)
   393  
   394  	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
   395  	if err != nil {
   396  		return err
   397  	}
   398  
   399  	if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   400  		return err
   401  	}
   402  
   403  	disconnections := sim.PeerEvents(
   404  		context.Background(),
   405  		sim.NodeIDs(),
   406  		simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
   407  	)
   408  
   409  	go func() {
   410  		for d := range disconnections {
   411  			log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
        			// t.Fatal must not be called from a goroutine other than the test's own;
        			// record the failure and cancel the simulation run instead.
   412  			t.Error("unexpected disconnect")
   413  			cancelSimRun()
   414  		}
   415  	}()
   416  
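        	// The run below establishes SYNC subscriptions explicitly via startSyncing,
        	// waits for the expected number of subscription messages, uploads the chunks
        	// to a random node and then polls every node for the chunks it should store.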
   417  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
   418  		nodeIDs := sim.UpNodeIDs()
   419  		for _, n := range nodeIDs {
   420  			//get the kademlia overlay address from this ID
   421  			a := n.Bytes()
   422  			//append it to the array of all overlay addresses
   423  			conf.addrs = append(conf.addrs, a)
   424  			//the proximity calculation is on overlay addr,
   425  			//the p2p/simulations check func triggers on enode.ID,
   426  			//so we need to know which overlay addr maps to which nodeID
   427  			conf.addrToIDMap[string(a)] = n
   428  		}
   429  
   430  		var subscriptionCount int
   431  
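        		// Count incoming "stream" protocol messages with code 4 (assumed to be the
        		// SubscribeMsg that answers a subscription request) so that the loop below
        		// can wait for one such message per subscription requested via startSyncing.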
   432  		filter := simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("stream").MsgCode(4)
   433  		eventC := sim.PeerEvents(ctx, nodeIDs, filter)
   434  
   435  		for j, node := range nodeIDs {
   436  			log.Trace(fmt.Sprintf("Start syncing subscriptions: %d", j))
   437  			//start syncing!
   438  			item, ok := sim.NodeItem(node, bucketKeyRegistry)
   439  			if !ok {
   440  				return fmt.Errorf("No registry")
   441  			}
   442  			registry := item.(*Registry)
   443  
   444  			var cnt int
   445  			cnt, err = startSyncing(registry, conf)
   446  			if err != nil {
   447  				return err
   448  			}
   449  			//increment the number of subscriptions we need to wait for
   450  			//by the count returned from startSyncing (SYNC subscriptions)
   451  			subscriptionCount += cnt
   452  		}
   453  
   454  		for e := range eventC {
   455  			if e.Error != nil {
   456  				return e.Error
   457  			}
   458  			subscriptionCount--
   459  			if subscriptionCount == 0 {
   460  				break
   461  			}
   462  		}
   463  		//select a random node for upload
   464  		node := sim.RandomUpNode()
   465  		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
   466  		if !ok {
   467  			return fmt.Errorf("No localstore")
   468  		}
   469  		lstore := item.(*storage.LocalStore)
   470  		hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
   471  		if err != nil {
   472  			return err
   473  		}
   474  		conf.hashes = append(conf.hashes, hashes...)
   475  		mapKeysToNodes(conf)
   476  
   477  		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   478  			return err
   479  		}
   480  
   481  		var gDir string
   482  		var globalStore *mockdb.GlobalStore
   483  		if *useMockStore {
   484  			gDir, globalStore, err = createGlobalStore()
   485  			if err != nil {
   486  			return fmt.Errorf("mockStore is enabled but the globalStore could not be created: %v", err)
   487  			}
   488  			defer os.RemoveAll(gDir)
   489  		}
   490  		// The check below is repeated until all expected chunks are found in the
   491  		// local stores of all nodes, or until the timeout is reached.
   492  	REPEAT:
   493  		for {
   494  			for _, id := range nodeIDs {
   495  				//for each expected chunk, check if it is in the local store
   496  				localChunks := conf.idToChunksMap[id]
   497  				for _, ch := range localChunks {
   498  					//get the real chunk by the index in the index array
   499  					chunk := conf.hashes[ch]
   500  					log.Trace(fmt.Sprintf("checking for chunk: %s", chunk))
   501  					//check if the expected chunk is indeed in the localstore
   502  					var err error
   503  					if *useMockStore {
   504  						//use the globalStore if the mockStore should be used; in that case,
   505  						//the complete localStore stack is bypassed for getting the chunk
   506  						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
   507  					} else {
   508  						//use the actual localstore
   509  						item, ok := sim.NodeItem(id, bucketKeyStore)
   510  						if !ok {
   511  							return fmt.Errorf("Error accessing localstore")
   512  						}
   513  						lstore := item.(*storage.LocalStore)
   514  						_, err = lstore.Get(ctx, chunk)
   515  					}
   516  					if err != nil {
   517  						log.Debug(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
   518  						// Back off briefly so this retry loop does not flood the logs
   519  						time.Sleep(500 * time.Millisecond)
   520  						continue REPEAT
   521  					}
   522  					log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
   523  				}
   524  			}
   525  			return nil
   526  		}
   527  	})
   528  
   529  	if result.Error != nil {
   530  		return result.Error
   531  	}
   532  
   533  	log.Info("Simulation ended")
   534  	return nil
   535  }
   536  
   537  // startSyncing is the server func to start syncing:
   538  // it issues `RequestSubscriptionMsg` to peers, based on proximity order (po),
   539  // by iterating over the kademlia with `EachBin`.
   540  // It returns the number of subscriptions requested.
   541  func startSyncing(r *Registry, conf *synctestConfig) (int, error) {
   542  	var err error
   543  	kad := r.delivery.kad
   544  	subCnt := 0
   545  	//iterate over each kademlia bin and request the needed subscriptions
   546  	kad.EachBin(r.addr[:], pof, 0, func(conn *network.Peer, po int) bool {
   547  		//request a SYNC subscription for this bin from the peer
   548  		subCnt++
   549  		err = r.RequestSubscription(conf.addrToIDMap[string(conn.Address())], NewStream("SYNC", FormatSyncBinKey(uint8(po)), true), NewRange(0, 0), High)
   550  		if err != nil {
   551  			log.Error(fmt.Sprintf("Error in RequestSubscription! %v", err))
   552  			return false
   553  		}
   554  		return true
   555  
   556  	})
   557  	return subCnt, nil
   558  }
   559  
   560  // mapKeysToNodes maps chunk keys to the node addresses responsible for them:
        // for each chunk hash, the nearest overlay address and that node's
        // nearest-neighbour set are identified, and the chunk index is recorded for each of those addresses.
   561  func mapKeysToNodes(conf *synctestConfig) {
   562  	nodemap := make(map[string][]int)
   563  	//build a pot for chunk hashes
   564  	np := pot.NewPot(nil, 0)
   565  	indexmap := make(map[string]int)
   566  	for i, a := range conf.addrs {
   567  		indexmap[string(a)] = i
   568  		np, _, _ = pot.Add(np, a, pof)
   569  	}
   570  
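        	// kadMinProxSize is the minimum proximity-order bin size handed to
        	// NewPeerPotMap; the resulting peer-pot map supplies each address's
        	// nearest-neighbour set (NNSet) used below.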
   571  	var kadMinProxSize = 2
   572  
   573  	ppmap := network.NewPeerPotMap(kadMinProxSize, conf.addrs)
   574  
   575  	//for each address, run EachNeighbour on the chunk hashes pot to identify closest nodes
   576  	log.Trace(fmt.Sprintf("Generated hash chunk(s): %v", conf.hashes))
   577  	for i := 0; i < len(conf.hashes); i++ {
   578  		var a []byte
   579  		np.EachNeighbour([]byte(conf.hashes[i]), pof, func(val pot.Val, po int) bool {
   580  			// take the first address
   581  			a = val.([]byte)
   582  			return false
   583  		})
   584  
   585  		nns := ppmap[common.Bytes2Hex(a)].NNSet
   586  		nns = append(nns, a)
   587  
   588  		for _, p := range nns {
   589  			nodemap[string(p)] = append(nodemap[string(p)], i)
   590  		}
   591  	}
   592  	for addr, chunks := range nodemap {
   593  		//this selects which chunks are expected to be found with the given node
   594  		conf.idToChunksMap[conf.addrToIDMap[addr]] = chunks
   595  	}
   596  	log.Debug(fmt.Sprintf("Map of expected chunks by ID: %v", conf.idToChunksMap))
   597  }
   598  
   599  //uploadFileToSingleNodeStore uploads a file (i.e. its chunks) to a single local node store
   600  func uploadFileToSingleNodeStore(id enode.ID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) {
   601  	log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
   602  	fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
   603  	size := chunkSize
   604  	var rootAddrs []storage.Address
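        	// Each iteration stores one chunk-sized blob of random data via the FileStore,
        	// waits for storage to complete and records the returned root address.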
   605  	for i := 0; i < chunkCount; i++ {
   606  		rk, wait, err := fileStore.Store(context.TODO(), io.LimitReader(crand.Reader, int64(size)), int64(size), false)
   607  		if err != nil {
   608  			return nil, err
   609  		}
   610  		err = wait(context.TODO())
   611  		if err != nil {
   612  			return nil, err
   613  		}
   614  		rootAddrs = append(rootAddrs, rk)
   615  	}
   616  
   617  	return rootAddrs, nil
   618  }