github.com/insight-chain/inb-go@v1.1.3-0.20191221022159-da049980ae38/swarm/network/stream/snapshot_sync_test.go

     1  // Copyright 2018 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  package stream
    17  
    18  import (
    19  	"context"
    20  	"fmt"
    21  	"os"
    22  	"runtime"
    23  	"sync"
    24  	"testing"
    25  	"time"
    26  
    27  	"github.com/insight-chain/inb-go/common"
    28  	"github.com/insight-chain/inb-go/log"
    29  	"github.com/insight-chain/inb-go/node"
    30  	"github.com/insight-chain/inb-go/p2p/enode"
    31  	"github.com/insight-chain/inb-go/p2p/simulations"
    32  	"github.com/insight-chain/inb-go/p2p/simulations/adapters"
    33  	"github.com/insight-chain/inb-go/swarm/network"
    34  	"github.com/insight-chain/inb-go/swarm/network/simulation"
    35  	"github.com/insight-chain/inb-go/swarm/pot"
    36  	"github.com/insight-chain/inb-go/swarm/state"
    37  	"github.com/insight-chain/inb-go/swarm/storage"
    38  	"github.com/insight-chain/inb-go/swarm/storage/mock"
    39  	mockmem "github.com/insight-chain/inb-go/swarm/storage/mock/mem"
    40  	"github.com/insight-chain/inb-go/swarm/testutil"
    41  )
    42  
    43  const MaxTimeout = 600
    44  
     45  type synctestConfig struct {
     46  	addrs         [][]byte            // overlay addresses of all nodes in the simulation
     47  	hashes        []storage.Address   // addresses of the uploaded chunks
     48  	idToChunksMap map[enode.ID][]int  // node ID -> indices into hashes expected at that node
     49  	//chunksToNodesMap map[string][]int
     50  	addrToIDMap map[string]enode.ID // overlay address -> node ID
     51  }
    52  
    53  const (
     54  	// Custom simulation event types emitted on the simulation event feed
     55  	// to track the chunk lifecycle and simulation termination
    56  	EventTypeChunkCreated   simulations.EventType = "chunkCreated"
    57  	EventTypeChunkOffered   simulations.EventType = "chunkOffered"
    58  	EventTypeChunkWanted    simulations.EventType = "chunkWanted"
    59  	EventTypeChunkDelivered simulations.EventType = "chunkDelivered"
    60  	EventTypeChunkArrived   simulations.EventType = "chunkArrived"
    61  	EventTypeSimTerminated  simulations.EventType = "simTerminated"
    62  )
    63  
     64  // Tests in this file should not request chunks from peers.
     65  // This function panics to signal a problem if such a request is made.
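         // It is wired into each node's NetStore via network.NewFetcherFactory below; together
         // with RetrievalDisabled in the registry options this ensures that all chunk movement
         // in these tests happens through syncing alone.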
    66  func dummyRequestFromPeers(_ context.Context, req *network.Request) (*enode.ID, chan struct{}, error) {
    67  	panic(fmt.Sprintf("unexpected request: address %s, source %s", req.Addr.String(), req.Source.String()))
    68  }
    69  
     70  //This test is a syncing test for nodes.
     71  //One node is randomly selected to be the pivot node.
     72  //A configurable number of chunks and nodes can be
     73  //provided to the test; the chunks are uploaded
     74  //to the pivot node, and we check that the other nodes get the chunks
     75  //they are expected to store based on the syncing protocol.
     76  //The number of chunks and nodes can also be provided via the command line.
    77  func TestSyncingViaGlobalSync(t *testing.T) {
    78  	if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
    79  		t.Skip("Flaky on mac on travis")
    80  	}
    81  	//if nodes/chunks have been provided via commandline,
    82  	//run the tests with these values
    83  	if *nodes != 0 && *chunks != 0 {
    84  		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
    85  		testSyncingViaGlobalSync(t, *chunks, *nodes)
    86  	} else {
    87  		var nodeCnt []int
    88  		var chnkCnt []int
    89  		//if the `longrunning` flag has been provided
    90  		//run more test combinations
    91  		if *longrunning {
    92  			chnkCnt = []int{1, 8, 32, 256, 1024}
    93  			nodeCnt = []int{16, 32, 64, 128, 256}
    94  		} else {
    95  			//default test
    96  			chnkCnt = []int{4, 32}
    97  			nodeCnt = []int{32, 16}
    98  		}
    99  		for _, chnk := range chnkCnt {
   100  			for _, n := range nodeCnt {
    101  				log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", chnk, n))
   102  				testSyncingViaGlobalSync(t, chnk, n)
   103  			}
   104  		}
   105  	}
   106  }
   107  
   108  func TestSyncingViaDirectSubscribe(t *testing.T) {
   109  	if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
   110  		t.Skip("Flaky on mac on travis")
   111  	}
   112  	//if nodes/chunks have been provided via commandline,
   113  	//run the tests with these values
   114  	if *nodes != 0 && *chunks != 0 {
   115  		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
   116  		err := testSyncingViaDirectSubscribe(t, *chunks, *nodes)
   117  		if err != nil {
   118  			t.Fatal(err)
   119  		}
   120  	} else {
   121  		var nodeCnt []int
   122  		var chnkCnt []int
   123  		//if the `longrunning` flag has been provided
   124  		//run more test combinations
   125  		if *longrunning {
   126  			chnkCnt = []int{1, 8, 32, 256, 1024}
   127  			nodeCnt = []int{32, 16}
   128  		} else {
   129  			//default test
   130  			chnkCnt = []int{4, 32}
   131  			nodeCnt = []int{32, 16}
   132  		}
   133  		for _, chnk := range chnkCnt {
   134  			for _, n := range nodeCnt {
    135  				log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", chnk, n))
   136  				err := testSyncingViaDirectSubscribe(t, chnk, n)
   137  				if err != nil {
   138  					t.Fatal(err)
   139  				}
   140  			}
   141  		}
   142  	}
   143  }
   144  
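         // simServiceMap registers the stream service constructor used by the global-sync
         // simulation; streamerFunc builds each simulated node's localstore, netstore,
         // delivery and registry stack.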
   145  var simServiceMap = map[string]simulation.ServiceFunc{
   146  	"streamer": streamerFunc,
   147  }
   148  
   149  func streamerFunc(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   150  	n := ctx.Config.Node()
   151  	addr := network.NewAddr(n)
   152  	store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
   153  	if err != nil {
   154  		return nil, nil, err
   155  	}
   156  	bucket.Store(bucketKeyStore, store)
   157  	localStore := store.(*storage.LocalStore)
   158  	netStore, err := storage.NewNetStore(localStore, nil)
   159  	if err != nil {
   160  		return nil, nil, err
   161  	}
   162  	kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   163  	delivery := NewDelivery(kad, netStore)
   164  	netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New
   165  
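         	// Retrieval is disabled (a retrieval request would hit dummyRequestFromPeers and
         	// panic) and SyncingAutoSubscribe lets the registry establish SYNC subscriptions on
         	// its own, in contrast to the SyncingRegisterOnly setup in testSyncingViaDirectSubscribe.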
   166  	r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
   167  		Retrieval:       RetrievalDisabled,
   168  		Syncing:         SyncingAutoSubscribe,
   169  		SyncUpdateDelay: 3 * time.Second,
   170  	}, nil)
   171  
   172  	bucket.Store(bucketKeyRegistry, r)
   173  
   174  	cleanup = func() {
   175  		os.RemoveAll(datadir)
   176  		netStore.Close()
   177  		r.Close()
   178  	}
   179  
   180  	return r, cleanup, nil
   181  
   182  }
   183  
   184  func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
   185  
   186  	t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
   187  	sim := simulation.New(simServiceMap)
   188  	defer sim.Close()
   189  
   190  	log.Info("Initializing test config")
   191  
   192  	conf := &synctestConfig{}
   193  	//map of discover ID to indexes of chunks expected at that ID
   194  	conf.idToChunksMap = make(map[enode.ID][]int)
   195  	//map of overlay address to discover ID
   196  	conf.addrToIDMap = make(map[string]enode.ID)
   197  	//array where the generated chunk hashes will be stored
   198  	conf.hashes = make([]storage.Address, 0)
   199  
   200  	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
   201  	if err != nil {
   202  		t.Fatal(err)
   203  	}
   204  
   205  	ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
   206  	defer cancelSimRun()
   207  
   208  	if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   209  		t.Fatal(err)
   210  	}
   211  
   212  	disconnections := sim.PeerEvents(
   213  		context.Background(),
   214  		sim.NodeIDs(),
   215  		simulation.NewPeerEventsFilter().Drop(),
   216  	)
   217  
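         	// Any dropped peer connection in the static snapshot network indicates a problem,
         	// so the watcher below fails the test and cancels the simulation on the first drop.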
   218  	go func() {
   219  		for d := range disconnections {
   220  			log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
    221  			t.Error("unexpected disconnect") // Fatal must not be called outside the test goroutine
   222  			cancelSimRun()
   223  		}
   224  	}()
   225  
   226  	result := runSim(conf, ctx, sim, chunkCount)
   227  
   228  	if result.Error != nil {
   229  		t.Fatal(result.Error)
   230  	}
   231  	log.Info("Simulation ended")
   232  }
   233  
   234  func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulation, chunkCount int) simulation.Result {
   235  
   236  	return sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
   237  		nodeIDs := sim.UpNodeIDs()
   238  		for _, n := range nodeIDs {
   239  			//get the kademlia overlay address from this ID
   240  			a := n.Bytes()
   241  			//append it to the array of all overlay addresses
   242  			conf.addrs = append(conf.addrs, a)
   243  			//the proximity calculation is on overlay addr,
   244  			//the p2p/simulations check func triggers on enode.ID,
   245  			//so we need to know which overlay addr maps to which nodeID
   246  			conf.addrToIDMap[string(a)] = n
   247  		}
   248  
    249  		//select a random up node;
    250  		//this is the node selected for upload
   251  		node := sim.RandomUpNode()
   252  		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
   253  		if !ok {
   254  			return fmt.Errorf("No localstore")
   255  		}
   256  		lstore := item.(*storage.LocalStore)
   257  		hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
   258  		if err != nil {
   259  			return err
   260  		}
   261  		for _, h := range hashes {
   262  			evt := &simulations.Event{
   263  				Type: EventTypeChunkCreated,
   264  				Node: sim.Net.GetNode(node.ID),
   265  				Data: h.String(),
   266  			}
   267  			sim.Net.Events().Send(evt)
   268  		}
   269  		conf.hashes = append(conf.hashes, hashes...)
   270  		mapKeysToNodes(conf)
   271  
   272  		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
   273  		// or until the timeout is reached.
   274  		var globalStore mock.GlobalStorer
   275  		if *useMockStore {
   276  			globalStore = mockmem.NewGlobalStore()
   277  		}
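         		// A failed lookup restarts the whole pass via `continue REPEAT`; the loop only
         		// returns once a complete pass finds every expected chunk on every node, and the
         		// surrounding sim.Run context (2 minute timeout) bounds how long it may retry.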
   278  	REPEAT:
   279  		for {
   280  			for _, id := range nodeIDs {
   281  				//for each expected chunk, check if it is in the local store
   282  				localChunks := conf.idToChunksMap[id]
   283  				for _, ch := range localChunks {
   284  					//get the real chunk by the index in the index array
   285  					chunk := conf.hashes[ch]
    286  					log.Trace(fmt.Sprintf("checking for chunk %s on node %s", chunk, id))
   287  					//check if the expected chunk is indeed in the localstore
   288  					var err error
   289  					if *useMockStore {
   290  						//use the globalStore if the mockStore should be used; in that case,
   291  						//the complete localStore stack is bypassed for getting the chunk
   292  						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
   293  					} else {
   294  						//use the actual localstore
   295  						item, ok := sim.NodeItem(id, bucketKeyStore)
   296  						if !ok {
   297  							return fmt.Errorf("Error accessing localstore")
   298  						}
   299  						lstore := item.(*storage.LocalStore)
   300  						_, err = lstore.Get(ctx, chunk)
   301  					}
   302  					if err != nil {
   303  						log.Debug(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
    304  						// Sleep briefly so the retry loop does not flood the debug log
   305  						time.Sleep(500 * time.Millisecond)
   306  						continue REPEAT
   307  					}
   308  					evt := &simulations.Event{
   309  						Type: EventTypeChunkArrived,
   310  						Node: sim.Net.GetNode(id),
   311  						Data: chunk.String(),
   312  					}
   313  					sim.Net.Events().Send(evt)
   314  					log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
   315  				}
   316  			}
   317  			return nil
   318  		}
   319  	})
   320  }
   321  
    322  /*
    323  The test generates the given number of chunks.
    324  
    325  For every chunk generated, the nearest node addresses
    326  are identified, and we verify that the nodes closest to the
    327  chunk addresses actually do have the chunks in their local stores.
    328  
    329  The test loads a snapshot file to construct the swarm network,
    330  assuming that the snapshot file identifies a healthy
    331  kademlia network. The snapshot should have 'streamer' in its service list.
    332  */
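         //Unlike TestSyncingViaGlobalSync, the registries here use SyncingRegisterOnly, and the
         //test drives the SYNC subscriptions explicitly through startSyncing below.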
   333  func testSyncingViaDirectSubscribe(t *testing.T, chunkCount int, nodeCount int) error {
   334  
   335  	t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
   336  	sim := simulation.New(map[string]simulation.ServiceFunc{
   337  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   338  			n := ctx.Config.Node()
   339  			addr := network.NewAddr(n)
   340  			store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
   341  			if err != nil {
   342  				return nil, nil, err
   343  			}
   344  			bucket.Store(bucketKeyStore, store)
   345  			localStore := store.(*storage.LocalStore)
   346  			netStore, err := storage.NewNetStore(localStore, nil)
   347  			if err != nil {
   348  				return nil, nil, err
   349  			}
   350  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   351  			delivery := NewDelivery(kad, netStore)
   352  			netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New
   353  
   354  			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
   355  				Retrieval: RetrievalDisabled,
   356  				Syncing:   SyncingRegisterOnly,
   357  			}, nil)
   358  			bucket.Store(bucketKeyRegistry, r)
   359  
   360  			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
   361  			bucket.Store(bucketKeyFileStore, fileStore)
   362  
   363  			cleanup = func() {
   364  				os.RemoveAll(datadir)
   365  				netStore.Close()
   366  				r.Close()
   367  			}
   368  
   369  			return r, cleanup, nil
   370  
   371  		},
   372  	})
   373  	defer sim.Close()
   374  
   375  	ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
   376  	defer cancelSimRun()
   377  
   378  	conf := &synctestConfig{}
   379  	//map of discover ID to indexes of chunks expected at that ID
   380  	conf.idToChunksMap = make(map[enode.ID][]int)
   381  	//map of overlay address to discover ID
   382  	conf.addrToIDMap = make(map[string]enode.ID)
   383  	//array where the generated chunk hashes will be stored
   384  	conf.hashes = make([]storage.Address, 0)
   385  
   386  	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
   387  	if err != nil {
   388  		return err
   389  	}
   390  
   391  	if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   392  		return err
   393  	}
   394  
   395  	disconnections := sim.PeerEvents(
   396  		context.Background(),
   397  		sim.NodeIDs(),
   398  		simulation.NewPeerEventsFilter().Drop(),
   399  	)
   400  
   401  	go func() {
   402  		for d := range disconnections {
   403  			log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
    404  			t.Error("unexpected disconnect") // Fatal must not be called outside the test goroutine
   405  			cancelSimRun()
   406  		}
   407  	}()
   408  
   409  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
   410  		nodeIDs := sim.UpNodeIDs()
   411  		for _, n := range nodeIDs {
   412  			//get the kademlia overlay address from this ID
   413  			a := n.Bytes()
   414  			//append it to the array of all overlay addresses
   415  			conf.addrs = append(conf.addrs, a)
   416  			//the proximity calculation is on overlay addr,
   417  			//the p2p/simulations check func triggers on enode.ID,
   418  			//so we need to know which overlay addr maps to which nodeID
   419  			conf.addrToIDMap[string(a)] = n
   420  		}
   421  
   422  		var subscriptionCount int
   423  
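         		// Filter for received "stream" protocol messages with MsgCode 4; the loop below
         		// treats each such message as one peer subscription being established and waits
         		// until every subscription requested via startSyncing has been counted.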
   424  		filter := simulation.NewPeerEventsFilter().ReceivedMessages().Protocol("stream").MsgCode(4)
   425  		eventC := sim.PeerEvents(ctx, nodeIDs, filter)
   426  
   427  		for j, node := range nodeIDs {
   428  			log.Trace(fmt.Sprintf("Start syncing subscriptions: %d", j))
   429  			//start syncing!
   430  			item, ok := sim.NodeItem(node, bucketKeyRegistry)
   431  			if !ok {
   432  				return fmt.Errorf("No registry")
   433  			}
   434  			registry := item.(*Registry)
   435  
   436  			var cnt int
   437  			cnt, err = startSyncing(registry, conf)
   438  			if err != nil {
   439  				return err
   440  			}
   441  			//increment the number of subscriptions we need to wait for
   442  			//by the count returned from startSyncing (SYNC subscriptions)
   443  			subscriptionCount += cnt
   444  		}
   445  
   446  		for e := range eventC {
   447  			if e.Error != nil {
   448  				return e.Error
   449  			}
   450  			subscriptionCount--
   451  			if subscriptionCount == 0 {
   452  				break
   453  			}
   454  		}
   455  		//select a random node for upload
   456  		node := sim.RandomUpNode()
   457  		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
   458  		if !ok {
   459  			return fmt.Errorf("No localstore")
   460  		}
   461  		lstore := item.(*storage.LocalStore)
   462  		hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
   463  		if err != nil {
   464  			return err
   465  		}
   466  		conf.hashes = append(conf.hashes, hashes...)
   467  		mapKeysToNodes(conf)
   468  
   469  		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   470  			return err
   471  		}
   472  
   473  		var globalStore mock.GlobalStorer
   474  		if *useMockStore {
   475  			globalStore = mockmem.NewGlobalStore()
   476  		}
   477  		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
   478  		// or until the timeout is reached.
   479  	REPEAT:
   480  		for {
   481  			for _, id := range nodeIDs {
   482  				//for each expected chunk, check if it is in the local store
   483  				localChunks := conf.idToChunksMap[id]
   484  				for _, ch := range localChunks {
   485  					//get the real chunk by the index in the index array
   486  					chunk := conf.hashes[ch]
    487  					log.Trace(fmt.Sprintf("checking for chunk %s on node %s", chunk, id))
   488  					//check if the expected chunk is indeed in the localstore
   489  					var err error
   490  					if *useMockStore {
   491  						//use the globalStore if the mockStore should be used; in that case,
   492  						//the complete localStore stack is bypassed for getting the chunk
   493  						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
   494  					} else {
   495  						//use the actual localstore
   496  						item, ok := sim.NodeItem(id, bucketKeyStore)
   497  						if !ok {
   498  							return fmt.Errorf("Error accessing localstore")
   499  						}
   500  						lstore := item.(*storage.LocalStore)
   501  						_, err = lstore.Get(ctx, chunk)
   502  					}
   503  					if err != nil {
   504  						log.Debug(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
    505  						// Sleep briefly so the retry loop does not flood the debug log
   506  						time.Sleep(500 * time.Millisecond)
   507  						continue REPEAT
   508  					}
   509  					log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
   510  				}
   511  			}
   512  			return nil
   513  		}
   514  	})
   515  
   516  	if result.Error != nil {
   517  		return result.Error
   518  	}
   519  
   520  	log.Info("Simulation ended")
   521  	return nil
   522  }
   523  
    524  //startSyncing is the server func that starts syncing.
    525  //It issues `RequestSubscriptionMsg` to peers, based on proximity order (po),
    526  //by iterating over the kademlia bins via `EachBin`.
    527  //It returns the number of subscriptions requested.
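         //Each RequestSubscription should be answered by the peer opening the subscription,
         //which testSyncingViaDirectSubscribe observes through its peer-event filter.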
   528  func startSyncing(r *Registry, conf *synctestConfig) (int, error) {
   529  	var err error
   530  	kad := r.delivery.kad
   531  	subCnt := 0
    532  	//iterate over each peer/bin and request the subscriptions needed for it
    533  	kad.EachBin(r.addr[:], pof, 0, func(conn *network.Peer, po int) bool {
    534  		//request subscription to the SYNC stream keyed by this peer's bin
   535  		subCnt++
   536  		err = r.RequestSubscription(conf.addrToIDMap[string(conn.Address())], NewStream("SYNC", FormatSyncBinKey(uint8(po)), true), NewRange(0, 0), High)
   537  		if err != nil {
    538  			log.Error(fmt.Sprintf("Error in RequestSubscription! %v", err))
   539  			return false
   540  		}
   541  		return true
   542  
   543  	})
   544  	return subCnt, nil
   545  }
   546  
    547  //map chunk keys to the node addresses which are responsible for them
   548  func mapKeysToNodes(conf *synctestConfig) {
   549  	nodemap := make(map[string][]int)
    550  	//build a pot of the node overlay addresses
   551  	np := pot.NewPot(nil, 0)
   552  	indexmap := make(map[string]int)
   553  	for i, a := range conf.addrs {
   554  		indexmap[string(a)] = i
   555  		np, _, _ = pot.Add(np, a, pof)
   556  	}
   557  
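         	// A chunk is expected at the node whose overlay address is closest to it (found via
         	// EachNeighbour on the pot above) and at that node's nearest neighbours, taken from
         	// the peer-pot map computed below.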
   558  	var kadMinProxSize = 2
   559  
   560  	ppmap := network.NewPeerPotMap(kadMinProxSize, conf.addrs)
   561  
    562  	//for each chunk hash, run EachNeighbour on the node address pot to identify the closest node
   563  	log.Trace(fmt.Sprintf("Generated hash chunk(s): %v", conf.hashes))
   564  	for i := 0; i < len(conf.hashes); i++ {
   565  		var a []byte
   566  		np.EachNeighbour([]byte(conf.hashes[i]), pof, func(val pot.Val, po int) bool {
   567  			// take the first address
   568  			a = val.([]byte)
   569  			return false
   570  		})
   571  
   572  		nns := ppmap[common.Bytes2Hex(a)].NNSet
   573  		nns = append(nns, a)
   574  
   575  		for _, p := range nns {
   576  			nodemap[string(p)] = append(nodemap[string(p)], i)
   577  		}
   578  	}
   579  	for addr, chunks := range nodemap {
   580  		//this selects which chunks are expected to be found with the given node
   581  		conf.idToChunksMap[conf.addrToIDMap[addr]] = chunks
   582  	}
   583  	log.Debug(fmt.Sprintf("Map of expected chunks by ID: %v", conf.idToChunksMap))
   584  }
   585  
    586  //upload chunkCount single-chunk files to one node's local store
   587  func uploadFileToSingleNodeStore(id enode.ID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) {
   588  	log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
   589  	fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
   590  	size := chunkSize
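         	// each stored file is exactly one chunk in size, so the root address returned by
         	// FileStore.Store is the address of the single chunk the syncing checks look for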
   591  	var rootAddrs []storage.Address
   592  	for i := 0; i < chunkCount; i++ {
   593  		rk, wait, err := fileStore.Store(context.TODO(), testutil.RandomReader(i, size), int64(size), false)
   594  		if err != nil {
   595  			return nil, err
   596  		}
   597  		err = wait(context.TODO())
   598  		if err != nil {
   599  			return nil, err
   600  		}
    601  		rootAddrs = append(rootAddrs, rk)
   602  	}
   603  
   604  	return rootAddrs, nil
   605  }