github.com/Ethersocial/go-esn@v0.3.7/swarm/network/stream/snapshot_sync_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package stream

import (
	"context"
	crand "crypto/rand"
	"fmt"
	"io"
	"os"
	"runtime"
	"sync"
	"testing"
	"time"

	"github.com/ethersocial/go-esn/common"
	"github.com/ethersocial/go-esn/log"
	"github.com/ethersocial/go-esn/node"
	"github.com/ethersocial/go-esn/p2p"
	"github.com/ethersocial/go-esn/p2p/enode"
	"github.com/ethersocial/go-esn/p2p/simulations"
	"github.com/ethersocial/go-esn/p2p/simulations/adapters"
	"github.com/ethersocial/go-esn/swarm/network"
	"github.com/ethersocial/go-esn/swarm/network/simulation"
	"github.com/ethersocial/go-esn/swarm/pot"
	"github.com/ethersocial/go-esn/swarm/state"
	"github.com/ethersocial/go-esn/swarm/storage"
	mockdb "github.com/ethersocial/go-esn/swarm/storage/mock/db"
)

const MaxTimeout = 600

type synctestConfig struct {
	addrs         [][]byte
	hashes        []storage.Address
	idToChunksMap map[enode.ID][]int
	//chunksToNodesMap map[string][]int
	addrToIDMap map[string]enode.ID
}

const (
	// Custom simulation event types: one for each stage of a chunk's
	// journey (created, offered, wanted, delivered, arrived) and one
	// emitted when the simulation terminates.
	EventTypeChunkCreated   simulations.EventType = "chunkCreated"
	EventTypeChunkOffered   simulations.EventType = "chunkOffered"
	EventTypeChunkWanted    simulations.EventType = "chunkWanted"
	EventTypeChunkDelivered simulations.EventType = "chunkDelivered"
	EventTypeChunkArrived   simulations.EventType = "chunkArrived"
	EventTypeSimTerminated  simulations.EventType = "simTerminated"
)
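
// The event types above are broadcast on the simulation network's event feed
// (see the sim.Net.Events().Send calls in runSim below). watchChunkArrivals is
// an illustrative sketch only, not used by the tests in this file: it shows one
// way a reader could collect chunk-arrival events, assuming the event.Feed based
// Events() accessor of p2p/simulations.Network carries *simulations.Event values,
// as the Send calls in this file suggest.
func watchChunkArrivals(ctx context.Context, sim *simulation.Simulation, arrived chan<- *simulations.Event) {
	events := make(chan *simulations.Event)
	sub := sim.Net.Events().Subscribe(events)
	defer sub.Unsubscribe()
	for {
		select {
		case e := <-events:
			//forward only the chunk-arrival events defined above
			if e.Type == EventTypeChunkArrived {
				arrived <- e
			}
		case <-ctx.Done():
			return
		}
	}
}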

// Tests in this file should not request chunks from peers.
// This function panics to make the problem obvious if such a request is made.
func dummyRequestFromPeers(_ context.Context, req *network.Request) (*enode.ID, chan struct{}, error) {
	panic(fmt.Sprintf("unexpected request: address %s, source %s", req.Addr.String(), req.Source.String()))
}

//TestSyncingViaGlobalSync is a syncing test for nodes.
//One node is randomly selected to be the pivot node.
//A configurable number of chunks and nodes can be
//provided to the test; the chunks are uploaded to
//the pivot node, and we check that the other nodes get the chunks
//they are expected to store based on the syncing protocol.
//The number of chunks and nodes can also be provided via the command line.
func TestSyncingViaGlobalSync(t *testing.T) {
	if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
		t.Skip("Flaky on mac on travis")
	}
	//if nodes/chunks have been provided via commandline,
	//run the tests with these values
	if *nodes != 0 && *chunks != 0 {
		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
		testSyncingViaGlobalSync(t, *chunks, *nodes)
	} else {
		var nodeCnt []int
		var chnkCnt []int
		//if the `longrunning` flag has been provided
		//run more test combinations
		if *longrunning {
			chnkCnt = []int{1, 8, 32, 256, 1024}
			nodeCnt = []int{16, 32, 64, 128, 256}
		} else {
			//default test
			chnkCnt = []int{4, 32}
			nodeCnt = []int{32, 16}
		}
		for _, chnk := range chnkCnt {
			for _, n := range nodeCnt {
				log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", chnk, n))
				testSyncingViaGlobalSync(t, chnk, n)
			}
		}
	}
}
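
// Example invocations (a sketch; the -nodes, -chunks and -longrunning flags
// referenced above through *nodes, *chunks and *longrunning are assumed to be
// registered in this package's shared test setup):
//
//	go test -run TestSyncingViaGlobalSync ./swarm/network/stream -args -nodes=16 -chunks=64
//	go test -run TestSyncingViaGlobalSync ./swarm/network/stream -args -longrunning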

func TestSyncingViaDirectSubscribe(t *testing.T) {
	if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
		t.Skip("Flaky on mac on travis")
	}
	//if nodes/chunks have been provided via commandline,
	//run the tests with these values
	if *nodes != 0 && *chunks != 0 {
		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
		err := testSyncingViaDirectSubscribe(t, *chunks, *nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		var nodeCnt []int
		var chnkCnt []int
		//if the `longrunning` flag has been provided
		//run more test combinations
		if *longrunning {
			chnkCnt = []int{1, 8, 32, 256, 1024}
			nodeCnt = []int{32, 16}
		} else {
			//default test
			chnkCnt = []int{4, 32}
			nodeCnt = []int{32, 16}
		}
		for _, chnk := range chnkCnt {
			for _, n := range nodeCnt {
				log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", chnk, n))
				err := testSyncingViaDirectSubscribe(t, chnk, n)
				if err != nil {
					t.Fatal(err)
				}
			}
		}
	}
}

var simServiceMap = map[string]simulation.ServiceFunc{
	"streamer": streamerFunc,
}

func streamerFunc(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
	n := ctx.Config.Node()
	addr := network.NewAddr(n)
	store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
	if err != nil {
		return nil, nil, err
	}
	bucket.Store(bucketKeyStore, store)
	localStore := store.(*storage.LocalStore)
	netStore, err := storage.NewNetStore(localStore, nil)
	if err != nil {
		return nil, nil, err
	}
	kad := network.NewKademlia(addr.Over(), network.NewKadParams())
	delivery := NewDelivery(kad, netStore)
	netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New

	r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
		DoSync:          true,
		SyncUpdateDelay: 3 * time.Second,
	})

	bucket.Store(bucketKeyRegistry, r)

	cleanup = func() {
		os.RemoveAll(datadir)
		netStore.Close()
		r.Close()
	}

	return r, cleanup, nil

}

func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
	sim := simulation.New(simServiceMap)
	defer sim.Close()

	log.Info("Initializing test config")

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		t.Fatal(err)
	}

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancelSimRun()

	if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
		t.Fatal(err)
	}

	disconnections := sim.PeerEvents(
		context.Background(),
		sim.NodeIDs(),
		simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
	)

	go func() {
		for d := range disconnections {
			log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
			//t.Fatal must only be called from the test goroutine,
			//so report the failure and cancel the simulation instead
			t.Error("unexpected disconnect")
			cancelSimRun()
		}
	}()

	result := runSim(conf, ctx, sim, chunkCount)

	if result.Error != nil {
		t.Fatal(result.Error)
	}
	log.Info("Simulation ended")
}

func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulation, chunkCount int) simulation.Result {

	return sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//select a random up node
		//this is the node selected for upload
		node := sim.RandomUpNode()
		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
		if !ok {
			return fmt.Errorf("No localstore")
		}
		lstore := item.(*storage.LocalStore)
		hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
		if err != nil {
			return err
		}
		for _, h := range hashes {
			evt := &simulations.Event{
				Type: EventTypeChunkCreated,
				Node: sim.Net.GetNode(node.ID),
				Data: h.String(),
			}
			sim.Net.Events().Send(evt)
		}
		conf.hashes = append(conf.hashes, hashes...)
		mapKeysToNodes(conf)

		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
		// or until the timeout is reached.
		var gDir string
		var globalStore *mockdb.GlobalStore
		if *useMockStore {
			gDir, globalStore, err = createGlobalStore()
			if err != nil {
				return fmt.Errorf("mockStore is enabled but creating the global store failed: %v", err)
			}
			defer func() {
				os.RemoveAll(gDir)
				err := globalStore.Close()
				if err != nil {
					log.Error("Error closing global store", "err", err)
				}
			}()
		}
	REPEAT:
		for {
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				localChunks := conf.idToChunksMap[id]
				for _, ch := range localChunks {
					//get the real chunk by the index in the index array
					chunk := conf.hashes[ch]
					log.Trace(fmt.Sprintf("node has chunk: %s", chunk))
					//check if the expected chunk is indeed in the localstore
					var err error
					if *useMockStore {
						//use the globalStore if the mockStore should be used; in that case,
						//the complete localStore stack is bypassed for getting the chunk
						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
					} else {
						//use the actual localstore
						item, ok := sim.NodeItem(id, bucketKeyStore)
						if !ok {
							return fmt.Errorf("Error accessing localstore")
						}
						lstore := item.(*storage.LocalStore)
						_, err = lstore.Get(ctx, chunk)
					}
					if err != nil {
						log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
						// Back off briefly so the warning is not logged in a tight loop
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					evt := &simulations.Event{
						Type: EventTypeChunkArrived,
						Node: sim.Net.GetNode(id),
						Data: chunk.String(),
					}
					sim.Net.Events().Send(evt)
					log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
				}
			}
			return nil
		}
	})
}

/*
The test generates the given number of chunks.

For every chunk generated, the nearest node addresses
are identified, and we verify that the nodes closest to the
chunk address actually do have the chunk in their local stores.

The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. The snapshot should have 'streamer' in its service list.
*/
func testSyncingViaDirectSubscribe(t *testing.T, chunkCount int, nodeCount int) error {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			n := ctx.Config.Node()
			addr := network.NewAddr(n)
			store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), nil)
			bucket.Store(bucketKeyRegistry, r)

			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			cleanup = func() {
				os.RemoveAll(datadir)
				netStore.Close()
				r.Close()
			}

			return r, cleanup, nil

		},
	})
	defer sim.Close()

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancelSimRun()

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
		return err
	}

	disconnections := sim.PeerEvents(
		context.Background(),
		sim.NodeIDs(),
		simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
	)

	go func() {
		for d := range disconnections {
			log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
			//t.Fatal must only be called from the test goroutine,
			//so report the failure and cancel the simulation instead
			t.Error("unexpected disconnect")
			cancelSimRun()
		}
	}()

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		var subscriptionCount int

		filter := simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("stream").MsgCode(4)
		eventC := sim.PeerEvents(ctx, nodeIDs, filter)

		for j, node := range nodeIDs {
			log.Trace(fmt.Sprintf("Start syncing subscriptions: %d", j))
			//start syncing!
			item, ok := sim.NodeItem(node, bucketKeyRegistry)
			if !ok {
				return fmt.Errorf("No registry")
			}
			registry := item.(*Registry)

			var cnt int
			cnt, err = startSyncing(registry, conf)
			if err != nil {
				return err
			}
			//increment the number of subscriptions we need to wait for
			//by the count returned from startSyncing (SYNC subscriptions)
			subscriptionCount += cnt
		}

		for e := range eventC {
			if e.Error != nil {
				return e.Error
			}
			subscriptionCount--
			if subscriptionCount == 0 {
				break
			}
		}
		//select a random node for upload
		node := sim.RandomUpNode()
		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
		if !ok {
			return fmt.Errorf("No localstore")
		}
		lstore := item.(*storage.LocalStore)
		hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
		if err != nil {
			return err
		}
		conf.hashes = append(conf.hashes, hashes...)
		mapKeysToNodes(conf)

		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		var gDir string
		var globalStore *mockdb.GlobalStore
		if *useMockStore {
			gDir, globalStore, err = createGlobalStore()
			if err != nil {
				return fmt.Errorf("mockStore is enabled but creating the global store failed: %v", err)
			}
			defer os.RemoveAll(gDir)
		}
		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
		// or until the timeout is reached.
	REPEAT:
		for {
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				localChunks := conf.idToChunksMap[id]
				for _, ch := range localChunks {
					//get the real chunk by the index in the index array
					chunk := conf.hashes[ch]
					log.Trace(fmt.Sprintf("node has chunk: %s", chunk))
					//check if the expected chunk is indeed in the localstore
					var err error
					if *useMockStore {
						//use the globalStore if the mockStore should be used; in that case,
						//the complete localStore stack is bypassed for getting the chunk
						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
					} else {
						//use the actual localstore
						item, ok := sim.NodeItem(id, bucketKeyStore)
						if !ok {
							return fmt.Errorf("Error accessing localstore")
						}
						lstore := item.(*storage.LocalStore)
						_, err = lstore.Get(ctx, chunk)
					}
					if err != nil {
						log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
						// Back off briefly so the warning is not logged in a tight loop
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
				}
			}
			return nil
		}
	})

	if result.Error != nil {
		return result.Error
	}

	log.Info("Simulation ended")
	return nil
}

//startSyncing is the server func to start syncing.
//It issues `RequestSubscriptionMsg` to peers, based on proximity order (po),
//by iterating over the kademlia connections with `EachBin`.
//It returns the number of subscriptions requested.
func startSyncing(r *Registry, conf *synctestConfig) (int, error) {
	var err error
	kad := r.delivery.kad
	subCnt := 0
	//iterate over each bin and request the needed subscriptions
	kad.EachBin(r.addr[:], pof, 0, func(conn *network.Peer, po int) bool {
		//request a SYNC subscription from this peer for its bin (po)
		subCnt++
		err = r.RequestSubscription(conf.addrToIDMap[string(conn.Address())], NewStream("SYNC", FormatSyncBinKey(uint8(po)), true), NewRange(0, 0), High)
		if err != nil {
			log.Error(fmt.Sprintf("Error in RequestSubscription! %v", err))
			return false
		}
		return true

	})
	return subCnt, nil
}

//map chunk keys to the node addresses that are responsible for them
func mapKeysToNodes(conf *synctestConfig) {
	nodemap := make(map[string][]int)
	//build a pot of the node overlay addresses
	np := pot.NewPot(nil, 0)
	indexmap := make(map[string]int)
	for i, a := range conf.addrs {
		indexmap[string(a)] = i
		np, _, _ = pot.Add(np, a, pof)
	}

	var kadMinProxSize = 2

	ppmap := network.NewPeerPotMap(kadMinProxSize, conf.addrs)

	//for each chunk hash, run EachNeighbour on the pot of node addresses to identify the closest node
	log.Trace(fmt.Sprintf("Generated hash chunk(s): %v", conf.hashes))
	for i := 0; i < len(conf.hashes); i++ {
		var a []byte
		np.EachNeighbour([]byte(conf.hashes[i]), pof, func(val pot.Val, po int) bool {
			// take the first address
			a = val.([]byte)
			return false
		})

		nns := ppmap[common.Bytes2Hex(a)].NNSet
		nns = append(nns, a)

		for _, p := range nns {
			nodemap[string(p)] = append(nodemap[string(p)], i)
		}
	}
	for addr, chunks := range nodemap {
		//this selects which chunks are expected to be found with the given node
		conf.idToChunksMap[conf.addrToIDMap[addr]] = chunks
	}
	log.Debug(fmt.Sprintf("Map of expected chunks by ID: %v", conf.idToChunksMap))
}

//upload a file (chunkCount chunks of random data) to a single local node store
func uploadFileToSingleNodeStore(id enode.ID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) {
	log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
	fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
	size := chunkSize
	var rootAddrs []storage.Address
	for i := 0; i < chunkCount; i++ {
		rk, wait, err := fileStore.Store(context.TODO(), io.LimitReader(crand.Reader, int64(size)), int64(size), false)
		if err != nil {
			return nil, err
		}
		err = wait(context.TODO())
		if err != nil {
			return nil, err
		}
		rootAddrs = append(rootAddrs, rk)
	}

	return rootAddrs, nil
}
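
// chunkIsPresent is an illustrative sketch only, not wired into the tests above:
// it factors out the per-chunk check performed in the retrieval loops, reporting
// whether the given chunk can be read back from the LocalStore that the streamer
// ServiceFunc registered under bucketKeyStore for this node.
func chunkIsPresent(ctx context.Context, sim *simulation.Simulation, id enode.ID, chunk storage.Address) (bool, error) {
	item, ok := sim.NodeItem(id, bucketKeyStore)
	if !ok {
		return false, fmt.Errorf("no localstore for node %s", id)
	}
	lstore := item.(*storage.LocalStore)
	if _, err := lstore.Get(ctx, chunk); err != nil {
		//the chunk has not (yet) been synced to this node
		return false, nil
	}
	return true, nil
}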