github.com/FUSIONFoundation/efsn@v3.6.2-0.20200916075423-dbb5dd5d2cc7+incompatible/swarm/network/stream/snapshot_sync_test.go

     1  // Copyright 2018 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  package stream
    17  
    18  import (
    19  	"context"
    20  	crand "crypto/rand"
    21  	"fmt"
    22  	"io"
    23  	"os"
    24  	"runtime"
    25  	"sync"
    26  	"testing"
    27  	"time"
    28  
    29  	"github.com/FusionFoundation/efsn/common"
    30  	"github.com/FusionFoundation/efsn/log"
    31  	"github.com/FusionFoundation/efsn/node"
    32  	"github.com/FusionFoundation/efsn/p2p"
    33  	"github.com/FusionFoundation/efsn/p2p/discover"
    34  	"github.com/FusionFoundation/efsn/p2p/simulations/adapters"
    35  	"github.com/FusionFoundation/efsn/swarm/network"
    36  	"github.com/FusionFoundation/efsn/swarm/network/simulation"
    37  	"github.com/FusionFoundation/efsn/swarm/pot"
    38  	"github.com/FusionFoundation/efsn/swarm/state"
    39  	"github.com/FusionFoundation/efsn/swarm/storage"
    40  	mockdb "github.com/FusionFoundation/efsn/swarm/storage/mock/db"
    41  )
    42  
    43  const MaxTimeout = 600
    44  
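         //synctestConfig bundles the state shared by the syncing tests: the
         //generated chunk hashes, the node overlay addresses, and the mapping
         //of which chunk indexes each node is expected to store.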
    45  type synctestConfig struct {
    46  	addrs         [][]byte
    47  	hashes        []storage.Address
    48  	idToChunksMap map[discover.NodeID][]int
    49  	//chunksToNodesMap map[string][]int
    50  	addrToIDMap map[string]discover.NodeID
    51  }
    52  
    53  // Tests in this file should not request chunks from peers.
     54  // This function panics to signal a problem if a request has been made.
    55  func dummyRequestFromPeers(_ context.Context, req *network.Request) (*discover.NodeID, chan struct{}, error) {
    56  	panic(fmt.Sprintf("unexpected request: address %s, source %s", req.Addr.String(), req.Source.String()))
    57  }
    58  
     59  //This is a syncing test for a network of nodes.
     60  //One node is randomly selected to be the pivot node.
     61  //A configurable number of chunks and nodes can be
     62  //provided to the test; the chunks are uploaded to the
     63  //pivot node, and we check that every node stores the chunks
     64  //it is expected to hold based on the syncing protocol.
     65  //The number of chunks and nodes can also be provided via command-line flags.
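         //
         //A typical invocation (assuming the -nodes, -chunks and -longrunning
         //flags are registered elsewhere in this test package) might look like:
         //
         //  go test ./swarm/network/stream -run TestSyncingViaGlobalSync -nodes=16 -chunks=8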
    66  func TestSyncingViaGlobalSync(t *testing.T) {
    67  	if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
    68  		t.Skip("Flaky on mac on travis")
    69  	}
    70  	//if nodes/chunks have been provided via commandline,
    71  	//run the tests with these values
    72  	if *nodes != 0 && *chunks != 0 {
    73  		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
    74  		testSyncingViaGlobalSync(t, *chunks, *nodes)
    75  	} else {
    76  		var nodeCnt []int
    77  		var chnkCnt []int
    78  		//if the `longrunning` flag has been provided
    79  		//run more test combinations
    80  		if *longrunning {
    81  			chnkCnt = []int{1, 8, 32, 256, 1024}
    82  			nodeCnt = []int{16, 32, 64, 128, 256}
    83  		} else {
    84  			//default test
    85  			chnkCnt = []int{4, 32}
    86  			nodeCnt = []int{32, 16}
    87  		}
    88  		for _, chnk := range chnkCnt {
    89  			for _, n := range nodeCnt {
     90  				log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", chnk, n))
    91  				testSyncingViaGlobalSync(t, chnk, n)
    92  			}
    93  		}
    94  	}
    95  }
    96  
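         //TestSyncingViaDirectSubscribe covers the same syncing scenario, but the
         //SYNC subscriptions are requested explicitly via startSyncing rather than
         //being set up automatically by the registry.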
    97  func TestSyncingViaDirectSubscribe(t *testing.T) {
    98  	if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
    99  		t.Skip("Flaky on mac on travis")
   100  	}
   101  	//if nodes/chunks have been provided via commandline,
   102  	//run the tests with these values
   103  	if *nodes != 0 && *chunks != 0 {
   104  		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
   105  		err := testSyncingViaDirectSubscribe(t, *chunks, *nodes)
   106  		if err != nil {
   107  			t.Fatal(err)
   108  		}
   109  	} else {
   110  		var nodeCnt []int
   111  		var chnkCnt []int
   112  		//if the `longrunning` flag has been provided
   113  		//run more test combinations
   114  		if *longrunning {
   115  			chnkCnt = []int{1, 8, 32, 256, 1024}
   116  			nodeCnt = []int{32, 16}
   117  		} else {
   118  			//default test
   119  			chnkCnt = []int{4, 32}
   120  			nodeCnt = []int{32, 16}
   121  		}
   122  		for _, chnk := range chnkCnt {
   123  			for _, n := range nodeCnt {
    124  				log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", chnk, n))
   125  				err := testSyncingViaDirectSubscribe(t, chnk, n)
   126  				if err != nil {
   127  					t.Fatal(err)
   128  				}
   129  			}
   130  		}
   131  	}
   132  }
   133  
   134  func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
   135  	sim := simulation.New(map[string]simulation.ServiceFunc{
   136  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   137  
   138  			id := ctx.Config.ID
   139  			addr := network.NewAddrFromNodeID(id)
   140  			store, datadir, err := createTestLocalStorageForID(id, addr)
   141  			if err != nil {
   142  				return nil, nil, err
   143  			}
   144  			bucket.Store(bucketKeyStore, store)
   145  			localStore := store.(*storage.LocalStore)
   146  			netStore, err := storage.NewNetStore(localStore, nil)
   147  			if err != nil {
   148  				return nil, nil, err
   149  			}
   150  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   151  			delivery := NewDelivery(kad, netStore)
   152  			netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New
   153  
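         			//DoSync makes the registry request SYNC subscriptions on its own;
         			//SyncUpdateDelay is the delay it waits before updating those
         			//subscriptions after kademlia neighbourhood changes.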
   154  			r := NewRegistry(addr, delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
   155  				DoSync:          true,
   156  				SyncUpdateDelay: 3 * time.Second,
   157  			})
   158  			bucket.Store(bucketKeyRegistry, r)
   159  
   160  			cleanup = func() {
   161  				os.RemoveAll(datadir)
   162  				netStore.Close()
   163  				r.Close()
   164  			}
   165  
   166  			return r, cleanup, nil
   167  
   168  		},
   169  	})
   170  	defer sim.Close()
   171  
   172  	log.Info("Initializing test config")
   173  
   174  	conf := &synctestConfig{}
   175  	//map of discover ID to indexes of chunks expected at that ID
   176  	conf.idToChunksMap = make(map[discover.NodeID][]int)
   177  	//map of overlay address to discover ID
   178  	conf.addrToIDMap = make(map[string]discover.NodeID)
   179  	//array where the generated chunk hashes will be stored
   180  	conf.hashes = make([]storage.Address, 0)
   181  
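         	//bring up the simulation network from a pre-generated snapshot with nodeCount nodes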
   182  	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
   183  	if err != nil {
   184  		t.Fatal(err)
   185  	}
   186  
   187  	ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
   188  	defer cancelSimRun()
   189  
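         	//wait until every node's kademlia reports as healthy before proceeding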
   190  	if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   191  		t.Fatal(err)
   192  	}
   193  
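         	//watch for unexpected peer drops on any node for the duration of the test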
   194  	disconnections := sim.PeerEvents(
   195  		context.Background(),
   196  		sim.NodeIDs(),
   197  		simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
   198  	)
   199  
   200  	go func() {
   201  		for d := range disconnections {
   202  			log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
    203  			t.Error("unexpected disconnect") // t.Fatal must not be called outside the test goroutine
   204  			cancelSimRun()
   205  		}
   206  	}()
   207  
   208  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
   209  		nodeIDs := sim.UpNodeIDs()
   210  		for _, n := range nodeIDs {
   211  			//get the kademlia overlay address from this ID
   212  			a := network.ToOverlayAddr(n.Bytes())
   213  			//append it to the array of all overlay addresses
   214  			conf.addrs = append(conf.addrs, a)
   215  			//the proximity calculation is on overlay addr,
   216  			//the p2p/simulations check func triggers on discover.NodeID,
   217  			//so we need to know which overlay addr maps to which nodeID
   218  			conf.addrToIDMap[string(a)] = n
   219  		}
   220  
    221  		//select a random up node;
    222  		//this is the node the chunks will be uploaded to
   223  		node := sim.RandomUpNode()
   224  		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
   225  		if !ok {
   226  			return fmt.Errorf("No localstore")
   227  		}
   228  		lstore := item.(*storage.LocalStore)
   229  		hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
   230  		if err != nil {
   231  			return err
   232  		}
   233  		conf.hashes = append(conf.hashes, hashes...)
   234  		mapKeysToNodes(conf)
   235  
   236  		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
   237  		// or until the timeout is reached.
   238  		allSuccess := false
   239  		var gDir string
   240  		var globalStore *mockdb.GlobalStore
   241  		if *useMockStore {
   242  			gDir, globalStore, err = createGlobalStore()
   243  			if err != nil {
    244  				return fmt.Errorf("mock store is enabled but creating the global store failed: %v", err)
   245  			}
   246  			defer func() {
   247  				os.RemoveAll(gDir)
   248  				err := globalStore.Close()
   249  				if err != nil {
    250  					log.Error("Error closing global store", "err", err)
   251  				}
   252  			}()
   253  		}
   254  		for !allSuccess {
   255  			allSuccess = true
   256  			for _, id := range nodeIDs {
   257  				//for each expected chunk, check if it is in the local store
   258  				localChunks := conf.idToChunksMap[id]
   259  				localSuccess := true
   260  				for _, ch := range localChunks {
    261  					//look up the chunk address by its index in the hashes slice
    262  					chunk := conf.hashes[ch]
    263  					log.Trace(fmt.Sprintf("checking node for chunk %s", chunk))
   264  					//check if the expected chunk is indeed in the localstore
   265  					var err error
   266  					if *useMockStore {
   267  						//use the globalStore if the mockStore should be used; in that case,
   268  						//the complete localStore stack is bypassed for getting the chunk
   269  						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
   270  					} else {
   271  						//use the actual localstore
   272  						item, ok := sim.NodeItem(id, bucketKeyStore)
   273  						if !ok {
   274  							return fmt.Errorf("Error accessing localstore")
   275  						}
   276  						lstore := item.(*storage.LocalStore)
   277  						_, err = lstore.Get(ctx, chunk)
   278  					}
   279  					if err != nil {
   280  						log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
   281  						localSuccess = false
    282  						// throttle the loop so the warning is not logged too aggressively
   283  						time.Sleep(500 * time.Millisecond)
   284  					} else {
   285  						log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
   286  					}
   287  				}
   288  				if !localSuccess {
   289  					allSuccess = false
   290  					break
   291  				}
   292  			}
   293  		}
   294  		if !allSuccess {
   295  			return fmt.Errorf("Not all chunks succeeded!")
   296  		}
   297  		return nil
   298  	})
   299  
   300  	if result.Error != nil {
   301  		t.Fatal(result.Error)
   302  	}
   303  	log.Info("Simulation ended")
   304  }
   305  
   306  /*
   307  The test generates the given number of chunks
   308  
   309  For every chunk generated, the nearest node addresses
    310  are identified, and we verify that the nodes closest to the
   311  chunk addresses actually do have the chunks in their local stores.
   312  
   313  The test loads a snapshot file to construct the swarm network,
   314  assuming that the snapshot file identifies a healthy
   315  kademlia network. The snapshot should have 'streamer' in its service list.
   316  */
   317  func testSyncingViaDirectSubscribe(t *testing.T, chunkCount int, nodeCount int) error {
   318  	sim := simulation.New(map[string]simulation.ServiceFunc{
   319  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   320  
   321  			id := ctx.Config.ID
   322  			addr := network.NewAddrFromNodeID(id)
   323  			store, datadir, err := createTestLocalStorageForID(id, addr)
   324  			if err != nil {
   325  				return nil, nil, err
   326  			}
   327  			bucket.Store(bucketKeyStore, store)
   328  			localStore := store.(*storage.LocalStore)
   329  			netStore, err := storage.NewNetStore(localStore, nil)
   330  			if err != nil {
   331  				return nil, nil, err
   332  			}
   333  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   334  			delivery := NewDelivery(kad, netStore)
   335  			netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New
   336  
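         			//no RegistryOptions here: syncing is not started automatically,
         			//it is triggered explicitly below via startSyncing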
   337  			r := NewRegistry(addr, delivery, netStore, state.NewInmemoryStore(), nil)
   338  			bucket.Store(bucketKeyRegistry, r)
   339  
   340  			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
   341  			bucket.Store(bucketKeyFileStore, fileStore)
   342  
   343  			cleanup = func() {
   344  				os.RemoveAll(datadir)
   345  				netStore.Close()
   346  				r.Close()
   347  			}
   348  
   349  			return r, cleanup, nil
   350  
   351  		},
   352  	})
   353  	defer sim.Close()
   354  
   355  	ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
   356  	defer cancelSimRun()
   357  
   358  	conf := &synctestConfig{}
   359  	//map of discover ID to indexes of chunks expected at that ID
   360  	conf.idToChunksMap = make(map[discover.NodeID][]int)
   361  	//map of overlay address to discover ID
   362  	conf.addrToIDMap = make(map[string]discover.NodeID)
   363  	//array where the generated chunk hashes will be stored
   364  	conf.hashes = make([]storage.Address, 0)
   365  
   366  	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
   367  	if err != nil {
   368  		return err
   369  	}
   370  
   371  	if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   372  		return err
   373  	}
   374  
   375  	disconnections := sim.PeerEvents(
   376  		context.Background(),
   377  		sim.NodeIDs(),
   378  		simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
   379  	)
   380  
   381  	go func() {
   382  		for d := range disconnections {
   383  			log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
    384  			t.Error("unexpected disconnect") // t.Fatal must not be called outside the test goroutine
   385  			cancelSimRun()
   386  		}
   387  	}()
   388  
   389  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
   390  		nodeIDs := sim.UpNodeIDs()
   391  		for _, n := range nodeIDs {
   392  			//get the kademlia overlay address from this ID
   393  			a := network.ToOverlayAddr(n.Bytes())
   394  			//append it to the array of all overlay addresses
   395  			conf.addrs = append(conf.addrs, a)
   396  			//the proximity calculation is on overlay addr,
   397  			//the p2p/simulations check func triggers on discover.NodeID,
   398  			//so we need to know which overlay addr maps to which nodeID
   399  			conf.addrToIDMap[string(a)] = n
   400  		}
   401  
   402  		var subscriptionCount int
   403  
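         		//watch incoming stream protocol messages with code 4; one such event
         		//is expected per requested SYNC subscription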
   404  		filter := simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("stream").MsgCode(4)
   405  		eventC := sim.PeerEvents(ctx, nodeIDs, filter)
   406  
   407  		for j, node := range nodeIDs {
   408  			log.Trace(fmt.Sprintf("Start syncing subscriptions: %d", j))
   409  			//start syncing!
   410  			item, ok := sim.NodeItem(node, bucketKeyRegistry)
   411  			if !ok {
   412  				return fmt.Errorf("No registry")
   413  			}
   414  			registry := item.(*Registry)
   415  
   416  			var cnt int
   417  			cnt, err = startSyncing(registry, conf)
   418  			if err != nil {
   419  				return err
   420  			}
   421  			//increment the number of subscriptions we need to wait for
   422  			//by the count returned from startSyncing (SYNC subscriptions)
   423  			subscriptionCount += cnt
   424  		}
   425  
   426  		for e := range eventC {
   427  			if e.Error != nil {
   428  				return e.Error
   429  			}
   430  			subscriptionCount--
   431  			if subscriptionCount == 0 {
   432  				break
   433  			}
   434  		}
   435  		//select a random node for upload
   436  		node := sim.RandomUpNode()
   437  		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
   438  		if !ok {
   439  			return fmt.Errorf("No localstore")
   440  		}
   441  		lstore := item.(*storage.LocalStore)
   442  		hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
   443  		if err != nil {
   444  			return err
   445  		}
   446  		conf.hashes = append(conf.hashes, hashes...)
   447  		mapKeysToNodes(conf)
   448  
   449  		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   450  			return err
   451  		}
   452  
   453  		var gDir string
   454  		var globalStore *mockdb.GlobalStore
   455  		if *useMockStore {
   456  			gDir, globalStore, err = createGlobalStore()
   457  			if err != nil {
    458  				return fmt.Errorf("mock store is enabled but creating the global store failed: %v", err)
   459  			}
   460  			defer os.RemoveAll(gDir)
   461  		}
   462  		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
   463  		// or until the timeout is reached.
   464  		allSuccess := false
   465  		for !allSuccess {
   466  			allSuccess = true
   467  			for _, id := range nodeIDs {
   468  				//for each expected chunk, check if it is in the local store
   469  				localChunks := conf.idToChunksMap[id]
   470  				localSuccess := true
   471  				for _, ch := range localChunks {
    472  					//look up the chunk address by its index in the hashes slice
    473  					chunk := conf.hashes[ch]
    474  					log.Trace(fmt.Sprintf("checking node for chunk %s", chunk))
   475  					//check if the expected chunk is indeed in the localstore
   476  					var err error
   477  					if *useMockStore {
   478  						//use the globalStore if the mockStore should be used; in that case,
   479  						//the complete localStore stack is bypassed for getting the chunk
   480  						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
   481  					} else {
   482  						//use the actual localstore
   483  						item, ok := sim.NodeItem(id, bucketKeyStore)
   484  						if !ok {
   485  							return fmt.Errorf("Error accessing localstore")
   486  						}
   487  						lstore := item.(*storage.LocalStore)
   488  						_, err = lstore.Get(ctx, chunk)
   489  					}
   490  					if err != nil {
   491  						log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
   492  						localSuccess = false
    493  						// throttle the loop so the warning is not logged too aggressively
   494  						time.Sleep(500 * time.Millisecond)
   495  					} else {
   496  						log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
   497  					}
   498  				}
   499  				if !localSuccess {
   500  					allSuccess = false
   501  					break
   502  				}
   503  			}
   504  		}
   505  		if !allSuccess {
   506  			return fmt.Errorf("Not all chunks succeeded!")
   507  		}
   508  		return nil
   509  	})
   510  
   511  	if result.Error != nil {
   512  		return result.Error
   513  	}
   514  
   515  	log.Info("Simulation ended")
   516  	return nil
   517  }
   518  
    519  //startSyncing is the server func to start syncing;
    520  //it issues `RequestSubscriptionMsg` to peers, based on proximity order (po),
    521  //by iterating over the kademlia bins with `EachBin`.
    522  //It returns the number of subscriptions requested.
   523  func startSyncing(r *Registry, conf *synctestConfig) (int, error) {
   524  	var err error
   525  	kad := r.delivery.kad
   526  	subCnt := 0
    527  	//iterate over each kademlia bin and request the needed subscriptions
   528  	kad.EachBin(r.addr.Over(), pof, 0, func(conn *network.Peer, po int) bool {
    529  		//request a SYNC subscription for this bin from the peer
   530  
   531  		subCnt++
   532  		err = r.RequestSubscription(conf.addrToIDMap[string(conn.Address())], NewStream("SYNC", FormatSyncBinKey(uint8(po)), true), NewRange(0, 0), High)
   533  		if err != nil {
    534  			log.Error(fmt.Sprintf("Error in RequestSubscription! %v", err))
   535  			return false
   536  		}
   537  		return true
   538  
   539  	})
   540  	return subCnt, nil
   541  }
   542  
    543  //map chunk keys to the node addresses that are responsible for them
   544  func mapKeysToNodes(conf *synctestConfig) {
   545  	nodemap := make(map[string][]int)
    546  	//build a pot of all node overlay addresses
   547  	np := pot.NewPot(nil, 0)
   548  	indexmap := make(map[string]int)
   549  	for i, a := range conf.addrs {
   550  		indexmap[string(a)] = i
   551  		np, _, _ = pot.Add(np, a, pof)
   552  	}
   553  
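         	//compute, for every node address, its expected nearest-neighbour set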
   554  	var kadMinProxSize = 2
   555  
   556  	ppmap := network.NewPeerPotMap(kadMinProxSize, conf.addrs)
   557  
    558  	//for each chunk hash, run EachNeighbour on the address pot to identify the closest node
   559  	log.Trace(fmt.Sprintf("Generated hash chunk(s): %v", conf.hashes))
   560  	for i := 0; i < len(conf.hashes); i++ {
   561  		var a []byte
   562  		np.EachNeighbour([]byte(conf.hashes[i]), pof, func(val pot.Val, po int) bool {
   563  			// take the first address
   564  			a = val.([]byte)
   565  			return false
   566  		})
   567  
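         		//the chunk is expected at the closest node and at that node's nearest neighbours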
   568  		nns := ppmap[common.Bytes2Hex(a)].NNSet
   569  		nns = append(nns, a)
   570  
   571  		for _, p := range nns {
   572  			nodemap[string(p)] = append(nodemap[string(p)], i)
   573  		}
   574  	}
   575  	for addr, chunks := range nodemap {
   576  		//this selects which chunks are expected to be found with the given node
   577  		conf.idToChunksMap[conf.addrToIDMap[addr]] = chunks
   578  	}
   579  	log.Debug(fmt.Sprintf("Map of expected chunks by ID: %v", conf.idToChunksMap))
   580  }
   581  
    582  //upload a file (as chunks) to a single local node store
   583  func uploadFileToSingleNodeStore(id discover.NodeID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) {
   584  	log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
   585  	fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
   586  	size := chunkSize
   587  	var rootAddrs []storage.Address
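         	//store chunkCount single-chunk files of random data and collect their root addresses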
   588  	for i := 0; i < chunkCount; i++ {
   589  		rk, wait, err := fileStore.Store(context.TODO(), io.LimitReader(crand.Reader, int64(size)), int64(size), false)
   590  		if err != nil {
   591  			return nil, err
   592  		}
   593  		err = wait(context.TODO())
   594  		if err != nil {
   595  			return nil, err
   596  		}
    597  		rootAddrs = append(rootAddrs, rk)
   598  	}
   599  
   600  	return rootAddrs, nil
   601  }