github.com/pslzym/go-ethereum@v1.8.17-0.20180926104442-4b6824e07b1b/swarm/network/stream/snapshot_sync_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package stream

import (
	"context"
	crand "crypto/rand"
	"fmt"
	"io"
	"os"
	"runtime"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/pot"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
	mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
)

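// MaxTimeout bounds how long the sync tests in this package may run; the
// value is assumed to be interpreted as a number of seconds where it is used.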
const MaxTimeout = 600

type synctestConfig struct {
	addrs         [][]byte
	hashes        []storage.Address
	idToChunksMap map[enode.ID][]int
	//chunksToNodesMap map[string][]int
	addrToIDMap map[string]enode.ID
}

// Tests in this file should not request chunks from peers.
// This function panics to signal a problem if a request is made.
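// It is installed below as each node's NetStore fetcher factory via
// network.NewFetcherFactory(dummyRequestFromPeers, true), so any attempt to
// fetch a chunk from the network fails the test immediately.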
func dummyRequestFromPeers(_ context.Context, req *network.Request) (*enode.ID, chan struct{}, error) {
	panic(fmt.Sprintf("unexpected request: address %s, source %s", req.Addr.String(), req.Source.String()))
}

// This test is a syncing test for nodes.
// One node is randomly selected to be the pivot node.
// A configurable number of chunks and nodes can be provided to the test;
// the given number of chunks is uploaded to the pivot node, and we check
// that the other nodes get the chunks they are expected to store based on
// the syncing protocol.
// The number of chunks and nodes can also be provided via the command line.
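// For example, assuming the package registers -chunks, -nodes and -longrunning
// as test flags (matching the pointers dereferenced below), a run with explicit
// values might look like:
//
//	go test ./swarm/network/stream -run TestSyncingViaGlobalSync -chunks 32 -nodes 16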
func TestSyncingViaGlobalSync(t *testing.T) {
	if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
		t.Skip("Flaky on mac on travis")
	}
	//if nodes/chunks have been provided via commandline,
	//run the tests with these values
	if *nodes != 0 && *chunks != 0 {
		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
		testSyncingViaGlobalSync(t, *chunks, *nodes)
	} else {
		var nodeCnt []int
		var chnkCnt []int
		//if the `longrunning` flag has been provided
		//run more test combinations
		if *longrunning {
			chnkCnt = []int{1, 8, 32, 256, 1024}
			nodeCnt = []int{16, 32, 64, 128, 256}
		} else {
			//default test
			chnkCnt = []int{4, 32}
			nodeCnt = []int{32, 16}
		}
		for _, chnk := range chnkCnt {
			for _, n := range nodeCnt {
				log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", chnk, n))
				testSyncingViaGlobalSync(t, chnk, n)
			}
		}
	}
}

func TestSyncingViaDirectSubscribe(t *testing.T) {
	if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
		t.Skip("Flaky on mac on travis")
	}
	//if nodes/chunks have been provided via commandline,
	//run the tests with these values
	if *nodes != 0 && *chunks != 0 {
		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
		err := testSyncingViaDirectSubscribe(t, *chunks, *nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		var nodeCnt []int
		var chnkCnt []int
		//if the `longrunning` flag has been provided
		//run more test combinations
		if *longrunning {
			chnkCnt = []int{1, 8, 32, 256, 1024}
			nodeCnt = []int{32, 16}
		} else {
			//default test
			chnkCnt = []int{4, 32}
			nodeCnt = []int{32, 16}
		}
		for _, chnk := range chnkCnt {
			for _, n := range nodeCnt {
				log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", chnk, n))
				err := testSyncingViaDirectSubscribe(t, chnk, n)
				if err != nil {
					t.Fatal(err)
				}
			}
		}
	}
}

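// testSyncingViaGlobalSync builds a simulation in which every node runs a
// "streamer" service with automatic syncing enabled, loads the snapshot for
// the given node count, uploads chunkCount chunks to a randomly selected node
// and then waits until every node holds the chunks it is responsible for.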
func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			n := ctx.Config.Node()
			addr := network.NewAddr(n)
			store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New

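			// DoSync makes the registry establish its SYNC subscriptions on its
			// own; SyncUpdateDelay is assumed here to throttle how soon those
			// subscriptions are re-evaluated after kademlia changes.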
			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				DoSync:          true,
				SyncUpdateDelay: 3 * time.Second,
			})
			bucket.Store(bucketKeyRegistry, r)

			cleanup = func() {
				os.RemoveAll(datadir)
				netStore.Close()
				r.Close()
			}

			return r, cleanup, nil

		},
	})
	defer sim.Close()

	log.Info("Initializing test config")

	conf := &synctestConfig{}
	//map of node ID to indexes of chunks expected at that node
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to node ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//slice where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		t.Fatal(err)
	}

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancelSimRun()

	if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
		t.Fatal(err)
	}

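	// Watch for peer drops for the whole duration of the test; any disconnect
	// is treated as a test failure.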
	disconnections := sim.PeerEvents(
		context.Background(),
		sim.NodeIDs(),
		simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
	)

	go func() {
		for d := range disconnections {
			log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
			// t.Fatal must only be called from the test goroutine; mark the
			// test as failed and cancel the simulation instead.
			t.Error("unexpected disconnect")
			cancelSimRun()
		}
	}()

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//select a random up node; this is the node used for the upload
		node := sim.RandomUpNode()
		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
		if !ok {
			return fmt.Errorf("No localstore")
		}
		lstore := item.(*storage.LocalStore)
		hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
		if err != nil {
			return err
		}
		conf.hashes = append(conf.hashes, hashes...)
		mapKeysToNodes(conf)

		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
		// or until the timeout is reached.
		allSuccess := false
		var gDir string
		var globalStore *mockdb.GlobalStore
		if *useMockStore {
			gDir, globalStore, err = createGlobalStore()
			if err != nil {
				return fmt.Errorf("Something went wrong; using mockStore enabled but could not create globalStore: %v", err)
			}
			defer func() {
				os.RemoveAll(gDir)
				err := globalStore.Close()
				if err != nil {
					log.Error("Error closing global store", "err", err)
				}
			}()
		}
		for !allSuccess {
			allSuccess = true
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				localChunks := conf.idToChunksMap[id]
				localSuccess := true
				for _, ch := range localChunks {
					//get the real chunk by the index in the index array
					chunk := conf.hashes[ch]
					log.Trace(fmt.Sprintf("node %s checking for chunk %s", id, chunk))
					//check if the expected chunk is indeed in the localstore
					var err error
					if *useMockStore {
						//use the globalStore if the mockStore should be used; in that case,
						//the complete localStore stack is bypassed for getting the chunk
						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
					} else {
						//use the actual localstore
						item, ok := sim.NodeItem(id, bucketKeyStore)
						if !ok {
							return fmt.Errorf("Error accessing localstore")
						}
						lstore := item.(*storage.LocalStore)
						_, err = lstore.Get(ctx, chunk)
					}
					if err != nil {
						log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
						localSuccess = false
						// Do not spam the log with the warning message
						time.Sleep(500 * time.Millisecond)
					} else {
						log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
					}
				}
				if !localSuccess {
					allSuccess = false
					break
				}
			}
		}
		if !allSuccess {
			return fmt.Errorf("Not all chunks succeeded!")
		}
		return nil
	})

	if result.Error != nil {
		t.Fatal(result.Error)
	}
	log.Info("Simulation ended")
}

/*
The test generates the given number of chunks.

For every chunk generated, the nearest node addresses are identified.
We verify that the nodes closest to the chunk addresses actually do have
the chunks in their local stores.

The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. The snapshot should have 'streamer' in its service list.
*/
func testSyncingViaDirectSubscribe(t *testing.T, chunkCount int, nodeCount int) error {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			n := ctx.Config.Node()
			addr := network.NewAddr(n)
			store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New

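			// No RegistryOptions here: syncing is not started automatically;
			// the test drives the SYNC subscriptions explicitly via
			// startSyncing below.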
			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), nil)
			bucket.Store(bucketKeyRegistry, r)

			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			cleanup = func() {
				os.RemoveAll(datadir)
				netStore.Close()
				r.Close()
			}

			return r, cleanup, nil

		},
	})
	defer sim.Close()

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancelSimRun()

	conf := &synctestConfig{}
	//map of node ID to indexes of chunks expected at that node
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to node ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//slice where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
		return err
	}

	disconnections := sim.PeerEvents(
		context.Background(),
		sim.NodeIDs(),
		simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
	)

	go func() {
		for d := range disconnections {
			log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
			// t.Fatal must only be called from the test goroutine; mark the
			// test as failed and cancel the simulation instead.
			t.Error("unexpected disconnect")
			cancelSimRun()
		}
	}()

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		var subscriptionCount int

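		// Message code 4 is assumed to correspond to the stream protocol's
		// SubscribeMsg in this version; every received subscription message
		// decrements the expected subscription count below.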
		filter := simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("stream").MsgCode(4)
		eventC := sim.PeerEvents(ctx, nodeIDs, filter)

		for j, node := range nodeIDs {
			log.Trace(fmt.Sprintf("Start syncing subscriptions: %d", j))
			//start syncing!
			item, ok := sim.NodeItem(node, bucketKeyRegistry)
			if !ok {
				return fmt.Errorf("No registry")
			}
			registry := item.(*Registry)

			var cnt int
			cnt, err = startSyncing(registry, conf)
			if err != nil {
				return err
			}
			//increment the number of subscriptions we need to wait for
			//by the count returned from startSyncing (SYNC subscriptions)
			subscriptionCount += cnt
		}

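		// Wait until one subscription message has been observed for every
		// subscription requested above before uploading any chunks.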
		for e := range eventC {
			if e.Error != nil {
				return e.Error
			}
			subscriptionCount--
			if subscriptionCount == 0 {
				break
			}
		}
		//select a random node for upload
		node := sim.RandomUpNode()
		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
		if !ok {
			return fmt.Errorf("No localstore")
		}
		lstore := item.(*storage.LocalStore)
		hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
		if err != nil {
			return err
		}
		conf.hashes = append(conf.hashes, hashes...)
		mapKeysToNodes(conf)

		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		var gDir string
		var globalStore *mockdb.GlobalStore
		if *useMockStore {
			gDir, globalStore, err = createGlobalStore()
			if err != nil {
				return fmt.Errorf("Something went wrong; using mockStore enabled but could not create globalStore: %v", err)
			}
			defer os.RemoveAll(gDir)
		}
		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
		// or until the timeout is reached.
		allSuccess := false
		for !allSuccess {
			allSuccess = true
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				localChunks := conf.idToChunksMap[id]
				localSuccess := true
				for _, ch := range localChunks {
					//get the real chunk by the index in the index array
					chunk := conf.hashes[ch]
					log.Trace(fmt.Sprintf("node %s checking for chunk %s", id, chunk))
					//check if the expected chunk is indeed in the localstore
					var err error
					if *useMockStore {
						//use the globalStore if the mockStore should be used; in that case,
						//the complete localStore stack is bypassed for getting the chunk
						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
					} else {
						//use the actual localstore
						item, ok := sim.NodeItem(id, bucketKeyStore)
						if !ok {
							return fmt.Errorf("Error accessing localstore")
						}
						lstore := item.(*storage.LocalStore)
						_, err = lstore.Get(ctx, chunk)
					}
					if err != nil {
						log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
						localSuccess = false
						// Do not spam the log with the warning message
						time.Sleep(500 * time.Millisecond)
					} else {
						log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
					}
				}
				if !localSuccess {
					allSuccess = false
					break
				}
			}
		}
		if !allSuccess {
			return fmt.Errorf("Not all chunks succeeded!")
		}
		return nil
	})

	if result.Error != nil {
		return result.Error
	}

	log.Info("Simulation ended")
	return nil
}

//startSyncing is the server func to start syncing; it issues
//`RequestSubscriptionMsg` to peers, per proximity order bin, by iterating
//over the kademlia connections with `EachBin`.
//It returns the number of subscriptions requested.
func startSyncing(r *Registry, conf *synctestConfig) (int, error) {
	var err error
	kad := r.delivery.kad
	subCnt := 0
	//iterate over each bin and request the needed subscriptions
	kad.EachBin(r.addr[:], pof, 0, func(conn *network.Peer, po int) bool {
		//request a SYNC subscription from the peer for the bin it falls into
		subCnt++
		err = r.RequestSubscription(conf.addrToIDMap[string(conn.Address())], NewStream("SYNC", FormatSyncBinKey(uint8(po)), true), NewRange(0, 0), High)
		if err != nil {
			log.Error(fmt.Sprintf("Error in RequestSubscription! %v", err))
			return false
		}
		return true

	})
	//propagate any subscription error collected in the closure above
	return subCnt, err
}

//map chunk keys to the node addresses that are responsible for them
func mapKeysToNodes(conf *synctestConfig) {
	nodemap := make(map[string][]int)
	//build a pot of the node overlay addresses
	np := pot.NewPot(nil, 0)
	indexmap := make(map[string]int)
	for i, a := range conf.addrs {
		indexmap[string(a)] = i
		np, _, _ = pot.Add(np, a, pof)
	}

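	//NewPeerPotMap is assumed to precompute, for every node address, the
	//expected nearest-neighbour set (NNSet) given the minimum proximity size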
	var kadMinProxSize = 2

	ppmap := network.NewPeerPotMap(kadMinProxSize, conf.addrs)

	//for each chunk hash, run EachNeighbour on the node address pot to identify the closest node
	log.Trace(fmt.Sprintf("Generated chunk hash(es): %v", conf.hashes))
	for i := 0; i < len(conf.hashes); i++ {
		var a []byte
		np.EachNeighbour([]byte(conf.hashes[i]), pof, func(val pot.Val, po int) bool {
			// take the first address
			a = val.([]byte)
			return false
		})

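		//the chunk is expected at the single closest node and at every node
		//in that node's nearest-neighbour set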
		nns := ppmap[common.Bytes2Hex(a)].NNSet
		nns = append(nns, a)

		for _, p := range nns {
			nodemap[string(p)] = append(nodemap[string(p)], i)
		}
	}
	for addr, chunks := range nodemap {
		//this selects which chunks are expected to be found on the given node
		conf.idToChunksMap[conf.addrToIDMap[addr]] = chunks
	}
	log.Debug(fmt.Sprintf("Map of expected chunks by ID: %v", conf.idToChunksMap))
}

//uploadFileToSingleNodeStore uploads chunkCount files of one chunk each
//to the local store of a single node
func uploadFileToSingleNodeStore(id enode.ID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) {
	log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
	fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
	size := chunkSize
	var rootAddrs []storage.Address
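	//each Store call writes one chunk-sized file of random data and returns
	//the root address of that file, which is collected for the caller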
	for i := 0; i < chunkCount; i++ {
		rk, wait, err := fileStore.Store(context.TODO(), io.LimitReader(crand.Reader, int64(size)), int64(size), false)
		if err != nil {
			return nil, err
		}
		err = wait(context.TODO())
		if err != nil {
			return nil, err
		}
		rootAddrs = append(rootAddrs, rk)
	}

	return rootAddrs, nil
}