github.com/letterj/go-ethereum@v1.8.22-0.20190204142846-520024dfd689/swarm/network/stream/delivery_test.go

     1  // Copyright 2018 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package stream
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	"errors"
    23  	"fmt"
    24  	"sync"
    25  	"sync/atomic"
    26  	"testing"
    27  	"time"
    28  
    29  	"github.com/ethereum/go-ethereum/node"
    30  	"github.com/ethereum/go-ethereum/p2p"
    31  	"github.com/ethereum/go-ethereum/p2p/enode"
    32  	"github.com/ethereum/go-ethereum/p2p/protocols"
    33  	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
    34  	p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
    35  	"github.com/ethereum/go-ethereum/swarm/log"
    36  	"github.com/ethereum/go-ethereum/swarm/network"
    37  	pq "github.com/ethereum/go-ethereum/swarm/network/priorityqueue"
    38  	"github.com/ethereum/go-ethereum/swarm/network/simulation"
    39  	"github.com/ethereum/go-ethereum/swarm/state"
    40  	"github.com/ethereum/go-ethereum/swarm/storage"
    41  	"github.com/ethereum/go-ethereum/swarm/testutil"
    42  )
    43  
    44  //Tests initializing a retrieve request
    45  func TestStreamerRetrieveRequest(t *testing.T) {
    46  	regOpts := &RegistryOptions{
    47  		Retrieval: RetrievalClientOnly,
    48  		Syncing:   SyncingDisabled,
    49  	}
    50  	tester, streamer, _, teardown, err := newStreamerTester(regOpts)
    51  	defer teardown()
    52  	if err != nil {
    53  		t.Fatal(err)
    54  	}
    55  
    56  	node := tester.Nodes[0]
    57  
    58  	ctx := context.Background()
    59  	req := network.NewRequest(
    60  		storage.Address(hash0[:]),
    61  		true,
    62  		&sync.Map{},
    63  	)
    64  	streamer.delivery.RequestFromPeers(ctx, req)
    65  
    66  	stream := NewStream(swarmChunkServerStreamName, "", true)
    67  
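        	// Note: the numeric message codes used in the exchanges below map to the stream protocol
        	// messages as used throughout this file: 1 = OfferedHashesMsg, 4 = SubscribeMsg,
        	// 5 = RetrieveRequestMsg, 6 = ChunkDeliveryMsg (presumably their index in the protocol Spec).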
    68  	err = tester.TestExchanges(p2ptest.Exchange{
    69  		Label: "RetrieveRequestMsg",
    70  		Expects: []p2ptest.Expect{
    71  			{ //start expecting a subscription for RETRIEVE_REQUEST due to `RetrievalClientOnly`
    72  				Code: 4,
    73  				Msg: &SubscribeMsg{
    74  					Stream:   stream,
    75  					History:  nil,
    76  					Priority: Top,
    77  				},
    78  				Peer: node.ID(),
    79  			},
    80  			{ //expect a retrieve request message for the given hash
    81  				Code: 5,
    82  				Msg: &RetrieveRequestMsg{
    83  					Addr:      hash0[:],
    84  					SkipCheck: true,
    85  				},
    86  				Peer: node.ID(),
    87  			},
    88  		},
    89  	})
    90  
    91  	if err != nil {
    92  		t.Fatalf("Expected no error, got %v", err)
    93  	}
    94  }
    95  
     96  //Test requesting a chunk from a peer, then issuing an "empty" OfferedHashesMsg (no hashes available yet)
    97  //Should time out as the peer does not have the chunk (no syncing happened previously)
    98  func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
    99  	tester, streamer, _, teardown, err := newStreamerTester(&RegistryOptions{
   100  		Retrieval: RetrievalEnabled,
   101  		Syncing:   SyncingDisabled, //do no syncing
   102  	})
   103  	defer teardown()
   104  	if err != nil {
   105  		t.Fatal(err)
   106  	}
   107  
   108  	node := tester.Nodes[0]
   109  
   110  	chunk := storage.NewChunk(storage.Address(hash0[:]), nil)
   111  
   112  	peer := streamer.getPeer(node.ID())
   113  
   114  	stream := NewStream(swarmChunkServerStreamName, "", true)
   115  	//simulate pre-subscription to RETRIEVE_REQUEST stream on peer
   116  	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
   117  		Stream:   stream,
   118  		History:  nil,
   119  		Priority: Top,
   120  	})
   121  
   122  	//test the exchange
   123  	err = tester.TestExchanges(p2ptest.Exchange{
   124  		Expects: []p2ptest.Expect{
   125  			{ //first expect a subscription to the RETRIEVE_REQUEST stream
   126  				Code: 4,
   127  				Msg: &SubscribeMsg{
   128  					Stream:   stream,
   129  					History:  nil,
   130  					Priority: Top,
   131  				},
   132  				Peer: node.ID(),
   133  			},
   134  		},
   135  	}, p2ptest.Exchange{
   136  		Label: "RetrieveRequestMsg",
   137  		Triggers: []p2ptest.Trigger{
   138  			{ //then the actual RETRIEVE_REQUEST....
   139  				Code: 5,
   140  				Msg: &RetrieveRequestMsg{
   141  					Addr: chunk.Address()[:],
   142  				},
   143  				Peer: node.ID(),
   144  			},
   145  		},
   146  		Expects: []p2ptest.Expect{
   147  			{ //to which the peer responds with offered hashes
   148  				Code: 1,
   149  				Msg: &OfferedHashesMsg{
   150  					HandoverProof: nil,
   151  					Hashes:        nil,
   152  					From:          0,
   153  					To:            0,
   154  				},
   155  				Peer: node.ID(),
   156  			},
   157  		},
   158  	})
   159  
   160  	//should fail with a timeout as the peer we are requesting
   161  	//the chunk from does not have the chunk
   162  	expectedError := `exchange #1 "RetrieveRequestMsg": timed out`
   163  	if err == nil || err.Error() != expectedError {
   164  		t.Fatalf("Expected error %v, got %v", expectedError, err)
   165  	}
   166  }
   167  
    168  // upstream request server receives a retrieve request and responds with
    169  // offered hashes, or with a direct chunk delivery if SkipCheck is set to true
   170  func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
   171  	tester, streamer, localStore, teardown, err := newStreamerTester(&RegistryOptions{
   172  		Retrieval: RetrievalEnabled,
   173  		Syncing:   SyncingDisabled,
   174  	})
   175  	defer teardown()
   176  	if err != nil {
   177  		t.Fatal(err)
   178  	}
   179  
   180  	node := tester.Nodes[0]
   181  
   182  	peer := streamer.getPeer(node.ID())
   183  
   184  	stream := NewStream(swarmChunkServerStreamName, "", true)
   185  
   186  	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
   187  		Stream:   stream,
   188  		History:  nil,
   189  		Priority: Top,
   190  	})
   191  
   192  	hash := storage.Address(hash0[:])
   193  	chunk := storage.NewChunk(hash, hash)
   194  	err = localStore.Put(context.TODO(), chunk)
   195  	if err != nil {
   196  		t.Fatalf("Expected no err got %v", err)
   197  	}
   198  
   199  	err = tester.TestExchanges(p2ptest.Exchange{
   200  		Expects: []p2ptest.Expect{
   201  			{
   202  				Code: 4,
   203  				Msg: &SubscribeMsg{
   204  					Stream:   stream,
   205  					History:  nil,
   206  					Priority: Top,
   207  				},
   208  				Peer: node.ID(),
   209  			},
   210  		},
   211  	}, p2ptest.Exchange{
   212  		Label: "RetrieveRequestMsg",
   213  		Triggers: []p2ptest.Trigger{
   214  			{
   215  				Code: 5,
   216  				Msg: &RetrieveRequestMsg{
   217  					Addr: hash,
   218  				},
   219  				Peer: node.ID(),
   220  			},
   221  		},
   222  		Expects: []p2ptest.Expect{
   223  			{
   224  				Code: 1,
   225  				Msg: &OfferedHashesMsg{
   226  					HandoverProof: &HandoverProof{
   227  						Handover: &Handover{},
   228  					},
   229  					Hashes: hash,
   230  					From:   0,
   231  					// TODO: why is this 32???
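        					// likely 32 because To appears to be the byte length of the offered
        					// Hashes batch, i.e. a single 32-byte chunk address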
   232  					To:     32,
   233  					Stream: stream,
   234  				},
   235  				Peer: node.ID(),
   236  			},
   237  		},
   238  	})
   239  
   240  	if err != nil {
   241  		t.Fatal(err)
   242  	}
   243  
   244  	hash = storage.Address(hash1[:])
   245  	chunk = storage.NewChunk(hash, hash1[:])
   246  	err = localStore.Put(context.TODO(), chunk)
   247  	if err != nil {
   248  		t.Fatalf("Expected no err got %v", err)
   249  	}
   250  
   251  	err = tester.TestExchanges(p2ptest.Exchange{
   252  		Label: "RetrieveRequestMsg",
   253  		Triggers: []p2ptest.Trigger{
   254  			{
   255  				Code: 5,
   256  				Msg: &RetrieveRequestMsg{
   257  					Addr:      hash,
   258  					SkipCheck: true,
   259  				},
   260  				Peer: node.ID(),
   261  			},
   262  		},
   263  		Expects: []p2ptest.Expect{
   264  			{
   265  				Code: 6,
   266  				Msg: &ChunkDeliveryMsg{
   267  					Addr:  hash,
   268  					SData: hash,
   269  				},
   270  				Peer: node.ID(),
   271  			},
   272  		},
   273  	})
   274  
   275  	if err != nil {
   276  		t.Fatal(err)
   277  	}
   278  }
   279  
   280  // if there is one peer in the Kademlia, RequestFromPeers should return it
   281  func TestRequestFromPeers(t *testing.T) {
   282  	dummyPeerID := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")
   283  
   284  	addr := network.RandomAddr()
   285  	to := network.NewKademlia(addr.OAddr, network.NewKadParams())
   286  	delivery := NewDelivery(to, nil)
   287  	protocolsPeer := protocols.NewPeer(p2p.NewPeer(dummyPeerID, "dummy", nil), nil, nil)
   288  	peer := network.NewPeer(&network.BzzPeer{
   289  		BzzAddr:   network.RandomAddr(),
   290  		LightNode: false,
   291  		Peer:      protocolsPeer,
   292  	}, to)
   293  	to.On(peer)
   294  	r := NewRegistry(addr.ID(), delivery, nil, nil, nil, nil)
   295  
    296  	// an empty priorityQueue has to be created to prevent a goroutine from running after the test has finished
   297  	sp := &Peer{
   298  		Peer:     protocolsPeer,
   299  		pq:       pq.New(int(PriorityQueue), PriorityQueueCap),
   300  		streamer: r,
   301  	}
   302  	r.setPeer(sp)
   303  	req := network.NewRequest(
   304  		storage.Address(hash0[:]),
   305  		true,
   306  		&sync.Map{},
   307  	)
   308  	ctx := context.Background()
   309  	id, _, err := delivery.RequestFromPeers(ctx, req)
   310  
   311  	if err != nil {
   312  		t.Fatal(err)
   313  	}
   314  	if *id != dummyPeerID {
    315  		t.Fatalf("Expected peer ID %v, got %v", dummyPeerID, id)
   316  	}
   317  }
   318  
   319  // RequestFromPeers should not return light nodes
   320  func TestRequestFromPeersWithLightNode(t *testing.T) {
   321  	dummyPeerID := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")
   322  
   323  	addr := network.RandomAddr()
   324  	to := network.NewKademlia(addr.OAddr, network.NewKadParams())
   325  	delivery := NewDelivery(to, nil)
   326  
   327  	protocolsPeer := protocols.NewPeer(p2p.NewPeer(dummyPeerID, "dummy", nil), nil, nil)
   328  	// setting up a lightnode
   329  	peer := network.NewPeer(&network.BzzPeer{
   330  		BzzAddr:   network.RandomAddr(),
   331  		LightNode: true,
   332  		Peer:      protocolsPeer,
   333  	}, to)
   334  	to.On(peer)
   335  	r := NewRegistry(addr.ID(), delivery, nil, nil, nil, nil)
    336  	// an empty priorityQueue has to be created to prevent a goroutine from running after the test has finished
   337  	sp := &Peer{
   338  		Peer:     protocolsPeer,
   339  		pq:       pq.New(int(PriorityQueue), PriorityQueueCap),
   340  		streamer: r,
   341  	}
   342  	r.setPeer(sp)
   343  
   344  	req := network.NewRequest(
   345  		storage.Address(hash0[:]),
   346  		true,
   347  		&sync.Map{},
   348  	)
   349  
   350  	ctx := context.Background()
   351  	// making a request which should return with "no peer found"
   352  	_, _, err := delivery.RequestFromPeers(ctx, req)
   353  
   354  	expectedError := "no peer found"
    355  	if err == nil || err.Error() != expectedError {
   356  		t.Fatalf("expected '%v', got %v", expectedError, err)
   357  	}
   358  }
   359  
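        // TestStreamerDownstreamChunkDeliveryMsgExchange subscribes to a custom stream on a peer, triggers a
        // ChunkDeliveryMsg from that peer and verifies that the delivered chunk ends up in the local store.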
   360  func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
   361  	tester, streamer, localStore, teardown, err := newStreamerTester(&RegistryOptions{
   362  		Retrieval: RetrievalDisabled,
   363  		Syncing:   SyncingDisabled,
   364  	})
   365  	defer teardown()
   366  	if err != nil {
   367  		t.Fatal(err)
   368  	}
   369  
   370  	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
   371  		return &testClient{
   372  			t: t,
   373  		}, nil
   374  	})
   375  
   376  	node := tester.Nodes[0]
   377  
   378  	//subscribe to custom stream
   379  	stream := NewStream("foo", "", true)
   380  	err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
   381  	if err != nil {
   382  		t.Fatalf("Expected no error, got %v", err)
   383  	}
   384  
   385  	chunkKey := hash0[:]
   386  	chunkData := hash1[:]
   387  
   388  	err = tester.TestExchanges(p2ptest.Exchange{
   389  		Label: "Subscribe message",
   390  		Expects: []p2ptest.Expect{
   391  			{ //first expect subscription to the custom stream...
   392  				Code: 4,
   393  				Msg: &SubscribeMsg{
   394  					Stream:   stream,
   395  					History:  NewRange(5, 8),
   396  					Priority: Top,
   397  				},
   398  				Peer: node.ID(),
   399  			},
   400  		},
   401  	},
   402  		p2ptest.Exchange{
   403  			Label: "ChunkDelivery message",
   404  			Triggers: []p2ptest.Trigger{
   405  				{ //...then trigger a chunk delivery for the given chunk from peer in order for
   406  					//local node to get the chunk delivered
   407  					Code: 6,
   408  					Msg: &ChunkDeliveryMsg{
   409  						Addr:  chunkKey,
   410  						SData: chunkData,
   411  					},
   412  					Peer: node.ID(),
   413  				},
   414  			},
   415  		})
   416  
   417  	if err != nil {
   418  		t.Fatalf("Expected no error, got %v", err)
   419  	}
   420  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
   421  	defer cancel()
   422  
   423  	// wait for the chunk to get stored
   424  	storedChunk, err := localStore.Get(ctx, chunkKey)
   425  	for err != nil {
   426  		select {
   427  		case <-ctx.Done():
   428  			t.Fatalf("Chunk is not in localstore after timeout, err: %v", err)
   429  		default:
   430  		}
   431  		storedChunk, err = localStore.Get(ctx, chunkKey)
   432  		time.Sleep(50 * time.Millisecond)
   433  	}
   434  
   435  	if err != nil {
   436  		t.Fatalf("Expected no error, got %v", err)
   437  	}
   438  
   439  	if !bytes.Equal(storedChunk.Data(), chunkData) {
   440  		t.Fatal("Retrieved chunk has different data than original")
   441  	}
   442  
   443  }
   444  
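        // TestDeliveryFromNodes runs the delivery test over chains of 2, 4, 8 and 16 nodes, once with and
        // once without SkipCheck.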
   445  func TestDeliveryFromNodes(t *testing.T) {
   446  	testDeliveryFromNodes(t, 2, dataChunkCount, true)
   447  	testDeliveryFromNodes(t, 2, dataChunkCount, false)
   448  	testDeliveryFromNodes(t, 4, dataChunkCount, true)
   449  	testDeliveryFromNodes(t, 4, dataChunkCount, false)
   450  	testDeliveryFromNodes(t, 8, dataChunkCount, true)
   451  	testDeliveryFromNodes(t, 8, dataChunkCount, false)
   452  	testDeliveryFromNodes(t, 16, dataChunkCount, true)
   453  	testDeliveryFromNodes(t, 16, dataChunkCount, false)
   454  }
   455  
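        // testDeliveryFromNodes uploads a random file whose chunks are spread round-robin over the stores of
        // all nodes except the pivot, then reads the file back on the pivot node so that every chunk has to be
        // retrieved from the network.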
   456  func testDeliveryFromNodes(t *testing.T, nodes, chunkCount int, skipCheck bool) {
   457  	sim := simulation.New(map[string]simulation.ServiceFunc{
   458  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   459  			addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
   460  			if err != nil {
   461  				return nil, nil, err
   462  			}
   463  
   464  			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
   465  				SkipCheck: skipCheck,
   466  				Syncing:   SyncingDisabled,
   467  				Retrieval: RetrievalEnabled,
   468  			}, nil)
   469  			bucket.Store(bucketKeyRegistry, r)
   470  
   471  			cleanup = func() {
   472  				r.Close()
   473  				clean()
   474  			}
   475  
   476  			return r, cleanup, nil
   477  		},
   478  	})
   479  	defer sim.Close()
   480  
   481  	log.Info("Adding nodes to simulation")
   482  	_, err := sim.AddNodesAndConnectChain(nodes)
   483  	if err != nil {
   484  		t.Fatal(err)
   485  	}
   486  
   487  	log.Info("Starting simulation")
   488  	ctx := context.Background()
   489  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
   490  		nodeIDs := sim.UpNodeIDs()
   491  		//determine the pivot node to be the first node of the simulation
   492  		pivot := nodeIDs[0]
   493  
   494  		//distribute chunks of a random file into Stores of nodes 1 to nodes
   495  		//we will do this by creating a file store with an underlying round-robin store:
   496  		//the file store will create a hash for the uploaded file, but every chunk will be
   497  		//distributed to different nodes via round-robin scheduling
   498  		log.Debug("Writing file to round-robin file store")
    499  		//to do this, we create an array of chunk stores (one for every node except the pivot node)
   500  		stores := make([]storage.ChunkStore, len(nodeIDs)-1)
   501  		//we then need to get all stores from the sim....
   502  		lStores := sim.NodesItems(bucketKeyStore)
   503  		i := 0
   504  		//...iterate the buckets...
   505  		for id, bucketVal := range lStores {
   506  			//...and remove the one which is the pivot node
   507  			if id == pivot {
   508  				continue
   509  			}
   510  			//the other ones are added to the array...
   511  			stores[i] = bucketVal.(storage.ChunkStore)
   512  			i++
   513  		}
   514  		//...which then gets passed to the round-robin file store
   515  		roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams())
   516  		//now we can actually upload a (random) file to the round-robin store
   517  		size := chunkCount * chunkSize
   518  		log.Debug("Storing data to file store")
   519  		fileHash, wait, err := roundRobinFileStore.Store(ctx, testutil.RandomReader(1, size), int64(size), false)
   520  		// wait until all chunks stored
   521  		if err != nil {
   522  			return err
   523  		}
   524  		err = wait(ctx)
   525  		if err != nil {
   526  			return err
   527  		}
   528  
   529  		log.Debug("Waiting for kademlia")
   530  		// TODO this does not seem to be correct usage of the function, as the simulation may have no kademlias
   531  		if _, err := sim.WaitTillHealthy(ctx); err != nil {
   532  			return err
   533  		}
   534  
   535  		//get the pivot node's filestore
   536  		item, ok := sim.NodeItem(pivot, bucketKeyFileStore)
   537  		if !ok {
   538  			return fmt.Errorf("No filestore")
   539  		}
   540  		pivotFileStore := item.(*storage.FileStore)
   541  		log.Debug("Starting retrieval routine")
   542  		retErrC := make(chan error)
   543  		go func() {
   544  			// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
   545  			// we must wait for the peer connections to have started before requesting
   546  			n, err := readAll(pivotFileStore, fileHash)
   547  			log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
   548  			retErrC <- err
   549  		}()
   550  
   551  		log.Debug("Watching for disconnections")
   552  		disconnections := sim.PeerEvents(
   553  			context.Background(),
   554  			sim.NodeIDs(),
   555  			simulation.NewPeerEventsFilter().Drop(),
   556  		)
   557  
   558  		var disconnected atomic.Value
   559  		go func() {
   560  			for d := range disconnections {
   561  				if d.Error != nil {
   562  					log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
   563  					disconnected.Store(true)
   564  				}
   565  			}
   566  		}()
   567  		defer func() {
   568  			if err != nil {
   569  				if yes, ok := disconnected.Load().(bool); ok && yes {
   570  					err = errors.New("disconnect events received")
   571  				}
   572  			}
   573  		}()
   574  
   575  		//finally check that the pivot node gets all chunks via the root hash
   576  		log.Debug("Check retrieval")
   577  		success := true
   578  		var total int64
   579  		total, err = readAll(pivotFileStore, fileHash)
   580  		if err != nil {
   581  			return err
   582  		}
   583  		log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
   584  		if err != nil || total != int64(size) {
   585  			success = false
   586  		}
   587  
   588  		if !success {
   589  			return fmt.Errorf("Test failed, chunks not available on all nodes")
   590  		}
   591  		if err := <-retErrC; err != nil {
    592  			return fmt.Errorf("requesting chunks: %v", err)
   593  		}
   594  		log.Debug("Test terminated successfully")
   595  		return nil
   596  	})
   597  	if result.Error != nil {
   598  		t.Fatal(result.Error)
   599  	}
   600  }
   601  
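        // BenchmarkDeliveryFromNodesWithoutCheck benchmarks retrieval with SkipCheck enabled (chunks are
        // delivered directly rather than being offered first) for 2-16 nodes and 32-128 chunks.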
   602  func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
   603  	for chunks := 32; chunks <= 128; chunks *= 2 {
   604  		for i := 2; i < 32; i *= 2 {
   605  			b.Run(
   606  				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
   607  				func(b *testing.B) {
   608  					benchmarkDeliveryFromNodes(b, i, chunks, true)
   609  				},
   610  			)
   611  		}
   612  	}
   613  }
   614  
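        // BenchmarkDeliveryFromNodesWithCheck runs the same benchmark with SkipCheck disabled, so retrievals
        // go through the offered hashes exchange first.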
   615  func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
   616  	for chunks := 32; chunks <= 128; chunks *= 2 {
   617  		for i := 2; i < 32; i *= 2 {
   618  			b.Run(
   619  				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
   620  				func(b *testing.B) {
   621  					benchmarkDeliveryFromNodes(b, i, chunks, false)
   622  				},
   623  			)
   624  		}
   625  	}
   626  }
   627  
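        // benchmarkDeliveryFromNodes stores chunkCount chunks via the last node of a chain of the given length,
        // then measures how long the first (pivot) node needs to fetch all of them through its NetStore.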
   628  func benchmarkDeliveryFromNodes(b *testing.B, nodes, chunkCount int, skipCheck bool) {
   629  	sim := simulation.New(map[string]simulation.ServiceFunc{
   630  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   631  			addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
   632  			if err != nil {
   633  				return nil, nil, err
   634  			}
   635  
   636  			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
   637  				SkipCheck:       skipCheck,
   638  				Syncing:         SyncingDisabled,
   639  				Retrieval:       RetrievalDisabled,
   640  				SyncUpdateDelay: 0,
   641  			}, nil)
   642  			bucket.Store(bucketKeyRegistry, r)
   643  
   644  			cleanup = func() {
   645  				r.Close()
   646  				clean()
   647  			}
   648  
   649  			return r, cleanup, nil
   650  		},
   651  	})
   652  	defer sim.Close()
   653  
   654  	log.Info("Initializing test config")
   655  	_, err := sim.AddNodesAndConnectChain(nodes)
   656  	if err != nil {
   657  		b.Fatal(err)
   658  	}
   659  
   660  	ctx := context.Background()
   661  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
   662  		nodeIDs := sim.UpNodeIDs()
   663  		node := nodeIDs[len(nodeIDs)-1]
   664  
   665  		item, ok := sim.NodeItem(node, bucketKeyFileStore)
   666  		if !ok {
   667  			b.Fatal("No filestore")
   668  		}
   669  		remoteFileStore := item.(*storage.FileStore)
   670  
   671  		pivotNode := nodeIDs[0]
   672  		item, ok = sim.NodeItem(pivotNode, bucketKeyNetStore)
   673  		if !ok {
    674  			b.Fatal("No netstore")
   675  		}
   676  		netStore := item.(*storage.NetStore)
   677  
   678  		if _, err := sim.WaitTillHealthy(ctx); err != nil {
   679  			return err
   680  		}
   681  
   682  		disconnections := sim.PeerEvents(
   683  			context.Background(),
   684  			sim.NodeIDs(),
   685  			simulation.NewPeerEventsFilter().Drop(),
   686  		)
   687  
   688  		var disconnected atomic.Value
   689  		go func() {
   690  			for d := range disconnections {
   691  				if d.Error != nil {
   692  					log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
   693  					disconnected.Store(true)
   694  				}
   695  			}
   696  		}()
   697  		defer func() {
   698  			if err != nil {
   699  				if yes, ok := disconnected.Load().(bool); ok && yes {
   700  					err = errors.New("disconnect events received")
   701  				}
   702  			}
   703  		}()
   704  		// benchmark loop
   705  		b.ResetTimer()
   706  		b.StopTimer()
   707  	Loop:
   708  		for i := 0; i < b.N; i++ {
   709  			// uploading chunkCount random chunks to the last node
   710  			hashes := make([]storage.Address, chunkCount)
   711  			for i := 0; i < chunkCount; i++ {
    712  				// create real chunks of the actual chunk size
   713  				ctx := context.TODO()
   714  				hash, wait, err := remoteFileStore.Store(ctx, testutil.RandomReader(i, chunkSize), int64(chunkSize), false)
   715  				if err != nil {
   716  					b.Fatalf("expected no error. got %v", err)
   717  				}
   718  				// wait until all chunks stored
   719  				err = wait(ctx)
   720  				if err != nil {
   721  					b.Fatalf("expected no error. got %v", err)
   722  				}
   723  				// collect the hashes
   724  				hashes[i] = hash
   725  			}
   726  			// now benchmark the actual retrieval
    727  			// netStore.Get is called for each hash in a goroutine and errors are collected
   728  			b.StartTimer()
   729  			errs := make(chan error)
   730  			for _, hash := range hashes {
   731  				go func(h storage.Address) {
   732  					_, err := netStore.Get(ctx, h)
   733  					log.Warn("test check netstore get", "hash", h, "err", err)
   734  					errs <- err
   735  				}(hash)
   736  			}
   737  			// count and report retrieval errors
   738  			// if there are misses then chunk timeout is too low for the distance and volume (?)
   739  			var total, misses int
   740  			for err := range errs {
   741  				if err != nil {
   742  					log.Warn(err.Error())
   743  					misses++
   744  				}
   745  				total++
   746  				if total == chunkCount {
   747  					break
   748  				}
   749  			}
   750  			b.StopTimer()
   751  
   752  			if misses > 0 {
    753  				err = fmt.Errorf("%v chunks not found out of %v", misses, total)
   754  				break Loop
   755  			}
   756  		}
   757  		if err != nil {
   758  			b.Fatal(err)
   759  		}
   760  		return nil
   761  	})
   762  	if result.Error != nil {
   763  		b.Fatal(result.Error)
   764  	}
   765  
   766  }