github.com/dotlike13/wemix30_go@v1.8.23/swarm/network/stream/delivery_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/protocols"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network"
	pq "github.com/ethereum/go-ethereum/swarm/network/priorityqueue"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/testutil"
)

// Tests initializing a retrieve request
func TestStreamerRetrieveRequest(t *testing.T) {
	regOpts := &RegistryOptions{
		Retrieval: RetrievalClientOnly,
		Syncing:   SyncingDisabled,
	}
	tester, streamer, _, teardown, err := newStreamerTester(regOpts)
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	node := tester.Nodes[0]

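	// issue a retrieve request for hash0; the delivery is expected to forward it
	// to the connected tester peer as a RetrieveRequestMsg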
	ctx := context.Background()
	req := network.NewRequest(
		storage.Address(hash0[:]),
		true,
		&sync.Map{},
	)
	streamer.delivery.RequestFromPeers(ctx, req)

	stream := NewStream(swarmChunkServerStreamName, "", true)

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Expects: []p2ptest.Expect{
			{ // start expecting a subscription for RETRIEVE_REQUEST due to `RetrievalClientOnly`
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					History:  nil,
					Priority: Top,
				},
				Peer: node.ID(),
			},
			{ // expect a retrieve request message for the given hash
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr:      hash0[:],
					SkipCheck: true,
				},
				Peer: node.ID(),
			},
		},
	})

	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}
}

// Test requesting a chunk from a peer and then issuing an "empty" OfferedHashesMsg (no hashes available yet)
// Should time out, as the peer does not have the chunk (no syncing happened previously)
func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(&RegistryOptions{
		Retrieval: RetrievalEnabled,
		Syncing:   SyncingDisabled, // do no syncing
	})
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	node := tester.Nodes[0]

	chunk := storage.NewChunk(storage.Address(hash0[:]), nil)

	peer := streamer.getPeer(node.ID())

	stream := NewStream(swarmChunkServerStreamName, "", true)
	// simulate a pre-subscription to the RETRIEVE_REQUEST stream on the peer
	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
		Stream:   stream,
		History:  nil,
		Priority: Top,
	})

	// test the exchange
	err = tester.TestExchanges(p2ptest.Exchange{
		Expects: []p2ptest.Expect{
			{ // first expect a subscription to the RETRIEVE_REQUEST stream
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					History:  nil,
					Priority: Top,
				},
				Peer: node.ID(),
			},
		},
	}, p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{ // ...then trigger the actual RETRIEVE_REQUEST...
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr: chunk.Address()[:],
				},
				Peer: node.ID(),
			},
		},
		Expects: []p2ptest.Expect{
			{ // ...to which the peer responds with offered hashes
				Code: 1,
				Msg: &OfferedHashesMsg{
					HandoverProof: nil,
					Hashes:        nil,
					From:          0,
					To:            0,
				},
				Peer: node.ID(),
			},
		},
	})

	// should fail with a timeout, as the peer we are requesting
	// the chunk from does not have the chunk
	expectedError := `exchange #1 "RetrieveRequestMsg": timed out`
	if err == nil || err.Error() != expectedError {
		t.Fatalf("Expected error %v, got %v", expectedError, err)
	}
}

// The upstream request server receives a RetrieveRequestMsg and responds with
// offered hashes, or with a direct chunk delivery if SkipCheck is set to true
func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
	tester, streamer, localStore, teardown, err := newStreamerTester(&RegistryOptions{
		Retrieval: RetrievalEnabled,
		Syncing:   SyncingDisabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	node := tester.Nodes[0]

	peer := streamer.getPeer(node.ID())

	stream := NewStream(swarmChunkServerStreamName, "", true)

	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
		Stream:   stream,
		History:  nil,
		Priority: Top,
	})

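	// put a chunk into the local store so that the upstream server has something
	// to offer in response to a retrieve request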
	hash := storage.Address(hash0[:])
	chunk := storage.NewChunk(hash, hash)
	err = localStore.Put(context.TODO(), chunk)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	err = tester.TestExchanges(p2ptest.Exchange{
		Expects: []p2ptest.Expect{
			{
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					History:  nil,
					Priority: Top,
				},
				Peer: node.ID(),
			},
		},
	}, p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr: hash,
				},
				Peer: node.ID(),
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 1,
				Msg: &OfferedHashesMsg{
					HandoverProof: &HandoverProof{
						Handover: &Handover{},
					},
					Hashes: hash,
					From:   0,
					// TODO: why is this 32???
					To:     32,
					Stream: stream,
				},
				Peer: node.ID(),
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}

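	// repeat the exchange with SkipCheck set: instead of offering the hash, the
	// server is expected to deliver the chunk directly with a ChunkDeliveryMsg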
	hash = storage.Address(hash1[:])
	chunk = storage.NewChunk(hash, hash1[:])
	err = localStore.Put(context.TODO(), chunk)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr:      hash,
					SkipCheck: true,
				},
				Peer: node.ID(),
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 6,
				Msg: &ChunkDeliveryMsg{
					Addr:  hash,
					SData: hash,
				},
				Peer: node.ID(),
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}
}

// if there is one peer in the Kademlia, RequestFromPeers should return it
func TestRequestFromPeers(t *testing.T) {
	dummyPeerID := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")

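	// set up a kademlia containing a single, non-light peer and a delivery bound to it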
	addr := network.RandomAddr()
	to := network.NewKademlia(addr.OAddr, network.NewKadParams())
	delivery := NewDelivery(to, nil)
	protocolsPeer := protocols.NewPeer(p2p.NewPeer(dummyPeerID, "dummy", nil), nil, nil)
	peer := network.NewPeer(&network.BzzPeer{
		BzzAddr:   network.RandomAddr(),
		LightNode: false,
		Peer:      protocolsPeer,
	}, to)
	to.On(peer)
	r := NewRegistry(addr.ID(), delivery, nil, nil, nil, nil)

	// an empty priority queue has to be created to prevent a goroutine from being called after the test has finished
	sp := &Peer{
		Peer:     protocolsPeer,
		pq:       pq.New(int(PriorityQueue), PriorityQueueCap),
		streamer: r,
	}
	r.setPeer(sp)
	req := network.NewRequest(
		storage.Address(hash0[:]),
		true,
		&sync.Map{},
	)
	ctx := context.Background()
	id, _, err := delivery.RequestFromPeers(ctx, req)

	if err != nil {
		t.Fatal(err)
	}
	if *id != dummyPeerID {
		t.Fatalf("Expected peer %v, got %v", dummyPeerID, id)
	}
}

// RequestFromPeers should not return light nodes
func TestRequestFromPeersWithLightNode(t *testing.T) {
	dummyPeerID := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")

	addr := network.RandomAddr()
	to := network.NewKademlia(addr.OAddr, network.NewKadParams())
	delivery := NewDelivery(to, nil)

	protocolsPeer := protocols.NewPeer(p2p.NewPeer(dummyPeerID, "dummy", nil), nil, nil)
	// setting up a light node
	peer := network.NewPeer(&network.BzzPeer{
		BzzAddr:   network.RandomAddr(),
		LightNode: true,
		Peer:      protocolsPeer,
	}, to)
	to.On(peer)
	r := NewRegistry(addr.ID(), delivery, nil, nil, nil, nil)
	// an empty priority queue has to be created to prevent a goroutine from being called after the test has finished
	sp := &Peer{
		Peer:     protocolsPeer,
		pq:       pq.New(int(PriorityQueue), PriorityQueueCap),
		streamer: r,
	}
	r.setPeer(sp)

	req := network.NewRequest(
		storage.Address(hash0[:]),
		true,
		&sync.Map{},
	)

	ctx := context.Background()
	// making a request which should return with "no peer found"
	_, _, err := delivery.RequestFromPeers(ctx, req)

	expectedError := "no peer found"
	if err == nil || err.Error() != expectedError {
		t.Fatalf("expected '%v', got %v", expectedError, err)
	}
}

func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
	tester, streamer, localStore, teardown, err := newStreamerTester(&RegistryOptions{
		Retrieval: RetrievalDisabled,
		Syncing:   SyncingDisabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
		return &testClient{
			t: t,
		}, nil
	})

	node := tester.Nodes[0]

	// subscribe to a custom stream
	stream := NewStream("foo", "", true)
	err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

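	// the address and data of the chunk that the simulated upstream peer will deliver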
	chunkKey := hash0[:]
	chunkData := hash1[:]

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "Subscribe message",
		Expects: []p2ptest.Expect{
			{ // first expect a subscription to the custom stream...
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					History:  NewRange(5, 8),
					Priority: Top,
				},
				Peer: node.ID(),
			},
		},
	},
		p2ptest.Exchange{
			Label: "ChunkDelivery message",
			Triggers: []p2ptest.Trigger{
				{ // ...then trigger a chunk delivery for the given chunk from the peer
					// so that the local node gets the chunk delivered
					Code: 6,
					Msg: &ChunkDeliveryMsg{
						Addr:  chunkKey,
						SData: chunkData,
					},
					Peer: node.ID(),
				},
			},
		})

	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// wait for the chunk to get stored
	storedChunk, err := localStore.Get(ctx, chunkKey)
	for err != nil {
		select {
		case <-ctx.Done():
			t.Fatalf("Chunk is not in localstore after timeout, err: %v", err)
		default:
		}
		storedChunk, err = localStore.Get(ctx, chunkKey)
		time.Sleep(50 * time.Millisecond)
	}

	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	if !bytes.Equal(storedChunk.Data(), chunkData) {
		t.Fatal("Retrieved chunk has different data than original")
	}
}

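// TestDeliveryFromNodes runs the delivery test over chain topologies of 2, 4, 8 and 16
// nodes, each with and without SkipCheck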
func TestDeliveryFromNodes(t *testing.T) {
	testDeliveryFromNodes(t, 2, dataChunkCount, true)
	testDeliveryFromNodes(t, 2, dataChunkCount, false)
	testDeliveryFromNodes(t, 4, dataChunkCount, true)
	testDeliveryFromNodes(t, 4, dataChunkCount, false)
	testDeliveryFromNodes(t, 8, dataChunkCount, true)
	testDeliveryFromNodes(t, 8, dataChunkCount, false)
	testDeliveryFromNodes(t, 16, dataChunkCount, true)
	testDeliveryFromNodes(t, 16, dataChunkCount, false)
}

func testDeliveryFromNodes(t *testing.T, nodes, chunkCount int, skipCheck bool) {
	t.Helper()
	t.Run(fmt.Sprintf("testDeliveryFromNodes_%d_%d_skipCheck_%t", nodes, chunkCount, skipCheck), func(t *testing.T) {
		sim := simulation.New(map[string]simulation.ServiceFunc{
			"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
				addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
				if err != nil {
					return nil, nil, err
				}

				r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
					SkipCheck: skipCheck,
					Syncing:   SyncingDisabled,
					Retrieval: RetrievalEnabled,
				}, nil)
				bucket.Store(bucketKeyRegistry, r)

				cleanup = func() {
					r.Close()
					clean()
				}

				return r, cleanup, nil
			},
		})
		defer sim.Close()

		log.Info("Adding nodes to simulation")
		_, err := sim.AddNodesAndConnectChain(nodes)
		if err != nil {
			t.Fatal(err)
		}

		log.Info("Starting simulation")
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
			nodeIDs := sim.UpNodeIDs()
			// determine the pivot node to be the first node of the simulation
			pivot := nodeIDs[0]

			// distribute chunks of a random file into the stores of nodes 1 to nodes
			// we will do this by creating a file store with an underlying round-robin store:
			// the file store will create a hash for the uploaded file, but every chunk will be
			// distributed to a different node via round-robin scheduling
			log.Debug("Writing file to round-robin file store")
			// to do this, we create an array of chunk stores (one per node, excluding the pivot node)
			stores := make([]storage.ChunkStore, len(nodeIDs)-1)
			// we then need to get all stores from the sim...
			lStores := sim.NodesItems(bucketKeyStore)
			i := 0
			// ...iterate the buckets...
			for id, bucketVal := range lStores {
				// ...and skip the one which belongs to the pivot node
				if id == pivot {
					continue
				}
				// the other ones are added to the array...
				stores[i] = bucketVal.(storage.ChunkStore)
				i++
			}
			// ...which then gets passed to the round-robin file store
			roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams())
			// now we can actually upload a (random) file to the round-robin store
			size := chunkCount * chunkSize
			log.Debug("Storing data to file store")
			fileHash, wait, err := roundRobinFileStore.Store(ctx, testutil.RandomReader(1, size), int64(size), false)
			if err != nil {
				return err
			}
			// wait until all chunks are stored
			err = wait(ctx)
			if err != nil {
				return err
			}

			log.Debug("Waiting for kademlia")
			// TODO this does not seem to be correct usage of the function, as the simulation may have no kademlias
			if _, err := sim.WaitTillHealthy(ctx); err != nil {
				return err
			}

			// get the pivot node's filestore
			item, ok := sim.NodeItem(pivot, bucketKeyFileStore)
			if !ok {
				return errors.New("no filestore")
			}
			pivotFileStore := item.(*storage.FileStore)
			log.Debug("Starting retrieval routine")
			retErrC := make(chan error)
			go func() {
				// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
				// we must wait for the peer connections to have started before requesting
				n, err := readAll(pivotFileStore, fileHash)
				log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
				retErrC <- err
			}()

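			// record peer disconnections; if the test returns an error and a disconnect
			// event was observed, report the disconnect as the cause of the failure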
			disconnected := watchDisconnections(ctx, sim)
			defer func() {
				if err != nil && disconnected.bool() {
					err = errors.New("disconnect events received")
				}
			}()

			// finally check that the pivot node gets all chunks via the root hash
			log.Debug("Check retrieval")
			success := true
			var total int64
			total, err = readAll(pivotFileStore, fileHash)
			if err != nil {
				return err
			}
			log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
			if err != nil || total != int64(size) {
				success = false
			}

			if !success {
				return fmt.Errorf("test failed, chunks not available on all nodes")
			}
			if err := <-retErrC; err != nil {
				return fmt.Errorf("requesting chunks: %v", err)
			}
			log.Debug("Test terminated successfully")
			return nil
		})
		if result.Error != nil {
			t.Fatal(result.Error)
		}
	})
}

func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
	for chunks := 32; chunks <= 128; chunks *= 2 {
		for i := 2; i < 32; i *= 2 {
			b.Run(
				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
				func(b *testing.B) {
					benchmarkDeliveryFromNodes(b, i, chunks, true)
				},
			)
		}
	}
}

func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
	for chunks := 32; chunks <= 128; chunks *= 2 {
		for i := 2; i < 32; i *= 2 {
			b.Run(
				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
				func(b *testing.B) {
					benchmarkDeliveryFromNodes(b, i, chunks, false)
				},
			)
		}
	}
}

func benchmarkDeliveryFromNodes(b *testing.B, nodes, chunkCount int, skipCheck bool) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
			if err != nil {
				return nil, nil, err
			}

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				SkipCheck:       skipCheck,
				Syncing:         SyncingDisabled,
				Retrieval:       RetrievalDisabled,
				SyncUpdateDelay: 0,
			}, nil)
			bucket.Store(bucketKeyRegistry, r)

			cleanup = func() {
				r.Close()
				clean()
			}

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	log.Info("Initializing test config")
	_, err := sim.AddNodesAndConnectChain(nodes)
	if err != nil {
		b.Fatal(err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
		nodeIDs := sim.UpNodeIDs()
		node := nodeIDs[len(nodeIDs)-1]

		item, ok := sim.NodeItem(node, bucketKeyFileStore)
		if !ok {
			return errors.New("no filestore")
		}
		remoteFileStore := item.(*storage.FileStore)

		pivotNode := nodeIDs[0]
		item, ok = sim.NodeItem(pivotNode, bucketKeyNetStore)
		if !ok {
			return errors.New("no netstore")
		}
		netStore := item.(*storage.NetStore)

		if _, err := sim.WaitTillHealthy(ctx); err != nil {
			return err
		}

		disconnected := watchDisconnections(ctx, sim)
		defer func() {
			if err != nil && disconnected.bool() {
				err = errors.New("disconnect events received")
			}
		}()
		// benchmark loop
		b.ResetTimer()
		b.StopTimer()
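		// the timer stays stopped while chunks are uploaded to the remote node below;
		// it is started only around the retrieval phase, so only retrieval time is measured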
	Loop:
		for i := 0; i < b.N; i++ {
			// uploading chunkCount random chunks to the last node
			hashes := make([]storage.Address, chunkCount)
			for i := 0; i < chunkCount; i++ {
				// create real chunks of the actual chunk size
				ctx := context.TODO()
				hash, wait, err := remoteFileStore.Store(ctx, testutil.RandomReader(i, chunkSize), int64(chunkSize), false)
				if err != nil {
					return fmt.Errorf("store: %v", err)
				}
				// wait until all chunks are stored
				err = wait(ctx)
				if err != nil {
					return fmt.Errorf("wait store: %v", err)
				}
				// collect the hashes
				hashes[i] = hash
			}
			// now benchmark the actual retrieval
			// netstore.Get is called for each hash in a goroutine and errors are collected
			b.StartTimer()
			errs := make(chan error)
			for _, hash := range hashes {
				go func(h storage.Address) {
					_, err := netStore.Get(ctx, h)
					log.Warn("test check netstore get", "hash", h, "err", err)
					errs <- err
				}(hash)
			}
			// count and report retrieval errors
			// if there are misses then chunk timeout is too low for the distance and volume (?)
			var total, misses int
			for err := range errs {
				if err != nil {
					log.Warn(err.Error())
					misses++
				}
				total++
				if total == chunkCount {
					break
				}
			}
			b.StopTimer()

			if misses > 0 {
				err = fmt.Errorf("%v chunks not found out of %v", misses, total)
				break Loop
			}
		}
		return err
	})
	if result.Error != nil {
		b.Fatal(result.Error)
	}
}