github.com/muhammedhassanm/blockchain@v0.0.0-20200120143007-697261defd4d/go-ethereum-master/swarm/network/stream/delivery_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"bytes"
	"context"
	crand "crypto/rand"
	"fmt"
	"io"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/simulations"
	p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network"
	streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

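// TestStreamerRetrieveRequest verifies that a RequestFromPeers call on the
// delivery results in a RetrieveRequestMsg (code 5) with SkipCheck set being
// sent to the peer.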
func TestStreamerRetrieveRequest(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	peerID := tester.IDs[0]

	streamer.delivery.RequestFromPeers(hash0[:], true)

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Expects: []p2ptest.Expect{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr:      hash0[:],
					SkipCheck: true,
				},
				Peer: peerID,
			},
		},
	})

	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}
}

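// TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore verifies that a
// RetrieveRequestMsg for a chunk that is not in the local store produces no
// OfferedHashesMsg response, so the exchange is expected to time out.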
func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	peerID := tester.IDs[0]

	chunk := storage.NewChunk(storage.Address(hash0[:]), nil)

	peer := streamer.getPeer(peerID)

	peer.handleSubscribeMsg(&SubscribeMsg{
		Stream:   NewStream(swarmChunkServerStreamName, "", false),
		History:  nil,
		Priority: Top,
	})

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr: chunk.Addr[:],
				},
				Peer: peerID,
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 1,
				Msg: &OfferedHashesMsg{
					HandoverProof: nil,
					Hashes:        nil,
					From:          0,
					To:            0,
				},
				Peer: peerID,
			},
		},
	})

	expectedError := `exchange #0 "RetrieveRequestMsg": timed out`
	if err == nil || err.Error() != expectedError {
		t.Fatalf("Expected error %v, got %v", expectedError, err)
	}
}

// TestStreamerUpstreamRetrieveRequestMsgExchange verifies that the upstream
// request server responds to a retrieve request with offered hashes, or with
// a direct chunk delivery if SkipCheck is set to true.
func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
	tester, streamer, localStore, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	peerID := tester.IDs[0]
	peer := streamer.getPeer(peerID)

	stream := NewStream(swarmChunkServerStreamName, "", false)

	peer.handleSubscribeMsg(&SubscribeMsg{
		Stream:   stream,
		History:  nil,
		Priority: Top,
	})

	hash := storage.Address(hash0[:])
	chunk := storage.NewChunk(hash, nil)
	chunk.SData = hash
	localStore.Put(chunk)
	chunk.WaitToStore()

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr: hash,
				},
				Peer: peerID,
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 1,
				Msg: &OfferedHashesMsg{
					HandoverProof: &HandoverProof{
						Handover: &Handover{},
					},
					Hashes: hash,
					From:   0,
					// TODO: why is this 32???
					To:     32,
					Stream: stream,
				},
				Peer: peerID,
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}

	hash = storage.Address(hash1[:])
	chunk = storage.NewChunk(hash, nil)
	chunk.SData = hash1[:]
	localStore.Put(chunk)
	chunk.WaitToStore()

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr:      hash,
					SkipCheck: true,
				},
				Peer: peerID,
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 6,
				Msg: &ChunkDeliveryMsg{
					Addr:  hash,
					SData: hash,
				},
				Peer: peerID,
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}
}

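// TestStreamerDownstreamChunkDeliveryMsgExchange verifies that a requested
// chunk is stored in the local store once the remote peer delivers it with a
// ChunkDeliveryMsg (code 6) following the subscription handshake.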
func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
	tester, streamer, localStore, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
		return &testClient{
			t: t,
		}, nil
	})

	peerID := tester.IDs[0]

	stream := NewStream("foo", "", true)
	err = streamer.Subscribe(peerID, stream, NewRange(5, 8), Top)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	chunkKey := hash0[:]
	chunkData := hash1[:]
	chunk, created := localStore.GetOrCreateRequest(chunkKey)

	if !created {
		t.Fatal("chunk already exists")
	}
	select {
	case <-chunk.ReqC:
		t.Fatal("chunk is already received")
	default:
	}

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "Subscribe message",
		Expects: []p2ptest.Expect{
			{
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					History:  NewRange(5, 8),
					Priority: Top,
				},
				Peer: peerID,
			},
		},
	},
		p2ptest.Exchange{
			Label: "ChunkDeliveryRequest message",
			Triggers: []p2ptest.Trigger{
				{
					Code: 6,
					Msg: &ChunkDeliveryMsg{
						Addr:  chunkKey,
						SData: chunkData,
					},
					Peer: peerID,
				},
			},
		})

	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	timeout := time.NewTimer(1 * time.Second)

	select {
	case <-timeout.C:
		t.Fatal("timeout receiving chunk")
	case <-chunk.ReqC:
	}

	storedChunk, err := localStore.Get(chunkKey)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	if !bytes.Equal(storedChunk.SData, chunkData) {
		t.Fatal("Retrieved chunk has different data than original")
	}
}

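// TestDeliveryFromNodes runs the delivery test over chains of 2, 4, 8 and 16
// nodes, each both with and without SkipCheck.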
func TestDeliveryFromNodes(t *testing.T) {
	testDeliveryFromNodes(t, 2, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 2, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 4, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 4, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 8, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 8, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 16, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 16, 1, dataChunkCount, false)
}

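// testDeliveryFromNodes simulates `nodes` nodes connected in a chain,
// distributes the chunks of a random file among all stores except the pivot
// node's, and then checks that the pivot node (node 0) can retrieve the
// complete file through retrieve requests to its peers.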
func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) {
	defaultSkipCheck = skipCheck
	toAddr = network.NewAddrFromNodeID
	createStoreFunc = createTestLocalStorageFromSim
	conf := &streamTesting.RunConfig{
		Adapter:         *adapter,
		NodeCount:       nodes,
		ConnLevel:       conns,
		ToAddr:          toAddr,
		Services:        services,
		EnableMsgEvents: false,
	}

	sim, teardown, err := streamTesting.NewSimulation(conf)
	var rpcSubscriptionsWg sync.WaitGroup
	defer func() {
		rpcSubscriptionsWg.Wait()
		teardown()
	}()
	if err != nil {
		t.Fatal(err.Error())
	}
	stores = make(map[discover.NodeID]storage.ChunkStore)
	for i, id := range sim.IDs {
		stores[id] = sim.Stores[i]
	}
	registries = make(map[discover.NodeID]*TestRegistry)
	deliveries = make(map[discover.NodeID]*Delivery)
	peerCount = func(id discover.NodeID) int {
		if sim.IDs[0] == id || sim.IDs[nodes-1] == id {
			return 1
		}
		return 2
	}

	// here we distribute the chunks of a random file into the Stores of
	// nodes 1 to nodes-1, i.e. all nodes except the pivot (node 0)
	rrFileStore := storage.NewFileStore(newRoundRobinStore(sim.Stores[1:]...), storage.NewFileStoreParams())
	size := chunkCount * chunkSize
	ctx := context.TODO()
	fileHash, wait, err := rrFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
	if err != nil {
		t.Fatal(err.Error())
	}
	// wait until all chunks are stored
	err = wait(ctx)
	if err != nil {
		t.Fatal(err.Error())
	}
	errc := make(chan error, 1)
	waitPeerErrC = make(chan error)
	quitC := make(chan struct{})
	defer close(quitC)

	action := func(ctx context.Context) error {
		// each node subscribes to the other nodes' swarmChunkServerStreamName;
		// we need to wait until an asynchronous process registers the peers in
		// streamer.peers, which is used by Subscribe
		// (the global waitPeerErrC channel is shared between the action and the node service)
		i := 0
		for err := range waitPeerErrC {
			if err != nil {
				return fmt.Errorf("error waiting for peers: %s", err)
			}
			i++
			if i == nodes {
				break
			}
		}

		// each node except the last one in the chain subscribes to the upstream
		// swarm chunk server stream, which responds to chunk retrieve requests
		for j := 0; j < nodes-1; j++ {
			id := sim.IDs[j]
			err := sim.CallClient(id, func(client *rpc.Client) error {
				doneC, err := streamTesting.WatchDisconnections(id, client, errc, quitC)
				if err != nil {
					return err
				}
				rpcSubscriptionsWg.Add(1)
				go func() {
					<-doneC
					rpcSubscriptionsWg.Done()
				}()
				ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
				defer cancel()
				sid := sim.IDs[j+1] // the upstream peer's id
				return client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top)
			})
			if err != nil {
				return err
			}
		}
		// create a retriever FileStore for the pivot node
		delivery := deliveries[sim.IDs[0]]
		retrieveFunc := func(chunk *storage.Chunk) error {
			return delivery.RequestFromPeers(chunk.Addr[:], skipCheck)
		}
		netStore := storage.NewNetStore(sim.Stores[0].(*storage.LocalStore), retrieveFunc)
		fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())

		go func() {
			// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
			// we must wait for the peer connections to have started before requesting
			n, err := readAll(fileStore, fileHash)
			log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
			if err != nil {
				errc <- fmt.Errorf("requesting chunks action error: %v", err)
			}
		}()
		return nil
	}
	check := func(ctx context.Context, id discover.NodeID) (bool, error) {
		select {
		case err := <-errc:
			return false, err
		case <-ctx.Done():
			return false, ctx.Err()
		default:
		}
		var total int64
		err := sim.CallClient(id, func(client *rpc.Client) error {
			ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
			defer cancel()
			return client.CallContext(ctx, &total, "stream_readAll", common.BytesToHash(fileHash))
		})
		log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
		if err != nil || total != int64(size) {
			return false, nil
		}
		return true, nil
	}

	conf.Step = &simulations.Step{
		Action:  action,
		Trigger: streamTesting.Trigger(10*time.Millisecond, quitC, sim.IDs[0]),
		// we are only testing the pivot node (net.Nodes[0])
		Expect: &simulations.Expectation{
			Nodes: sim.IDs[0:1],
			Check: check,
		},
	}
	startedAt := time.Now()
	timeout := 300 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	result, err := sim.Run(ctx, conf)
	finishedAt := time.Now()
	if err != nil {
		t.Fatalf("Setting up simulation failed: %v", err)
	}
	if result.Error != nil {
		t.Fatalf("Simulation failed: %s", result.Error)
	}
	streamTesting.CheckResult(t, result, startedAt, finishedAt)
}

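// BenchmarkDeliveryFromNodesWithoutCheck measures retrieval with SkipCheck
// enabled, where chunks are delivered directly instead of first being
// offered, for 2 to 16 nodes and 32 to 128 chunks.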
func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
	for chunks := 32; chunks <= 128; chunks *= 2 {
		for i := 2; i < 32; i *= 2 {
			b.Run(
				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
				func(b *testing.B) {
					benchmarkDeliveryFromNodes(b, i, 1, chunks, true)
				},
			)
		}
	}
}

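// BenchmarkDeliveryFromNodesWithCheck measures retrieval with SkipCheck
// disabled, i.e. including the offered/wanted hashes roundtrip, over the same
// node and chunk counts as the variant above.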
func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
	for chunks := 32; chunks <= 128; chunks *= 2 {
		for i := 2; i < 32; i *= 2 {
			b.Run(
				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
				func(b *testing.B) {
					benchmarkDeliveryFromNodes(b, i, 1, chunks, false)
				},
			)
		}
	}
}

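// benchmarkDeliveryFromNodes sets up a chain of `nodes` nodes, uploads
// chunkCount random chunks to the last node in the chain, and measures how
// long the pivot node (node 0) takes to fetch all of them through its
// NetStore.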
func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) {
	defaultSkipCheck = skipCheck
	toAddr = network.NewAddrFromNodeID
	createStoreFunc = createTestLocalStorageFromSim
	registries = make(map[discover.NodeID]*TestRegistry)

	timeout := 300 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	conf := &streamTesting.RunConfig{
		Adapter:         *adapter,
		NodeCount:       nodes,
		ConnLevel:       conns,
		ToAddr:          toAddr,
		Services:        services,
		EnableMsgEvents: false,
	}
	sim, teardown, err := streamTesting.NewSimulation(conf)
	var rpcSubscriptionsWg sync.WaitGroup
	defer func() {
		rpcSubscriptionsWg.Wait()
		teardown()
	}()
	if err != nil {
		b.Fatal(err.Error())
	}

	stores = make(map[discover.NodeID]storage.ChunkStore)
	deliveries = make(map[discover.NodeID]*Delivery)
	for i, id := range sim.IDs {
		stores[id] = sim.Stores[i]
	}
	peerCount = func(id discover.NodeID) int {
		if sim.IDs[0] == id || sim.IDs[nodes-1] == id {
			return 1
		}
		return 2
	}
	// wait channel for all nodes' peer connections to be set up
	waitPeerErrC = make(chan error)

	// create a FileStore for the last node in the chain, which we will write to
	remoteFileStore := storage.NewFileStore(sim.Stores[nodes-1], storage.NewFileStoreParams())

	// channel to signal simulation initialisation with the action call complete,
	// or node disconnections
	disconnectC := make(chan error)
	quitC := make(chan struct{})

	initC := make(chan error)

	action := func(ctx context.Context) error {
		// each node subscribes to the other nodes' swarmChunkServerStreamName;
		// we need to wait until an asynchronous process registers the peers in
		// streamer.peers, which is used by Subscribe
		// (the global waitPeerErrC channel is shared between the action and the node service)
		i := 0
		for err := range waitPeerErrC {
			if err != nil {
				return fmt.Errorf("error waiting for peers: %s", err)
			}
			i++
			if i == nodes {
				break
			}
		}
		var err error
		// each node except the last one subscribes to the upstream swarm chunk
		// server stream, which responds to chunk retrieve requests
		for j := 0; j < nodes-1; j++ {
			id := sim.IDs[j]
			err = sim.CallClient(id, func(client *rpc.Client) error {
				doneC, err := streamTesting.WatchDisconnections(id, client, disconnectC, quitC)
				if err != nil {
					return err
				}
				rpcSubscriptionsWg.Add(1)
				go func() {
					<-doneC
					rpcSubscriptionsWg.Done()
				}()
				ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
				defer cancel()
				sid := sim.IDs[j+1] // the upstream peer's id
				return client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top)
			})
			if err != nil {
				break
			}
		}
		initC <- err
		return nil
	}

	// the check function is only triggered when the benchmark finishes
	trigger := make(chan discover.NodeID)
	check := func(ctx context.Context, id discover.NodeID) (_ bool, err error) {
		return true, nil
	}

	conf.Step = &simulations.Step{
		Action:  action,
		Trigger: trigger,
		// we are only testing the pivot node (net.Nodes[0])
		Expect: &simulations.Expectation{
			Nodes: sim.IDs[0:1],
			Check: check,
		},
	}

	// run the simulation in the background
	errc := make(chan error)
	go func() {
		_, err := sim.Run(ctx, conf)
		close(quitC)
		errc <- err
	}()

	// wait for the simulation action to complete stream subscriptions
	err = <-initC
	if err != nil {
		b.Fatalf("simulation failed to initialise. expected no error. got %v", err)
	}

	// create a retriever FileStore for the pivot node;
	// by now deliveries are set for each node by the streamer service
	delivery := deliveries[sim.IDs[0]]
	retrieveFunc := func(chunk *storage.Chunk) error {
		return delivery.RequestFromPeers(chunk.Addr[:], skipCheck)
	}
	netStore := storage.NewNetStore(sim.Stores[0].(*storage.LocalStore), retrieveFunc)

	// benchmark loop: the timer runs only while chunks are being retrieved,
	// not while the upload setup for each iteration is in progress
	b.ResetTimer()
	b.StopTimer()
Loop:
	for i := 0; i < b.N; i++ {
		// upload chunkCount random chunks to the last node
		hashes := make([]storage.Address, chunkCount)
		for i := 0; i < chunkCount; i++ {
			// create actual-size, real chunks
			ctx := context.TODO()
			hash, wait, err := remoteFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false)
			if err != nil {
				b.Fatalf("expected no error. got %v", err)
			}
			// wait until all chunks are stored
			err = wait(ctx)
			if err != nil {
				b.Fatalf("expected no error. got %v", err)
			}
			// collect the hashes
			hashes[i] = hash
		}
		// now benchmark the actual retrieval:
		// netStore.Get is called for each hash in a goroutine and errors are collected
		b.StartTimer()
		errs := make(chan error)
		for _, hash := range hashes {
			go func(h storage.Address) {
				_, err := netStore.Get(h)
				log.Warn("test check netstore get", "hash", h, "err", err)
				errs <- err
			}(hash)
		}
		// count and report retrieval errors;
		// if there are misses, the chunk timeout is probably too low for the distance and volume (?)
		var total, misses int
		for err := range errs {
			if err != nil {
				log.Warn(err.Error())
				misses++
			}
			total++
			if total == chunkCount {
				break
			}
		}
		b.StopTimer()

		// abort the benchmark if a node disconnected
		select {
		case err = <-disconnectC:
			if err != nil {
				break Loop
			}
		default:
		}

		if misses > 0 {
			err = fmt.Errorf("%v chunks not found out of %v", misses, total)
			break Loop
		}
	}

	// benchmark over; trigger the check function to conclude the simulation
	// (quitC is already closed if the simulation has ended with an error)
	select {
	case <-quitC:
	case trigger <- sim.IDs[0]:
	}
	if err == nil {
		err = <-errc
	} else {
		if e := <-errc; e != nil {
			b.Errorf("sim.Run function error: %v", e)
		}
	}

	if err != nil {
		b.Fatalf("expected no error. got %v", err)
	}
}

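// createTestLocalStorageFromSim returns the simulation-provided chunk store
// for the given node ID; it is installed above as createStoreFunc for the
// test registries.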
func createTestLocalStorageFromSim(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error) {
	return stores[id], nil
}