github.com/aaa256/atlantis@v0.0.0-20210707112435-42ee889287a2/swarm/network/stream/delivery_test.go

// Copyright 2018 The go-athereum Authors
// This file is part of the go-athereum library.
//
// The go-athereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-athereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-athereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"bytes"
	"context"
	crand "crypto/rand"
	"fmt"
	"io"
	"sync"
	"testing"
	"time"

	"github.com/athereum/go-athereum/common"
	"github.com/athereum/go-athereum/p2p/discover"
	"github.com/athereum/go-athereum/p2p/simulations"
	p2ptest "github.com/athereum/go-athereum/p2p/testing"
	"github.com/athereum/go-athereum/rpc"
	"github.com/athereum/go-athereum/swarm/log"
	"github.com/athereum/go-athereum/swarm/network"
	streamTesting "github.com/athereum/go-athereum/swarm/network/stream/testing"
	"github.com/athereum/go-athereum/swarm/storage"
)

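// TestStreamerRetrieveRequest tests that a delivery.RequestFromPeers call
// results in a RetrieveRequestMsg (code 5) with SkipCheck set being sent to
// the connected test peer.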
func TestStreamerRetrieveRequest(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	peerID := tester.IDs[0]

	streamer.delivery.RequestFromPeers(hash0[:], true)

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Expects: []p2ptest.Expect{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr:      hash0[:],
					SkipCheck: true,
				},
				Peer: peerID,
			},
		},
	})

	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}
}

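// TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore tests that a
// retrieve request for a chunk that is not in the local store is not answered
// with an OfferedHashesMsg, so the expected exchange times out.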
func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	peerID := tester.IDs[0]

	chunk := storage.NewChunk(storage.Address(hash0[:]), nil)

	peer := streamer.getPeer(peerID)

	peer.handleSubscribeMsg(&SubscribeMsg{
		Stream:   NewStream(swarmChunkServerStreamName, "", false),
		History:  nil,
		Priority: Top,
	})

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr: chunk.Addr[:],
				},
				Peer: peerID,
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 1,
				Msg: &OfferedHashesMsg{
					HandoverProof: nil,
					Hashes:        nil,
					From:          0,
					To:            0,
				},
				Peer: peerID,
			},
		},
	})

	expectedError := `exchange #0 "RetrieveRequestMsg": timed out`
	if err == nil || err.Error() != expectedError {
		t.Fatalf("Expected error %v, got %v", expectedError, err)
	}
}

// TestStreamerUpstreamRetrieveRequestMsgExchange tests that the upstream
// request server receives a retrieve request and responds with offered
// hashes, or with a direct chunk delivery if SkipCheck is set to true.
func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
	tester, streamer, localStore, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	peerID := tester.IDs[0]
	peer := streamer.getPeer(peerID)

	stream := NewStream(swarmChunkServerStreamName, "", false)

	peer.handleSubscribeMsg(&SubscribeMsg{
		Stream:   stream,
		History:  nil,
		Priority: Top,
	})

	hash := storage.Address(hash0[:])
	chunk := storage.NewChunk(hash, nil)
	chunk.SData = hash
	localStore.Put(chunk)
	chunk.WaitToStore()

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr: hash,
				},
				Peer: peerID,
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 1,
				Msg: &OfferedHashesMsg{
					HandoverProof: &HandoverProof{
						Handover: &Handover{},
					},
					Hashes: hash,
					From:   0,
					// TODO: why is this 32???
					To:     32,
					Stream: stream,
				},
				Peer: peerID,
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}

	hash = storage.Address(hash1[:])
	chunk = storage.NewChunk(hash, nil)
	chunk.SData = hash1[:]
	localStore.Put(chunk)
	chunk.WaitToStore()

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr:      hash,
					SkipCheck: true,
				},
				Peer: peerID,
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 6,
				Msg: &ChunkDeliveryMsg{
					Addr:  hash,
					SData: hash,
				},
				Peer: peerID,
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}
}

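// TestStreamerDownstreamChunkDeliveryMsgExchange tests that a ChunkDeliveryMsg
// received by a subscribed downstream peer results in the chunk being stored
// in the local store.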
func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
	tester, streamer, localStore, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
		return &testClient{
			t: t,
		}, nil
	})

	peerID := tester.IDs[0]

	stream := NewStream("foo", "", true)
	err = streamer.Subscribe(peerID, stream, NewRange(5, 8), Top)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	chunkKey := hash0[:]
	chunkData := hash1[:]
	chunk, created := localStore.GetOrCreateRequest(chunkKey)

	if !created {
		t.Fatal("chunk already exists")
	}
	select {
	case <-chunk.ReqC:
		t.Fatal("chunk is already received")
	default:
	}

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "Subscribe message",
		Expects: []p2ptest.Expect{
			{
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					History:  NewRange(5, 8),
					Priority: Top,
				},
				Peer: peerID,
			},
		},
	},
		p2ptest.Exchange{
			Label: "ChunkDeliveryRequest message",
			Triggers: []p2ptest.Trigger{
				{
					Code: 6,
					Msg: &ChunkDeliveryMsg{
						Addr:  chunkKey,
						SData: chunkData,
					},
					Peer: peerID,
				},
			},
		})

	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	timeout := time.NewTimer(1 * time.Second)

	select {
	case <-timeout.C:
		t.Fatal("timeout receiving chunk")
	case <-chunk.ReqC:
	}

	storedChunk, err := localStore.Get(chunkKey)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	if !bytes.Equal(storedChunk.SData, chunkData) {
		t.Fatal("Retrieved chunk has different data than original")
	}
}

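// TestDeliveryFromNodes runs the delivery simulation over chains of 2, 4, 8
// and 16 nodes, both with and without SkipCheck.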
func TestDeliveryFromNodes(t *testing.T) {
	testDeliveryFromNodes(t, 2, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 2, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 4, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 4, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 8, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 8, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 16, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 16, 1, dataChunkCount, false)
}

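// testDeliveryFromNodes sets up a simulated chain of nodes, distributes the
// chunks of a random file across all nodes but the first, and checks that the
// pivot node (sim.IDs[0]) can retrieve the complete file via retrieve requests.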
func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) {
	defaultSkipCheck = skipCheck
	toAddr = network.NewAddrFromNodeID
	createStoreFunc = createTestLocalStorageFromSim
	conf := &streamTesting.RunConfig{
		Adapter:         *adapter,
		NodeCount:       nodes,
		ConnLevel:       conns,
		ToAddr:          toAddr,
		Services:        services,
		EnableMsgEvents: false,
	}

	sim, teardown, err := streamTesting.NewSimulation(conf)
	var rpcSubscriptionsWg sync.WaitGroup
	defer func() {
		rpcSubscriptionsWg.Wait()
		teardown()
	}()
	if err != nil {
		t.Fatal(err.Error())
	}
	stores = make(map[discover.NodeID]storage.ChunkStore)
	for i, id := range sim.IDs {
		stores[id] = sim.Stores[i]
	}
	registries = make(map[discover.NodeID]*TestRegistry)
	deliveries = make(map[discover.NodeID]*Delivery)
	peerCount = func(id discover.NodeID) int {
		if sim.IDs[0] == id || sim.IDs[nodes-1] == id {
			return 1
		}
		return 2
	}

	// distribute the chunks of a random file round-robin into the stores of all
	// nodes except the first (the pivot node that will retrieve them)
	rrFileStore := storage.NewFileStore(newRoundRobinStore(sim.Stores[1:]...), storage.NewFileStoreParams())
	size := chunkCount * chunkSize
	fileHash, wait, err := rrFileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false)
	if err != nil {
		t.Fatal(err.Error())
	}
	// wait until all chunks are stored
	wait()
	errc := make(chan error, 1)
	waitPeerErrC = make(chan error)
	quitC := make(chan struct{})
	defer close(quitC)

	action := func(ctx context.Context) error {
		// each node subscribes to each other's swarmChunkServerStreamName.
		// We need to wait until an asynchronous process registers the peers in
		// streamer.peers, which is used by Subscribe, so a global error channel
		// (waitPeerErrC) is shared between the action and the node service.
		i := 0
		for err := range waitPeerErrC {
			if err != nil {
				return fmt.Errorf("error waiting for peers: %s", err)
			}
			i++
			if i == nodes {
				break
			}
		}

		// each node except the last one in the chain subscribes to the upstream
		// swarm chunk server stream, which responds to chunk retrieve requests
		for j := 0; j < nodes-1; j++ {
			id := sim.IDs[j]
			err := sim.CallClient(id, func(client *rpc.Client) error {
				doneC, err := streamTesting.WatchDisconnections(id, client, errc, quitC)
				if err != nil {
					return err
				}
				rpcSubscriptionsWg.Add(1)
				go func() {
					<-doneC
					rpcSubscriptionsWg.Done()
				}()
				ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
				defer cancel()
				sid := sim.IDs[j+1]
				return client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top)
			})
			if err != nil {
				return err
			}
		}
		// create a retriever FileStore for the pivot node
		delivery := deliveries[sim.IDs[0]]
		retrieveFunc := func(chunk *storage.Chunk) error {
			return delivery.RequestFromPeers(chunk.Addr[:], skipCheck)
		}
		netStore := storage.NewNetStore(sim.Stores[0].(*storage.LocalStore), retrieveFunc)
		fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())

		go func() {
			// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
			// we must wait for the peer connections to have started before requesting
			n, err := readAll(fileStore, fileHash)
			log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
			if err != nil {
				errc <- fmt.Errorf("requesting chunks action error: %v", err)
			}
		}()
		return nil
	}
	check := func(ctx context.Context, id discover.NodeID) (bool, error) {
		select {
		case err := <-errc:
			return false, err
		case <-ctx.Done():
			return false, ctx.Err()
		default:
		}
		var total int64
		err := sim.CallClient(id, func(client *rpc.Client) error {
			ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
			defer cancel()
			return client.CallContext(ctx, &total, "stream_readAll", common.BytesToHash(fileHash))
		})
		log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
		if err != nil || total != int64(size) {
			return false, nil
		}
		return true, nil
	}

	conf.Step = &simulations.Step{
		Action:  action,
		Trigger: streamTesting.Trigger(10*time.Millisecond, quitC, sim.IDs[0]),
		// we are only testing the pivot node (net.Nodes[0])
		Expect: &simulations.Expectation{
			Nodes: sim.IDs[0:1],
			Check: check,
		},
	}
	startedAt := time.Now()
	timeout := 300 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	result, err := sim.Run(ctx, conf)
	finishedAt := time.Now()
	if err != nil {
		t.Fatalf("Setting up simulation failed: %v", err)
	}
	if result.Error != nil {
		t.Fatalf("Simulation failed: %s", result.Error)
	}
	streamTesting.CheckResult(t, result, startedAt, finishedAt)
}

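// BenchmarkDeliveryFromNodesWithoutCheck benchmarks chunk retrieval with
// SkipCheck enabled (chunks are delivered directly, without the offered/wanted
// hashes round trip) for 2, 4, 8 and 16 nodes and 32, 64 and 128 chunks.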
func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
	for chunks := 32; chunks <= 128; chunks *= 2 {
		for i := 2; i < 32; i *= 2 {
			b.Run(
				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
				func(b *testing.B) {
					benchmarkDeliveryFromNodes(b, i, 1, chunks, true)
				},
			)
		}
	}
}

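// BenchmarkDeliveryFromNodesWithCheck benchmarks the same retrievals with the
// offered/wanted hashes round trip (SkipCheck disabled).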
func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
	for chunks := 32; chunks <= 128; chunks *= 2 {
		for i := 2; i < 32; i *= 2 {
			b.Run(
				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
				func(b *testing.B) {
					benchmarkDeliveryFromNodes(b, i, 1, chunks, false)
				},
			)
		}
	}
}

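// benchmarkDeliveryFromNodes sets up a simulated chain of nodes; each
// iteration uploads chunkCount random chunks to the last node and measures
// retrieving them all on the pivot node through the chain.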
func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) {
	defaultSkipCheck = skipCheck
	toAddr = network.NewAddrFromNodeID
	createStoreFunc = createTestLocalStorageFromSim
	registries = make(map[discover.NodeID]*TestRegistry)

	timeout := 300 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	conf := &streamTesting.RunConfig{
		Adapter:         *adapter,
		NodeCount:       nodes,
		ConnLevel:       conns,
		ToAddr:          toAddr,
		Services:        services,
		EnableMsgEvents: false,
	}
	sim, teardown, err := streamTesting.NewSimulation(conf)
	var rpcSubscriptionsWg sync.WaitGroup
	defer func() {
		rpcSubscriptionsWg.Wait()
		teardown()
	}()
	if err != nil {
		b.Fatal(err.Error())
	}

	stores = make(map[discover.NodeID]storage.ChunkStore)
	deliveries = make(map[discover.NodeID]*Delivery)
	for i, id := range sim.IDs {
		stores[id] = sim.Stores[i]
	}
	peerCount = func(id discover.NodeID) int {
		if sim.IDs[0] == id || sim.IDs[nodes-1] == id {
			return 1
		}
		return 2
	}
	// wait channel for all nodes' peer connections to be set up
	waitPeerErrC = make(chan error)

	// create a FileStore for the last node in the chain which we are going to write to
	remoteFileStore := storage.NewFileStore(sim.Stores[nodes-1], storage.NewFileStoreParams())

	// channels to signal that simulation initialisation with the action call is
	// complete, or that a node has disconnected
	disconnectC := make(chan error)
	quitC := make(chan struct{})

	initC := make(chan error)

	action := func(ctx context.Context) error {
		// each node subscribes to each other's swarmChunkServerStreamName.
		// We need to wait until an asynchronous process registers the peers in
		// streamer.peers, which is used by Subscribe, so a global error channel
		// (waitPeerErrC) is shared between the action and the node service.
		i := 0
		for err := range waitPeerErrC {
			if err != nil {
				return fmt.Errorf("error waiting for peers: %s", err)
			}
			i++
			if i == nodes {
				break
			}
		}
		var err error
		// each node except the last one subscribes to the upstream swarm chunk
		// server stream, which responds to chunk retrieve requests
		for j := 0; j < nodes-1; j++ {
			id := sim.IDs[j]
			err = sim.CallClient(id, func(client *rpc.Client) error {
				doneC, err := streamTesting.WatchDisconnections(id, client, disconnectC, quitC)
				if err != nil {
					return err
				}
				rpcSubscriptionsWg.Add(1)
				go func() {
					<-doneC
					rpcSubscriptionsWg.Done()
				}()
				ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
				defer cancel()
				sid := sim.IDs[j+1] // the upstream peer's id
				return client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top)
			})
			if err != nil {
				break
			}
		}
		initC <- err
		return nil
	}

	// the check function is only triggered when the benchmark finishes
	trigger := make(chan discover.NodeID)
	check := func(ctx context.Context, id discover.NodeID) (_ bool, err error) {
		return true, nil
	}

	conf.Step = &simulations.Step{
		Action:  action,
		Trigger: trigger,
		// we are only testing the pivot node (net.Nodes[0])
		Expect: &simulations.Expectation{
			Nodes: sim.IDs[0:1],
			Check: check,
		},
	}

	// run the simulation in the background
	errc := make(chan error)
	go func() {
		_, err := sim.Run(ctx, conf)
		close(quitC)
		errc <- err
	}()

	// wait for simulation action to complete stream subscriptions
	err = <-initC
	if err != nil {
		b.Fatalf("simulation failed to initialise. expected no error. got %v", err)
	}

	// create a retriever FileStore for the pivot node
	// by now deliveries are set for each node by the streamer service
	delivery := deliveries[sim.IDs[0]]
	retrieveFunc := func(chunk *storage.Chunk) error {
		return delivery.RequestFromPeers(chunk.Addr[:], skipCheck)
	}
	netStore := storage.NewNetStore(sim.Stores[0].(*storage.LocalStore), retrieveFunc)

	// benchmark loop
	b.ResetTimer()
	b.StopTimer()
Loop:
	for i := 0; i < b.N; i++ {
		// uploading chunkCount random chunks to the last node
		hashes := make([]storage.Address, chunkCount)
		for i := 0; i < chunkCount; i++ {
			// create real chunks of the actual chunk size
			hash, wait, err := remoteFileStore.Store(io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false)
			if err != nil {
				b.Fatalf("expected no error. got %v", err)
			}
			// wait until the chunk is stored
			wait()
			// collect the hashes
			hashes[i] = hash
		}
		// now benchmark the actual retrieval:
		// netStore.Get is called for each hash in a goroutine and errors are collected
		b.StartTimer()
		errs := make(chan error)
		for _, hash := range hashes {
			go func(h storage.Address) {
				_, err := netStore.Get(h)
				log.Warn("test check netstore get", "hash", h, "err", err)
				errs <- err
			}(hash)
		}
		// count and report retrieval errors;
		// if there are misses, the chunk timeout is likely too low for the distance and volume
		var total, misses int
		for err := range errs {
			if err != nil {
				log.Warn(err.Error())
				misses++
			}
			total++
			if total == chunkCount {
				break
			}
		}
		b.StopTimer()

		select {
		case err = <-disconnectC:
			if err != nil {
				break Loop
			}
		default:
		}

		if misses > 0 {
			err = fmt.Errorf("%v chunks not found out of %v", misses, total)
			break Loop
		}
	}

	// benchmark over, trigger the check function to conclude the simulation
	select {
	case <-quitC:
	case trigger <- sim.IDs[0]:
	}
	if err == nil {
		err = <-errc
	} else {
		if e := <-errc; e != nil {
			b.Errorf("sim.Run function error: %v", e)
		}
	}

	if err != nil {
		b.Fatalf("expected no error. got %v", err)
	}
}

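// createTestLocalStorageFromSim returns the simulation-provided chunk store
// for the given node id, ignoring the address.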
func createTestLocalStorageFromSim(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error) {
	return stores[id], nil
}