github.com/insight-chain/inb-go@v1.1.3-0.20191221022159-da049980ae38/swarm/network/stream/delivery_test.go

     1  // Copyright 2018 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package stream
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	"fmt"
    23  	"os"
    24  	"sync"
    25  	"testing"
    26  	"time"
    27  
    28  	"github.com/insight-chain/inb-go/node"
    29  	"github.com/insight-chain/inb-go/p2p"
    30  	"github.com/insight-chain/inb-go/p2p/enode"
    31  	"github.com/insight-chain/inb-go/p2p/protocols"
    32  	"github.com/insight-chain/inb-go/p2p/simulations/adapters"
    33  	p2ptest "github.com/insight-chain/inb-go/p2p/testing"
    34  	"github.com/insight-chain/inb-go/swarm/log"
    35  	"github.com/insight-chain/inb-go/swarm/network"
    36  	pq "github.com/insight-chain/inb-go/swarm/network/priorityqueue"
    37  	"github.com/insight-chain/inb-go/swarm/network/simulation"
    38  	"github.com/insight-chain/inb-go/swarm/state"
    39  	"github.com/insight-chain/inb-go/swarm/storage"
    40  	"github.com/insight-chain/inb-go/swarm/testutil"
    41  )
    42  
    43  //Tests initializing a retrieve request
    44  func TestStreamerRetrieveRequest(t *testing.T) {
    45  	regOpts := &RegistryOptions{
    46  		Retrieval: RetrievalClientOnly,
    47  		Syncing:   SyncingDisabled,
    48  	}
    49  	tester, streamer, _, teardown, err := newStreamerTester(t, regOpts)
    50  	defer teardown()
    51  	if err != nil {
    52  		t.Fatal(err)
    53  	}
    54  
    55  	node := tester.Nodes[0]
    56  
    57  	ctx := context.Background()
    58  	req := network.NewRequest(
    59  		storage.Address(hash0[:]),
    60  		true,
    61  		&sync.Map{},
    62  	)
    63  	streamer.delivery.RequestFromPeers(ctx, req)
    64  
    65  	stream := NewStream(swarmChunkServerStreamName, "", true)
    66  
    67  	err = tester.TestExchanges(p2ptest.Exchange{
    68  		Label: "RetrieveRequestMsg",
    69  		Expects: []p2ptest.Expect{
    70  			{ //start expecting a subscription for RETRIEVE_REQUEST due to `RetrievalClientOnly`
    71  				Code: 4,
    72  				Msg: &SubscribeMsg{
    73  					Stream:   stream,
    74  					History:  nil,
    75  					Priority: Top,
    76  				},
    77  				Peer: node.ID(),
    78  			},
    79  			{ //expect a retrieve request message for the given hash
    80  				Code: 5,
    81  				Msg: &RetrieveRequestMsg{
    82  					Addr:      hash0[:],
    83  					SkipCheck: true,
    84  				},
    85  				Peer: node.ID(),
    86  			},
    87  		},
    88  	})
    89  
    90  	if err != nil {
    91  		t.Fatalf("Expected no error, got %v", err)
    92  	}
    93  }
    94  
     95  //Test requesting a chunk from a peer, then issuing an "empty" OfferedHashesMsg (no hashes available yet)
    96  //Should time out as the peer does not have the chunk (no syncing happened previously)
    97  func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
    98  	tester, streamer, _, teardown, err := newStreamerTester(t, &RegistryOptions{
    99  		Retrieval: RetrievalEnabled,
   100  		Syncing:   SyncingDisabled, //do no syncing
   101  	})
   102  	defer teardown()
   103  	if err != nil {
   104  		t.Fatal(err)
   105  	}
   106  
   107  	node := tester.Nodes[0]
   108  
   109  	chunk := storage.NewChunk(storage.Address(hash0[:]), nil)
   110  
   111  	peer := streamer.getPeer(node.ID())
   112  
   113  	stream := NewStream(swarmChunkServerStreamName, "", true)
   114  	//simulate pre-subscription to RETRIEVE_REQUEST stream on peer
   115  	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
   116  		Stream:   stream,
   117  		History:  nil,
   118  		Priority: Top,
   119  	})
   120  
   121  	//test the exchange
   122  	err = tester.TestExchanges(p2ptest.Exchange{
   123  		Expects: []p2ptest.Expect{
   124  			{ //first expect a subscription to the RETRIEVE_REQUEST stream
   125  				Code: 4,
   126  				Msg: &SubscribeMsg{
   127  					Stream:   stream,
   128  					History:  nil,
   129  					Priority: Top,
   130  				},
   131  				Peer: node.ID(),
   132  			},
   133  		},
   134  	}, p2ptest.Exchange{
   135  		Label: "RetrieveRequestMsg",
   136  		Triggers: []p2ptest.Trigger{
   137  			{ //then the actual RETRIEVE_REQUEST....
   138  				Code: 5,
   139  				Msg: &RetrieveRequestMsg{
   140  					Addr: chunk.Address()[:],
   141  				},
   142  				Peer: node.ID(),
   143  			},
   144  		},
   145  		Expects: []p2ptest.Expect{
   146  			{ //to which the peer responds with offered hashes
   147  				Code: 1,
   148  				Msg: &OfferedHashesMsg{
   149  					HandoverProof: nil,
   150  					Hashes:        nil,
   151  					From:          0,
   152  					To:            0,
   153  				},
   154  				Peer: node.ID(),
   155  			},
   156  		},
   157  	})
   158  
   159  	//should fail with a timeout as the peer we are requesting
   160  	//the chunk from does not have the chunk
   161  	expectedError := `exchange #1 "RetrieveRequestMsg": timed out`
   162  	if err == nil || err.Error() != expectedError {
   163  		t.Fatalf("Expected error %v, got %v", expectedError, err)
   164  	}
   165  }
   166  
    167  // Tests that the upstream request server receives a retrieve request and responds with
    168  // offered hashes, or with a direct chunk delivery if SkipCheck is set to true
   169  func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
   170  	tester, streamer, localStore, teardown, err := newStreamerTester(t, &RegistryOptions{
   171  		Retrieval: RetrievalEnabled,
   172  		Syncing:   SyncingDisabled,
   173  	})
   174  	defer teardown()
   175  	if err != nil {
   176  		t.Fatal(err)
   177  	}
   178  
   179  	node := tester.Nodes[0]
   180  
   181  	peer := streamer.getPeer(node.ID())
   182  
   183  	stream := NewStream(swarmChunkServerStreamName, "", true)
   184  
   185  	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
   186  		Stream:   stream,
   187  		History:  nil,
   188  		Priority: Top,
   189  	})
   190  
   191  	hash := storage.Address(hash0[:])
   192  	chunk := storage.NewChunk(hash, hash)
   193  	err = localStore.Put(context.TODO(), chunk)
   194  	if err != nil {
   195  		t.Fatalf("Expected no err got %v", err)
   196  	}
   197  
   198  	err = tester.TestExchanges(p2ptest.Exchange{
   199  		Expects: []p2ptest.Expect{
   200  			{
   201  				Code: 4,
   202  				Msg: &SubscribeMsg{
   203  					Stream:   stream,
   204  					History:  nil,
   205  					Priority: Top,
   206  				},
   207  				Peer: node.ID(),
   208  			},
   209  		},
   210  	}, p2ptest.Exchange{
   211  		Label: "RetrieveRequestMsg",
   212  		Triggers: []p2ptest.Trigger{
   213  			{
   214  				Code: 5,
   215  				Msg: &RetrieveRequestMsg{
   216  					Addr: hash,
   217  				},
   218  				Peer: node.ID(),
   219  			},
   220  		},
   221  		Expects: []p2ptest.Expect{
   222  			{
   223  				Code: 1,
   224  				Msg: &OfferedHashesMsg{
   225  					HandoverProof: &HandoverProof{
   226  						Handover: &Handover{},
   227  					},
   228  					Hashes: hash,
   229  					From:   0,
   230  					// TODO: why is this 32???
   231  					To:     32,
   232  					Stream: stream,
   233  				},
   234  				Peer: node.ID(),
   235  			},
   236  		},
   237  	})
   238  
   239  	if err != nil {
   240  		t.Fatal(err)
   241  	}
   242  
   243  	hash = storage.Address(hash1[:])
   244  	chunk = storage.NewChunk(hash, hash1[:])
   245  	err = localStore.Put(context.TODO(), chunk)
   246  	if err != nil {
   247  		t.Fatalf("Expected no err got %v", err)
   248  	}
   249  
   250  	err = tester.TestExchanges(p2ptest.Exchange{
   251  		Label: "RetrieveRequestMsg",
   252  		Triggers: []p2ptest.Trigger{
   253  			{
   254  				Code: 5,
   255  				Msg: &RetrieveRequestMsg{
   256  					Addr:      hash,
   257  					SkipCheck: true,
   258  				},
   259  				Peer: node.ID(),
   260  			},
   261  		},
   262  		Expects: []p2ptest.Expect{
   263  			{
   264  				Code: 6,
   265  				Msg: &ChunkDeliveryMsg{
   266  					Addr:  hash,
   267  					SData: hash,
   268  				},
   269  				Peer: node.ID(),
   270  			},
   271  		},
   272  	})
   273  
   274  	if err != nil {
   275  		t.Fatal(err)
   276  	}
   277  }
   278  
   279  // if there is one peer in the Kademlia, RequestFromPeers should return it
   280  func TestRequestFromPeers(t *testing.T) {
   281  	dummyPeerID := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")
   282  
   283  	addr := network.RandomAddr()
   284  	to := network.NewKademlia(addr.OAddr, network.NewKadParams())
   285  	delivery := NewDelivery(to, nil)
   286  	protocolsPeer := protocols.NewPeer(p2p.NewPeer(dummyPeerID, "dummy", nil), nil, nil)
   287  	peer := network.NewPeer(&network.BzzPeer{
   288  		BzzAddr:   network.RandomAddr(),
   289  		LightNode: false,
   290  		Peer:      protocolsPeer,
   291  	}, to)
   292  	to.On(peer)
   293  	r := NewRegistry(addr.ID(), delivery, nil, nil, nil, nil)
   294  
    295  	// an empty priority queue has to be created to prevent a goroutine from running after the test has finished
   296  	sp := &Peer{
   297  		Peer:     protocolsPeer,
   298  		pq:       pq.New(int(PriorityQueue), PriorityQueueCap),
   299  		streamer: r,
   300  	}
   301  	r.setPeer(sp)
   302  	req := network.NewRequest(
   303  		storage.Address(hash0[:]),
   304  		true,
   305  		&sync.Map{},
   306  	)
   307  	ctx := context.Background()
   308  	id, _, err := delivery.RequestFromPeers(ctx, req)
   309  
   310  	if err != nil {
   311  		t.Fatal(err)
   312  	}
   313  	if *id != dummyPeerID {
    314  		t.Fatalf("Expected peer ID %v, got %v", dummyPeerID, id)
   315  	}
   316  }
   317  
   318  // RequestFromPeers should not return light nodes
   319  func TestRequestFromPeersWithLightNode(t *testing.T) {
   320  	dummyPeerID := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")
   321  
   322  	addr := network.RandomAddr()
   323  	to := network.NewKademlia(addr.OAddr, network.NewKadParams())
   324  	delivery := NewDelivery(to, nil)
   325  
   326  	protocolsPeer := protocols.NewPeer(p2p.NewPeer(dummyPeerID, "dummy", nil), nil, nil)
    327  	// setting up a light node
   328  	peer := network.NewPeer(&network.BzzPeer{
   329  		BzzAddr:   network.RandomAddr(),
   330  		LightNode: true,
   331  		Peer:      protocolsPeer,
   332  	}, to)
   333  	to.On(peer)
   334  	r := NewRegistry(addr.ID(), delivery, nil, nil, nil, nil)
    335  	// an empty priority queue has to be created to prevent a goroutine from running after the test has finished
   336  	sp := &Peer{
   337  		Peer:     protocolsPeer,
   338  		pq:       pq.New(int(PriorityQueue), PriorityQueueCap),
   339  		streamer: r,
   340  	}
   341  	r.setPeer(sp)
   342  
   343  	req := network.NewRequest(
   344  		storage.Address(hash0[:]),
   345  		true,
   346  		&sync.Map{},
   347  	)
   348  
   349  	ctx := context.Background()
   350  	// making a request which should return with "no peer found"
   351  	_, _, err := delivery.RequestFromPeers(ctx, req)
   352  
   353  	expectedError := "no peer found"
    354  	if err == nil || err.Error() != expectedError {
   355  		t.Fatalf("expected '%v', got %v", expectedError, err)
   356  	}
   357  }
   358  
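         // TestStreamerDownstreamChunkDeliveryMsgExchange registers a test client for a custom
         // stream, subscribes to it, and triggers a ChunkDeliveryMsg from the peer, verifying
         // that the delivered chunk ends up in the local store with the expected data.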
   359  func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
   360  	tester, streamer, localStore, teardown, err := newStreamerTester(t, &RegistryOptions{
   361  		Retrieval: RetrievalDisabled,
   362  		Syncing:   SyncingDisabled,
   363  	})
   364  	defer teardown()
   365  	if err != nil {
   366  		t.Fatal(err)
   367  	}
   368  
   369  	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
   370  		return &testClient{
   371  			t: t,
   372  		}, nil
   373  	})
   374  
   375  	node := tester.Nodes[0]
   376  
   377  	//subscribe to custom stream
   378  	stream := NewStream("foo", "", true)
   379  	err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
   380  	if err != nil {
   381  		t.Fatalf("Expected no error, got %v", err)
   382  	}
   383  
   384  	chunkKey := hash0[:]
   385  	chunkData := hash1[:]
   386  
   387  	err = tester.TestExchanges(p2ptest.Exchange{
   388  		Label: "Subscribe message",
   389  		Expects: []p2ptest.Expect{
   390  			{ //first expect subscription to the custom stream...
   391  				Code: 4,
   392  				Msg: &SubscribeMsg{
   393  					Stream:   stream,
   394  					History:  NewRange(5, 8),
   395  					Priority: Top,
   396  				},
   397  				Peer: node.ID(),
   398  			},
   399  		},
   400  	},
   401  		p2ptest.Exchange{
   402  			Label: "ChunkDelivery message",
   403  			Triggers: []p2ptest.Trigger{
   404  				{ //...then trigger a chunk delivery for the given chunk from peer in order for
   405  					//local node to get the chunk delivered
   406  					Code: 6,
   407  					Msg: &ChunkDeliveryMsg{
   408  						Addr:  chunkKey,
   409  						SData: chunkData,
   410  					},
   411  					Peer: node.ID(),
   412  				},
   413  			},
   414  		})
   415  
   416  	if err != nil {
   417  		t.Fatalf("Expected no error, got %v", err)
   418  	}
   419  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
   420  	defer cancel()
   421  
   422  	// wait for the chunk to get stored
   423  	storedChunk, err := localStore.Get(ctx, chunkKey)
   424  	for err != nil {
   425  		select {
   426  		case <-ctx.Done():
   427  			t.Fatalf("Chunk is not in localstore after timeout, err: %v", err)
   428  		default:
   429  		}
   430  		storedChunk, err = localStore.Get(ctx, chunkKey)
   431  		time.Sleep(50 * time.Millisecond)
   432  	}
   433  
   434  	if err != nil {
   435  		t.Fatalf("Expected no error, got %v", err)
   436  	}
   437  
   438  	if !bytes.Equal(storedChunk.Data(), chunkData) {
   439  		t.Fatal("Retrieved chunk has different data than original")
   440  	}
   441  
   442  }
   443  
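         // TestDeliveryFromNodes runs the delivery simulation for chain networks of 2, 4, 8 and
         // 16 nodes, both with and without SkipCheck.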
   444  func TestDeliveryFromNodes(t *testing.T) {
   445  	testDeliveryFromNodes(t, 2, 1, dataChunkCount, true)
   446  	testDeliveryFromNodes(t, 2, 1, dataChunkCount, false)
   447  	testDeliveryFromNodes(t, 4, 1, dataChunkCount, true)
   448  	testDeliveryFromNodes(t, 4, 1, dataChunkCount, false)
   449  	testDeliveryFromNodes(t, 8, 1, dataChunkCount, true)
   450  	testDeliveryFromNodes(t, 8, 1, dataChunkCount, false)
   451  	testDeliveryFromNodes(t, 16, 1, dataChunkCount, true)
   452  	testDeliveryFromNodes(t, 16, 1, dataChunkCount, false)
   453  }
   454  
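         // testDeliveryFromNodes uploads a random file to a round-robin store spread over all
         // nodes except the pivot node, then checks that the pivot node can retrieve the whole
         // file through retrieve requests alone (syncing is disabled).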
   455  func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) {
   456  
   457  	t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
   458  	sim := simulation.New(map[string]simulation.ServiceFunc{
   459  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   460  			node := ctx.Config.Node()
   461  			addr := network.NewAddr(node)
   462  			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
   463  			if err != nil {
   464  				return nil, nil, err
   465  			}
   466  			bucket.Store(bucketKeyStore, store)
   467  			cleanup = func() {
   468  				os.RemoveAll(datadir)
   469  				store.Close()
   470  			}
   471  			localStore := store.(*storage.LocalStore)
   472  			netStore, err := storage.NewNetStore(localStore, nil)
   473  			if err != nil {
   474  				return nil, nil, err
   475  			}
   476  
   477  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   478  			delivery := NewDelivery(kad, netStore)
   479  			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
   480  
   481  			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
   482  				SkipCheck: skipCheck,
   483  				Syncing:   SyncingDisabled,
   484  				Retrieval: RetrievalEnabled,
   485  			}, nil)
   486  			bucket.Store(bucketKeyRegistry, r)
   487  
   488  			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
   489  			bucket.Store(bucketKeyFileStore, fileStore)
   490  
   491  			return r, cleanup, nil
   492  
   493  		},
   494  	})
   495  	defer sim.Close()
   496  
   497  	log.Info("Adding nodes to simulation")
   498  	_, err := sim.AddNodesAndConnectChain(nodes)
   499  	if err != nil {
   500  		t.Fatal(err)
   501  	}
   502  
   503  	log.Info("Starting simulation")
   504  	ctx := context.Background()
   505  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
   506  		nodeIDs := sim.UpNodeIDs()
   507  		//determine the pivot node to be the first node of the simulation
   508  		sim.SetPivotNode(nodeIDs[0])
   509  		//distribute chunks of a random file into Stores of nodes 1 to nodes
   510  		//we will do this by creating a file store with an underlying round-robin store:
   511  		//the file store will create a hash for the uploaded file, but every chunk will be
   512  		//distributed to different nodes via round-robin scheduling
   513  		log.Debug("Writing file to round-robin file store")
    514  		//to do this, we create an array of chunk stores (one for every node except the pivot node)
   515  		stores := make([]storage.ChunkStore, len(nodeIDs)-1)
   516  		//we then need to get all stores from the sim....
   517  		lStores := sim.NodesItems(bucketKeyStore)
   518  		i := 0
   519  		//...iterate the buckets...
   520  		for id, bucketVal := range lStores {
   521  			//...and remove the one which is the pivot node
   522  			if id == *sim.PivotNodeID() {
   523  				continue
   524  			}
   525  			//the other ones are added to the array...
   526  			stores[i] = bucketVal.(storage.ChunkStore)
   527  			i++
   528  		}
   529  		//...which then gets passed to the round-robin file store
   530  		roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams())
   531  		//now we can actually upload a (random) file to the round-robin store
   532  		size := chunkCount * chunkSize
   533  		log.Debug("Storing data to file store")
   534  		fileHash, wait, err := roundRobinFileStore.Store(ctx, testutil.RandomReader(1, size), int64(size), false)
   535  		// wait until all chunks stored
   536  		if err != nil {
   537  			return err
   538  		}
   539  		err = wait(ctx)
   540  		if err != nil {
   541  			return err
   542  		}
   543  
   544  		log.Debug("Waiting for kademlia")
   545  		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   546  			return err
   547  		}
   548  
   549  		//get the pivot node's filestore
   550  		item, ok := sim.NodeItem(*sim.PivotNodeID(), bucketKeyFileStore)
   551  		if !ok {
   552  			return fmt.Errorf("No filestore")
   553  		}
   554  		pivotFileStore := item.(*storage.FileStore)
   555  		log.Debug("Starting retrieval routine")
   556  		go func() {
   557  			// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
   558  			// we must wait for the peer connections to have started before requesting
   559  			n, err := readAll(pivotFileStore, fileHash)
   560  			log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
   561  			if err != nil {
   562  				t.Fatalf("requesting chunks action error: %v", err)
   563  			}
   564  		}()
   565  
   566  		log.Debug("Watching for disconnections")
   567  		disconnections := sim.PeerEvents(
   568  			context.Background(),
   569  			sim.NodeIDs(),
   570  			simulation.NewPeerEventsFilter().Drop(),
   571  		)
   572  
   573  		go func() {
   574  			for d := range disconnections {
   575  				if d.Error != nil {
   576  					log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
   577  					t.Fatal(d.Error)
   578  				}
   579  			}
   580  		}()
   581  
   582  		//finally check that the pivot node gets all chunks via the root hash
   583  		log.Debug("Check retrieval")
   584  		success := true
   585  		var total int64
   586  		total, err = readAll(pivotFileStore, fileHash)
   587  		if err != nil {
   588  			return err
   589  		}
   590  		log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
   591  		if err != nil || total != int64(size) {
   592  			success = false
   593  		}
   594  
   595  		if !success {
   596  			return fmt.Errorf("Test failed, chunks not available on all nodes")
   597  		}
   598  		log.Debug("Test terminated successfully")
   599  		return nil
   600  	})
   601  	if result.Error != nil {
   602  		t.Fatal(result.Error)
   603  	}
   604  }
   605  
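         // BenchmarkDeliveryFromNodesWithoutCheck benchmarks delivery with SkipCheck enabled
         // (i.e. with direct chunk delivery instead of an offered/wanted hashes round trip).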
   606  func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
   607  	for chunks := 32; chunks <= 128; chunks *= 2 {
   608  		for i := 2; i < 32; i *= 2 {
   609  			b.Run(
   610  				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
   611  				func(b *testing.B) {
   612  					benchmarkDeliveryFromNodes(b, i, 1, chunks, true)
   613  				},
   614  			)
   615  		}
   616  	}
   617  }
   618  
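         // BenchmarkDeliveryFromNodesWithCheck benchmarks delivery with SkipCheck disabled,
         // for the same node and chunk counts as the benchmark above.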
   619  func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
   620  	for chunks := 32; chunks <= 128; chunks *= 2 {
   621  		for i := 2; i < 32; i *= 2 {
   622  			b.Run(
   623  				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
   624  				func(b *testing.B) {
   625  					benchmarkDeliveryFromNodes(b, i, 1, chunks, false)
   626  				},
   627  			)
   628  		}
   629  	}
   630  }
   631  
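         // benchmarkDeliveryFromNodes uploads chunkCount chunks to the last node of a chain of
         // `nodes` nodes and then measures how long the first (pivot) node needs to fetch all
         // of them through its NetStore.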
   632  func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) {
   633  	sim := simulation.New(map[string]simulation.ServiceFunc{
   634  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   635  			node := ctx.Config.Node()
   636  			addr := network.NewAddr(node)
   637  			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
   638  			if err != nil {
   639  				return nil, nil, err
   640  			}
   641  			bucket.Store(bucketKeyStore, store)
   642  			cleanup = func() {
   643  				os.RemoveAll(datadir)
   644  				store.Close()
   645  			}
   646  			localStore := store.(*storage.LocalStore)
   647  			netStore, err := storage.NewNetStore(localStore, nil)
   648  			if err != nil {
   649  				return nil, nil, err
   650  			}
   651  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   652  			delivery := NewDelivery(kad, netStore)
   653  			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
   654  
   655  			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
   656  				SkipCheck:       skipCheck,
   657  				Syncing:         SyncingDisabled,
   658  				Retrieval:       RetrievalDisabled,
   659  				SyncUpdateDelay: 0,
   660  			}, nil)
   661  
   662  			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
   663  			bucket.Store(bucketKeyFileStore, fileStore)
   664  
   665  			return r, cleanup, nil
   666  
   667  		},
   668  	})
   669  	defer sim.Close()
   670  
   671  	log.Info("Initializing test config")
   672  	_, err := sim.AddNodesAndConnectChain(nodes)
   673  	if err != nil {
   674  		b.Fatal(err)
   675  	}
   676  
   677  	ctx := context.Background()
   678  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
   679  		nodeIDs := sim.UpNodeIDs()
   680  		node := nodeIDs[len(nodeIDs)-1]
   681  
   682  		item, ok := sim.NodeItem(node, bucketKeyFileStore)
   683  		if !ok {
   684  			b.Fatal("No filestore")
   685  		}
   686  		remoteFileStore := item.(*storage.FileStore)
   687  
   688  		pivotNode := nodeIDs[0]
   689  		item, ok = sim.NodeItem(pivotNode, bucketKeyNetStore)
   690  		if !ok {
    691  			b.Fatal("No netstore")
   692  		}
   693  		netStore := item.(*storage.NetStore)
   694  
   695  		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   696  			return err
   697  		}
   698  
   699  		disconnections := sim.PeerEvents(
   700  			context.Background(),
   701  			sim.NodeIDs(),
   702  			simulation.NewPeerEventsFilter().Drop(),
   703  		)
   704  
   705  		go func() {
   706  			for d := range disconnections {
   707  				if d.Error != nil {
   708  					log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
   709  					b.Fatal(d.Error)
   710  				}
   711  			}
   712  		}()
   713  		// benchmark loop
   714  		b.ResetTimer()
   715  		b.StopTimer()
   716  	Loop:
   717  		for i := 0; i < b.N; i++ {
   718  			// uploading chunkCount random chunks to the last node
   719  			hashes := make([]storage.Address, chunkCount)
   720  			for i := 0; i < chunkCount; i++ {
    721  				// create chunks of the actual chunk size
   722  				ctx := context.TODO()
   723  				hash, wait, err := remoteFileStore.Store(ctx, testutil.RandomReader(i, chunkSize), int64(chunkSize), false)
   724  				if err != nil {
   725  					b.Fatalf("expected no error. got %v", err)
   726  				}
   727  				// wait until all chunks stored
   728  				err = wait(ctx)
   729  				if err != nil {
   730  					b.Fatalf("expected no error. got %v", err)
   731  				}
   732  				// collect the hashes
   733  				hashes[i] = hash
   734  			}
   735  			// now benchmark the actual retrieval
    736  			// netStore.Get is called for each hash in a goroutine and errors are collected
   737  			b.StartTimer()
   738  			errs := make(chan error)
   739  			for _, hash := range hashes {
   740  				go func(h storage.Address) {
   741  					_, err := netStore.Get(ctx, h)
   742  					log.Warn("test check netstore get", "hash", h, "err", err)
   743  					errs <- err
   744  				}(hash)
   745  			}
   746  			// count and report retrieval errors
   747  			// if there are misses then chunk timeout is too low for the distance and volume (?)
   748  			var total, misses int
   749  			for err := range errs {
   750  				if err != nil {
   751  					log.Warn(err.Error())
   752  					misses++
   753  				}
   754  				total++
   755  				if total == chunkCount {
   756  					break
   757  				}
   758  			}
   759  			b.StopTimer()
   760  
   761  			if misses > 0 {
    762  				err = fmt.Errorf("%v chunks not found out of %v", misses, total)
   763  				break Loop
   764  			}
   765  		}
   766  		if err != nil {
   767  			b.Fatal(err)
   768  		}
   769  		return nil
   770  	})
   771  	if result.Error != nil {
   772  		b.Fatal(result.Error)
   773  	}
   774  
   775  }