github.com/etsc3259/etsc@v0.0.0-20190109113336-a9c2c10f9c95/swarm/network/stream/delivery_test.go

// Copyright 2018 The go-etsc Authors
// This file is part of the go-etsc library.
//
// The go-etsc library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-etsc library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-etsc library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"bytes"
	"context"
	"fmt"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ETSC3259/etsc/node"
	"github.com/ETSC3259/etsc/p2p"
	"github.com/ETSC3259/etsc/p2p/enode"
	"github.com/ETSC3259/etsc/p2p/protocols"
	"github.com/ETSC3259/etsc/p2p/simulations/adapters"
	p2ptest "github.com/ETSC3259/etsc/p2p/testing"
	"github.com/ETSC3259/etsc/swarm/log"
	"github.com/ETSC3259/etsc/swarm/network"
	pq "github.com/ETSC3259/etsc/swarm/network/priorityqueue"
	"github.com/ETSC3259/etsc/swarm/network/simulation"
	"github.com/ETSC3259/etsc/swarm/state"
	"github.com/ETSC3259/etsc/swarm/storage"
	"github.com/ETSC3259/etsc/swarm/testutil"
)

//Tests initializing a retrieve request
func TestStreamerRetrieveRequest(t *testing.T) {
	regOpts := &RegistryOptions{
		Retrieval: RetrievalClientOnly,
		Syncing:   SyncingDisabled,
	}
	tester, streamer, _, teardown, err := newStreamerTester(t, regOpts)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	node := tester.Nodes[0]

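	// fire a retrieve request for hash0; the streamer's delivery is expected
	// to forward it to the connected peer as a RetrieveRequestMsg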
	ctx := context.Background()
	req := network.NewRequest(
		storage.Address(hash0[:]),
		true,
		&sync.Map{},
	)
	streamer.delivery.RequestFromPeers(ctx, req)

	stream := NewStream(swarmChunkServerStreamName, "", true)

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Expects: []p2ptest.Expect{
			{ //start expecting a subscription for RETRIEVE_REQUEST due to `RetrievalClientOnly`
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					History:  nil,
					Priority: Top,
				},
				Peer: node.ID(),
			},
			{ //expect a retrieve request message for the given hash
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr:      hash0[:],
					SkipCheck: true,
				},
				Peer: node.ID(),
			},
		},
	})

	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}
}

//Test requesting a chunk from a peer then issuing an "empty" OfferedHashesMsg (no hashes available yet)
//Should time out as the peer does not have the chunk (no syncing happened previously)
func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(t, &RegistryOptions{
		Retrieval: RetrievalEnabled,
		Syncing:   SyncingDisabled, //do no syncing
	})
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	node := tester.Nodes[0]

	chunk := storage.NewChunk(storage.Address(hash0[:]), nil)

	peer := streamer.getPeer(node.ID())

	stream := NewStream(swarmChunkServerStreamName, "", true)
	//simulate pre-subscription to RETRIEVE_REQUEST stream on peer
	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
		Stream:   stream,
		History:  nil,
		Priority: Top,
	})

	//test the exchange
	err = tester.TestExchanges(p2ptest.Exchange{
		Expects: []p2ptest.Expect{
			{ //first expect a subscription to the RETRIEVE_REQUEST stream
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					History:  nil,
					Priority: Top,
				},
				Peer: node.ID(),
			},
		},
	}, p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{ //then the actual RETRIEVE_REQUEST....
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr: chunk.Address()[:],
				},
				Peer: node.ID(),
			},
		},
		Expects: []p2ptest.Expect{
			{ //to which the peer responds with offered hashes
				Code: 1,
				Msg: &OfferedHashesMsg{
					HandoverProof: nil,
					Hashes:        nil,
					From:          0,
					To:            0,
				},
				Peer: node.ID(),
			},
		},
	})

	//should fail with a timeout as the peer we are requesting
	//the chunk from does not have the chunk
	expectedError := `exchange #1 "RetrieveRequestMsg": timed out`
	if err == nil || err.Error() != expectedError {
		t.Fatalf("Expected error %v, got %v", expectedError, err)
	}
}

// upstream request server receives a retrieve request and responds with
// offered hashes, or with a direct chunk delivery if SkipCheck is set to true
func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
	tester, streamer, localStore, teardown, err := newStreamerTester(t, &RegistryOptions{
		Retrieval: RetrievalEnabled,
		Syncing:   SyncingDisabled,
	})
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	node := tester.Nodes[0]

	peer := streamer.getPeer(node.ID())

	stream := NewStream(swarmChunkServerStreamName, "", true)

	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
		Stream:   stream,
		History:  nil,
		Priority: Top,
	})

	hash := storage.Address(hash0[:])
	chunk := storage.NewChunk(hash, hash)
	err = localStore.Put(context.TODO(), chunk)
	if err != nil {
		t.Fatalf("Expected no err got %v", err)
	}

	err = tester.TestExchanges(p2ptest.Exchange{
		Expects: []p2ptest.Expect{
			{
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					History:  nil,
					Priority: Top,
				},
				Peer: node.ID(),
			},
		},
	}, p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr: hash,
				},
				Peer: node.ID(),
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 1,
				Msg: &OfferedHashesMsg{
					HandoverProof: &HandoverProof{
						Handover: &Handover{},
					},
					Hashes: hash,
					From:   0,
					// TODO: why is this 32???
					To:     32,
					Stream: stream,
				},
				Peer: node.ID(),
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}

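	// store a second chunk and request it with SkipCheck set: the upstream
	// peer is expected to respond with a ChunkDeliveryMsg directly instead of
	// an OfferedHashesMsg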
	hash = storage.Address(hash1[:])
	chunk = storage.NewChunk(hash, hash1[:])
	err = localStore.Put(context.TODO(), chunk)
	if err != nil {
		t.Fatalf("Expected no err got %v", err)
	}

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr:      hash,
					SkipCheck: true,
				},
				Peer: node.ID(),
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 6,
				Msg: &ChunkDeliveryMsg{
					Addr:  hash,
					SData: hash,
				},
				Peer: node.ID(),
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}
}

// if there is one peer in the Kademlia, RequestFromPeers should return it
func TestRequestFromPeers(t *testing.T) {
	dummyPeerID := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")

	addr := network.RandomAddr()
	to := network.NewKademlia(addr.OAddr, network.NewKadParams())
	delivery := NewDelivery(to, nil)
	protocolsPeer := protocols.NewPeer(p2p.NewPeer(dummyPeerID, "dummy", nil), nil, nil)
	peer := network.NewPeer(&network.BzzPeer{
		BzzAddr:   network.RandomAddr(),
		LightNode: false,
		Peer:      protocolsPeer,
	}, to)
	to.On(peer)
	r := NewRegistry(addr.ID(), delivery, nil, nil, nil)

	// an empty priority queue has to be created to prevent a goroutine from being started after the test has finished
	sp := &Peer{
		Peer:     protocolsPeer,
		pq:       pq.New(int(PriorityQueue), PriorityQueueCap),
		streamer: r,
	}
	r.setPeer(sp)
	req := network.NewRequest(
		storage.Address(hash0[:]),
		true,
		&sync.Map{},
	)
	ctx := context.Background()
	id, _, err := delivery.RequestFromPeers(ctx, req)

	if err != nil {
		t.Fatal(err)
	}
	if *id != dummyPeerID {
		t.Fatalf("Expected peer ID %v, got %v", dummyPeerID, id)
	}
}

// RequestFromPeers should not return light nodes
func TestRequestFromPeersWithLightNode(t *testing.T) {
	dummyPeerID := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")

	addr := network.RandomAddr()
	to := network.NewKademlia(addr.OAddr, network.NewKadParams())
	delivery := NewDelivery(to, nil)

	protocolsPeer := protocols.NewPeer(p2p.NewPeer(dummyPeerID, "dummy", nil), nil, nil)
	// setting up a lightnode
	peer := network.NewPeer(&network.BzzPeer{
		BzzAddr:   network.RandomAddr(),
		LightNode: true,
		Peer:      protocolsPeer,
	}, to)
	to.On(peer)
	r := NewRegistry(addr.ID(), delivery, nil, nil, nil)
	// an empty priority queue has to be created to prevent a goroutine from being started after the test has finished
	sp := &Peer{
		Peer:     protocolsPeer,
		pq:       pq.New(int(PriorityQueue), PriorityQueueCap),
		streamer: r,
	}
	r.setPeer(sp)

	req := network.NewRequest(
		storage.Address(hash0[:]),
		true,
		&sync.Map{},
	)

	ctx := context.Background()
	// making a request which should return with "no peer found"
	_, _, err := delivery.RequestFromPeers(ctx, req)

	expectedError := "no peer found"
	if err == nil || err.Error() != expectedError {
		t.Fatalf("expected '%v', got %v", expectedError, err)
	}
}

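// TestStreamerDownstreamChunkDeliveryMsgExchange registers a client for a
// custom "foo" stream, subscribes to it on the test peer and then triggers a
// ChunkDeliveryMsg from that peer, verifying that the delivered chunk ends up
// in the local store.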
func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
	tester, streamer, localStore, teardown, err := newStreamerTester(t, &RegistryOptions{
		Retrieval: RetrievalDisabled,
		Syncing:   SyncingDisabled,
	})
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
		return &testClient{
			t: t,
		}, nil
	})

	node := tester.Nodes[0]

	//subscribe to custom stream
	stream := NewStream("foo", "", true)
	err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	chunkKey := hash0[:]
	chunkData := hash1[:]

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "Subscribe message",
		Expects: []p2ptest.Expect{
			{ //first expect subscription to the custom stream...
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					History:  NewRange(5, 8),
					Priority: Top,
				},
				Peer: node.ID(),
			},
		},
	},
		p2ptest.Exchange{
			Label: "ChunkDelivery message",
			Triggers: []p2ptest.Trigger{
				{ //...then trigger a chunk delivery for the given chunk from peer in order for
					//local node to get the chunk delivered
					Code: 6,
					Msg: &ChunkDeliveryMsg{
						Addr:  chunkKey,
						SData: chunkData,
					},
					Peer: node.ID(),
				},
			},
		})

	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// wait for the chunk to get stored
	storedChunk, err := localStore.Get(ctx, chunkKey)
	for err != nil {
		select {
		case <-ctx.Done():
			t.Fatalf("Chunk is not in localstore after timeout, err: %v", err)
		default:
		}
		storedChunk, err = localStore.Get(ctx, chunkKey)
		time.Sleep(50 * time.Millisecond)
	}

	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	if !bytes.Equal(storedChunk.Data(), chunkData) {
		t.Fatal("Retrieved chunk has different data than original")
	}

}

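// TestDeliveryFromNodes runs the delivery test for chains of 2, 4, 8 and 16
// nodes, both with and without the SkipCheck optimisation.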
func TestDeliveryFromNodes(t *testing.T) {
	testDeliveryFromNodes(t, 2, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 2, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 4, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 4, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 8, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 8, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 16, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 16, 1, dataChunkCount, false)
}

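// testDeliveryFromNodes sets up a simulation of `nodes` chained nodes, uploads
// a random file whose chunks are spread round-robin over the stores of all
// non-pivot nodes, and then checks that the pivot node can retrieve the whole
// file via retrieve requests.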
func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			node := ctx.Config.Node()
			addr := network.NewAddr(node)
			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}

			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				SkipCheck: skipCheck,
				Syncing:   SyncingDisabled,
				Retrieval: RetrievalEnabled,
			})
			bucket.Store(bucketKeyRegistry, r)

			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil

		},
	})
	defer sim.Close()

	log.Info("Adding nodes to simulation")
	_, err := sim.AddNodesAndConnectChain(nodes)
	if err != nil {
		t.Fatal(err)
	}

	log.Info("Starting simulation")
	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		//determine the pivot node to be the first node of the simulation
		sim.SetPivotNode(nodeIDs[0])
		//distribute chunks of a random file into Stores of nodes 1 to nodes
		//we will do this by creating a file store with an underlying round-robin store:
		//the file store will create a hash for the uploaded file, but every chunk will be
		//distributed to different nodes via round-robin scheduling
		log.Debug("Writing file to round-robin file store")
		//to do this, we create an array of chunk stores (one per node, minus the pivot node)
		stores := make([]storage.ChunkStore, len(nodeIDs)-1)
		//we then need to get all stores from the sim....
		lStores := sim.NodesItems(bucketKeyStore)
		i := 0
		//...iterate the buckets...
		for id, bucketVal := range lStores {
			//...and remove the one which is the pivot node
			if id == *sim.PivotNodeID() {
				continue
			}
			//the other ones are added to the array...
			stores[i] = bucketVal.(storage.ChunkStore)
			i++
		}
		//...which then gets passed to the round-robin file store
		roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams())
		//now we can actually upload a (random) file to the round-robin store
		size := chunkCount * chunkSize
		log.Debug("Storing data to file store")
		fileHash, wait, err := roundRobinFileStore.Store(ctx, testutil.RandomReader(1, size), int64(size), false)
		// wait until all chunks stored
		if err != nil {
			return err
		}
		err = wait(ctx)
		if err != nil {
			return err
		}

		log.Debug("Waiting for kademlia")
		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		//get the pivot node's filestore
		item, ok := sim.NodeItem(*sim.PivotNodeID(), bucketKeyFileStore)
		if !ok {
			return fmt.Errorf("No filestore")
		}
		pivotFileStore := item.(*storage.FileStore)
		log.Debug("Starting retrieval routine")
		go func() {
			// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
			// we must wait for the peer connections to have started before requesting
			n, err := readAll(pivotFileStore, fileHash)
			log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
			if err != nil {
				t.Fatalf("requesting chunks action error: %v", err)
			}
		}()

		log.Debug("Watching for disconnections")
		disconnections := sim.PeerEvents(
			context.Background(),
			sim.NodeIDs(),
			simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
		)

		go func() {
			for d := range disconnections {
				if d.Error != nil {
					log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
					t.Fatal(d.Error)
				}
			}
		}()

		//finally check that the pivot node gets all chunks via the root hash
		log.Debug("Check retrieval")
		success := true
		var total int64
		total, err = readAll(pivotFileStore, fileHash)
		if err != nil {
			return err
		}
		log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
		if err != nil || total != int64(size) {
			success = false
		}

		if !success {
			return fmt.Errorf("Test failed, chunks not available on all nodes")
		}
		log.Debug("Test terminated successfully")
		return nil
	})
	if result.Error != nil {
		t.Fatal(result.Error)
	}
}

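// BenchmarkDeliveryFromNodesWithoutCheck benchmarks chunk retrieval with
// SkipCheck enabled (chunks are delivered directly, without the offered
// hashes round trip) for 2 to 16 nodes and 32 to 128 chunks.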
func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
	for chunks := 32; chunks <= 128; chunks *= 2 {
		for i := 2; i < 32; i *= 2 {
			b.Run(
				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
				func(b *testing.B) {
					benchmarkDeliveryFromNodes(b, i, 1, chunks, true)
				},
			)
		}
	}
}

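// BenchmarkDeliveryFromNodesWithCheck runs the same benchmark but with
// SkipCheck disabled, so retrieval goes through the offered hashes check.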
func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
	for chunks := 32; chunks <= 128; chunks *= 2 {
		for i := 2; i < 32; i *= 2 {
			b.Run(
				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
				func(b *testing.B) {
					benchmarkDeliveryFromNodes(b, i, 1, chunks, false)
				},
			)
		}
	}
}

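// benchmarkDeliveryFromNodes sets up a chain of `nodes` simulated nodes,
// uploads chunkCount random chunks to the last node and measures how long the
// first (pivot) node needs to fetch all of them through its NetStore.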
func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			node := ctx.Config.Node()
			addr := network.NewAddr(node)
			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				SkipCheck:       skipCheck,
				Syncing:         SyncingDisabled,
				Retrieval:       RetrievalDisabled,
				SyncUpdateDelay: 0,
			})

			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil

		},
	})
	defer sim.Close()

	log.Info("Initializing test config")
	_, err := sim.AddNodesAndConnectChain(nodes)
	if err != nil {
		b.Fatal(err)
	}

	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		node := nodeIDs[len(nodeIDs)-1]

		item, ok := sim.NodeItem(node, bucketKeyFileStore)
		if !ok {
			b.Fatal("No filestore")
		}
		remoteFileStore := item.(*storage.FileStore)

		pivotNode := nodeIDs[0]
		item, ok = sim.NodeItem(pivotNode, bucketKeyNetStore)
		if !ok {
			b.Fatal("No netstore")
		}
		netStore := item.(*storage.NetStore)

		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		disconnections := sim.PeerEvents(
			context.Background(),
			sim.NodeIDs(),
			simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
		)

		go func() {
			for d := range disconnections {
				if d.Error != nil {
					log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
					b.Fatal(d.Error)
				}
			}
		}()
		// benchmark loop
		b.ResetTimer()
		b.StopTimer()
	Loop:
		for i := 0; i < b.N; i++ {
			// uploading chunkCount random chunks to the last node
			hashes := make([]storage.Address, chunkCount)
			for i := 0; i < chunkCount; i++ {
				// create chunks of the actual chunk size
				ctx := context.TODO()
				hash, wait, err := remoteFileStore.Store(ctx, testutil.RandomReader(i, chunkSize), int64(chunkSize), false)
				if err != nil {
					b.Fatalf("expected no error. got %v", err)
				}
				// wait until all chunks stored
				err = wait(ctx)
				if err != nil {
					b.Fatalf("expected no error. got %v", err)
				}
				// collect the hashes
				hashes[i] = hash
			}
			// now benchmark the actual retrieval
			// netStore.Get is called for each hash in a goroutine and errors are collected
			b.StartTimer()
			errs := make(chan error)
			for _, hash := range hashes {
				go func(h storage.Address) {
					_, err := netStore.Get(ctx, h)
					log.Warn("test check netstore get", "hash", h, "err", err)
					errs <- err
				}(hash)
			}
			// count and report retrieval errors
			// if there are misses then chunk timeout is too low for the distance and volume (?)
			var total, misses int
			for err := range errs {
				if err != nil {
					log.Warn(err.Error())
					misses++
				}
				total++
				if total == chunkCount {
					break
				}
			}
			b.StopTimer()

			if misses > 0 {
				err = fmt.Errorf("%v chunk not found out of %v", misses, total)
				break Loop
			}
		}
		if err != nil {
			b.Fatal(err)
		}
		return nil
	})
	if result.Error != nil {
		b.Fatal(result.Error)
	}

}