github.com/vccomnet/occchain@v0.0.0-20181129092339-c57d4bab23fb/swarm/network/stream/delivery_test.go

     1  // Copyright 2018 The go-blockchain Authors
     2  // This file is part of the go-blockchain library.
     3  //
     4  // The go-blockchain library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-blockchain library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-blockchain library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package stream
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	crand "crypto/rand"
    23  	"fmt"
    24  	"io"
    25  	"os"
    26  	"sync"
    27  	"testing"
    28  	"time"
    29  
    30  	"github.com/blockchain/go-blockchain/node"
    31  	"github.com/blockchain/go-blockchain/p2p"
    32  	"github.com/blockchain/go-blockchain/p2p/enode"
    33  	"github.com/blockchain/go-blockchain/p2p/protocols"
    34  	"github.com/blockchain/go-blockchain/p2p/simulations/adapters"
    35  	p2ptest "github.com/blockchain/go-blockchain/p2p/testing"
    36  	"github.com/blockchain/go-blockchain/swarm/log"
    37  	"github.com/blockchain/go-blockchain/swarm/network"
    38  	pq "github.com/blockchain/go-blockchain/swarm/network/priorityqueue"
    39  	"github.com/blockchain/go-blockchain/swarm/network/simulation"
    40  	"github.com/blockchain/go-blockchain/swarm/state"
    41  	"github.com/blockchain/go-blockchain/swarm/storage"
    42  )
    43  
    44  // TestStreamerRetrieveRequest tests initializing a retrieve request.
    45  func TestStreamerRetrieveRequest(t *testing.T) {
    46  	regOpts := &RegistryOptions{
    47  		Retrieval: RetrievalClientOnly,
    48  		Syncing:   SyncingDisabled,
    49  	}
    50  	tester, streamer, _, teardown, err := newStreamerTester(t, regOpts)
    51  	defer teardown()
    52  	if err != nil {
    53  		t.Fatal(err)
    54  	}
    55  
    56  	node := tester.Nodes[0]
    57  
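        	// fire a retrieve request for hash0; the exchange below expects the peer to
        	// receive a SubscribeMsg (due to RetrievalClientOnly) followed by the
        	// corresponding RetrieveRequestMsg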
    58  	ctx := context.Background()
    59  	req := network.NewRequest(
    60  		storage.Address(hash0[:]),
    61  		true,
    62  		&sync.Map{},
    63  	)
    64  	streamer.delivery.RequestFromPeers(ctx, req)
    65  
    66  	stream := NewStream(swarmChunkServerStreamName, "", true)
    67  
    68  	err = tester.TestExchanges(p2ptest.Exchange{
    69  		Label: "RetrieveRequestMsg",
    70  		Expects: []p2ptest.Expect{
    71  			{ //start expecting a subscription for RETRIEVE_REQUEST due to `RetrievalClientOnly`
    72  				Code: 4,
    73  				Msg: &SubscribeMsg{
    74  					Stream:   stream,
    75  					History:  nil,
    76  					Priority: Top,
    77  				},
    78  				Peer: node.ID(),
    79  			},
    80  			{ //expect a retrieve request message for the given hash
    81  				Code: 5,
    82  				Msg: &RetrieveRequestMsg{
    83  					Addr:      hash0[:],
    84  					SkipCheck: true,
    85  				},
    86  				Peer: node.ID(),
    87  			},
    88  		},
    89  	})
    90  
    91  	if err != nil {
    92  		t.Fatalf("Expected no error, got %v", err)
    93  	}
    94  }
    95  
    96  // Test requesting a chunk from a peer, then issuing an "empty" OfferedHashesMsg (no hashes available yet).
    97  // Should time out as the peer does not have the chunk (no syncing happened previously).
    98  func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
    99  	tester, streamer, _, teardown, err := newStreamerTester(t, &RegistryOptions{
   100  		Retrieval: RetrievalEnabled,
   101  		Syncing:   SyncingDisabled, //do no syncing
   102  	})
   103  	defer teardown()
   104  	if err != nil {
   105  		t.Fatal(err)
   106  	}
   107  
   108  	node := tester.Nodes[0]
   109  
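        	// the chunk is never stored locally; only its address is used for the retrieve request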
   110  	chunk := storage.NewChunk(storage.Address(hash0[:]), nil)
   111  
   112  	peer := streamer.getPeer(node.ID())
   113  
   114  	stream := NewStream(swarmChunkServerStreamName, "", true)
   115  	//simulate pre-subscription to RETRIEVE_REQUEST stream on peer
   116  	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
   117  		Stream:   stream,
   118  		History:  nil,
   119  		Priority: Top,
   120  	})
   121  
   122  	//test the exchange
   123  	err = tester.TestExchanges(p2ptest.Exchange{
   124  		Expects: []p2ptest.Expect{
   125  			{ //first expect a subscription to the RETRIEVE_REQUEST stream
   126  				Code: 4,
   127  				Msg: &SubscribeMsg{
   128  					Stream:   stream,
   129  					History:  nil,
   130  					Priority: Top,
   131  				},
   132  				Peer: node.ID(),
   133  			},
   134  		},
   135  	}, p2ptest.Exchange{
   136  		Label: "RetrieveRequestMsg",
   137  		Triggers: []p2ptest.Trigger{
   138  			{ //then the actual RETRIEVE_REQUEST....
   139  				Code: 5,
   140  				Msg: &RetrieveRequestMsg{
   141  					Addr: chunk.Address()[:],
   142  				},
   143  				Peer: node.ID(),
   144  			},
   145  		},
   146  		Expects: []p2ptest.Expect{
   147  			{ //to which the peer responds with offered hashes
   148  				Code: 1,
   149  				Msg: &OfferedHashesMsg{
   150  					HandoverProof: nil,
   151  					Hashes:        nil,
   152  					From:          0,
   153  					To:            0,
   154  				},
   155  				Peer: node.ID(),
   156  			},
   157  		},
   158  	})
   159  
   160  	//should fail with a timeout as the peer we are requesting
   161  	//the chunk from does not have the chunk
   162  	expectedError := `exchange #1 "RetrieveRequestMsg": timed out`
   163  	if err == nil || err.Error() != expectedError {
   164  		t.Fatalf("Expected error %v, got %v", expectedError, err)
   165  	}
   166  }
   167  
   168  // The upstream request server receives a RetrieveRequestMsg and responds with
   169  // offered hashes, or with a direct chunk delivery if SkipCheck is set to true.
   170  func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
   171  	tester, streamer, localStore, teardown, err := newStreamerTester(t, &RegistryOptions{
   172  		Retrieval: RetrievalEnabled,
   173  		Syncing:   SyncingDisabled,
   174  	})
   175  	defer teardown()
   176  	if err != nil {
   177  		t.Fatal(err)
   178  	}
   179  
   180  	node := tester.Nodes[0]
   181  
   182  	peer := streamer.getPeer(node.ID())
   183  
   184  	stream := NewStream(swarmChunkServerStreamName, "", true)
   185  
   186  	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
   187  		Stream:   stream,
   188  		History:  nil,
   189  		Priority: Top,
   190  	})
   191  
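        	// store a chunk locally so the upstream chunk server has something to offer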
   192  	hash := storage.Address(hash0[:])
   193  	chunk := storage.NewChunk(hash, hash)
   194  	err = localStore.Put(context.TODO(), chunk)
   195  	if err != nil {
   196  		t.Fatalf("Expected no err got %v", err)
   197  	}
   198  
   199  	err = tester.TestExchanges(p2ptest.Exchange{
   200  		Expects: []p2ptest.Expect{
   201  			{
   202  				Code: 4,
   203  				Msg: &SubscribeMsg{
   204  					Stream:   stream,
   205  					History:  nil,
   206  					Priority: Top,
   207  				},
   208  				Peer: node.ID(),
   209  			},
   210  		},
   211  	}, p2ptest.Exchange{
   212  		Label: "RetrieveRequestMsg",
   213  		Triggers: []p2ptest.Trigger{
   214  			{
   215  				Code: 5,
   216  				Msg: &RetrieveRequestMsg{
   217  					Addr: hash,
   218  				},
   219  				Peer: node.ID(),
   220  			},
   221  		},
   222  		Expects: []p2ptest.Expect{
   223  			{
   224  				Code: 1,
   225  				Msg: &OfferedHashesMsg{
   226  					HandoverProof: &HandoverProof{
   227  						Handover: &Handover{},
   228  					},
   229  					Hashes: hash,
   230  					From:   0,
   231  					// TODO: why is this 32???
   232  					To:     32,
   233  					Stream: stream,
   234  				},
   235  				Peer: node.ID(),
   236  			},
   237  		},
   238  	})
   239  
   240  	if err != nil {
   241  		t.Fatal(err)
   242  	}
   243  
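        	// repeat with SkipCheck set: instead of offered hashes, the chunk itself is
        	// expected to be delivered directly in a ChunkDeliveryMsg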
   244  	hash = storage.Address(hash1[:])
   245  	chunk = storage.NewChunk(hash, hash1[:])
   246  	err = localStore.Put(context.TODO(), chunk)
   247  	if err != nil {
   248  		t.Fatalf("Expected no err got %v", err)
   249  	}
   250  
   251  	err = tester.TestExchanges(p2ptest.Exchange{
   252  		Label: "RetrieveRequestMsg",
   253  		Triggers: []p2ptest.Trigger{
   254  			{
   255  				Code: 5,
   256  				Msg: &RetrieveRequestMsg{
   257  					Addr:      hash,
   258  					SkipCheck: true,
   259  				},
   260  				Peer: node.ID(),
   261  			},
   262  		},
   263  		Expects: []p2ptest.Expect{
   264  			{
   265  				Code: 6,
   266  				Msg: &ChunkDeliveryMsg{
   267  					Addr:  hash,
   268  					SData: hash,
   269  				},
   270  				Peer: node.ID(),
   271  			},
   272  		},
   273  	})
   274  
   275  	if err != nil {
   276  		t.Fatal(err)
   277  	}
   278  }
   279  
   280  // if there is one peer in the Kademlia, RequestFromPeers should return it
   281  func TestRequestFromPeers(t *testing.T) {
   282  	dummyPeerID := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")
   283  
   284  	addr := network.RandomAddr()
   285  	to := network.NewKademlia(addr.OAddr, network.NewKadParams())
   286  	delivery := NewDelivery(to, nil)
   287  	protocolsPeer := protocols.NewPeer(p2p.NewPeer(dummyPeerID, "dummy", nil), nil, nil)
   288  	peer := network.NewPeer(&network.BzzPeer{
   289  		BzzAddr:   network.RandomAddr(),
   290  		LightNode: false,
   291  		Peer:      protocolsPeer,
   292  	}, to)
   293  	to.On(peer)
   294  	r := NewRegistry(addr.ID(), delivery, nil, nil, nil)
   295  
   296  	// an empty priorityQueue has to be created to prevent a goroutine from running after the test has finished
   297  	sp := &Peer{
   298  		Peer:     protocolsPeer,
   299  		pq:       pq.New(int(PriorityQueue), PriorityQueueCap),
   300  		streamer: r,
   301  	}
   302  	r.setPeer(sp)
   303  	req := network.NewRequest(
   304  		storage.Address(hash0[:]),
   305  		true,
   306  		&sync.Map{},
   307  	)
   308  	ctx := context.Background()
   309  	id, _, err := delivery.RequestFromPeers(ctx, req)
   310  
   311  	if err != nil {
   312  		t.Fatal(err)
   313  	}
   314  	if *id != dummyPeerID {
   315  		t.Fatalf("Expected an id, got %v", id)
   316  	}
   317  }
   318  
   319  // RequestFromPeers should not return light nodes
   320  func TestRequestFromPeersWithLightNode(t *testing.T) {
   321  	dummyPeerID := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")
   322  
   323  	addr := network.RandomAddr()
   324  	to := network.NewKademlia(addr.OAddr, network.NewKadParams())
   325  	delivery := NewDelivery(to, nil)
   326  
   327  	protocolsPeer := protocols.NewPeer(p2p.NewPeer(dummyPeerID, "dummy", nil), nil, nil)
   328  	// setting up a lightnode
   329  	peer := network.NewPeer(&network.BzzPeer{
   330  		BzzAddr:   network.RandomAddr(),
   331  		LightNode: true,
   332  		Peer:      protocolsPeer,
   333  	}, to)
   334  	to.On(peer)
   335  	r := NewRegistry(addr.ID(), delivery, nil, nil, nil)
   336  	// an empty priorityQueue has to be created to prevent a goroutine from running after the test has finished
   337  	sp := &Peer{
   338  		Peer:     protocolsPeer,
   339  		pq:       pq.New(int(PriorityQueue), PriorityQueueCap),
   340  		streamer: r,
   341  	}
   342  	r.setPeer(sp)
   343  
   344  	req := network.NewRequest(
   345  		storage.Address(hash0[:]),
   346  		true,
   347  		&sync.Map{},
   348  	)
   349  
   350  	ctx := context.Background()
   351  	// make a request, which should fail with "no peer found" since the only peer in the kademlia is a light node
   352  	_, _, err := delivery.RequestFromPeers(ctx, req)
   353  
   354  	expectedError := "no peer found"
   355  	if err.Error() != expectedError {
   356  		t.Fatalf("expected '%v', got %v", expectedError, err)
   357  	}
   358  }
   359  
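        // TestStreamerDownstreamChunkDeliveryMsgExchange registers a custom "foo" stream client,
        // subscribes to it, triggers a ChunkDeliveryMsg from the peer and verifies that the
        // delivered chunk ends up in the local store with the expected data.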
   360  func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
   361  	tester, streamer, localStore, teardown, err := newStreamerTester(t, &RegistryOptions{
   362  		Retrieval: RetrievalDisabled,
   363  		Syncing:   SyncingDisabled,
   364  	})
   365  	defer teardown()
   366  	if err != nil {
   367  		t.Fatal(err)
   368  	}
   369  
   370  	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
   371  		return &testClient{
   372  			t: t,
   373  		}, nil
   374  	})
   375  
   376  	node := tester.Nodes[0]
   377  
   378  	//subscribe to custom stream
   379  	stream := NewStream("foo", "", true)
   380  	err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
   381  	if err != nil {
   382  		t.Fatalf("Expected no error, got %v", err)
   383  	}
   384  
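        	// hash0 serves as the chunk address and hash1 as its payload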
   385  	chunkKey := hash0[:]
   386  	chunkData := hash1[:]
   387  
   388  	err = tester.TestExchanges(p2ptest.Exchange{
   389  		Label: "Subscribe message",
   390  		Expects: []p2ptest.Expect{
   391  			{ //first expect subscription to the custom stream...
   392  				Code: 4,
   393  				Msg: &SubscribeMsg{
   394  					Stream:   stream,
   395  					History:  NewRange(5, 8),
   396  					Priority: Top,
   397  				},
   398  				Peer: node.ID(),
   399  			},
   400  		},
   401  	},
   402  		p2ptest.Exchange{
   403  			Label: "ChunkDelivery message",
   404  			Triggers: []p2ptest.Trigger{
   405  			{ //...then trigger a chunk delivery for the given chunk from the peer so that
   406  				//the local node gets the chunk delivered
   407  					Code: 6,
   408  					Msg: &ChunkDeliveryMsg{
   409  						Addr:  chunkKey,
   410  						SData: chunkData,
   411  					},
   412  					Peer: node.ID(),
   413  				},
   414  			},
   415  		})
   416  
   417  	if err != nil {
   418  		t.Fatalf("Expected no error, got %v", err)
   419  	}
   420  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
   421  	defer cancel()
   422  
   423  	// wait for the chunk to get stored
   424  	storedChunk, err := localStore.Get(ctx, chunkKey)
   425  	for err != nil {
   426  		select {
   427  		case <-ctx.Done():
   428  			t.Fatalf("Chunk is not in localstore after timeout, err: %v", err)
   429  		default:
   430  		}
   431  		storedChunk, err = localStore.Get(ctx, chunkKey)
   432  		time.Sleep(50 * time.Millisecond)
   433  	}
   434  
   435  	if err != nil {
   436  		t.Fatalf("Expected no error, got %v", err)
   437  	}
   438  
   439  	if !bytes.Equal(storedChunk.Data(), chunkData) {
   440  		t.Fatal("Retrieved chunk has different data than original")
   441  	}
   442  
   443  }
   444  
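        // TestDeliveryFromNodes runs the delivery simulation on chains of 2, 4, 8 and 16 nodes,
        // both with and without SkipCheck.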
   445  func TestDeliveryFromNodes(t *testing.T) {
   446  	testDeliveryFromNodes(t, 2, 1, dataChunkCount, true)
   447  	testDeliveryFromNodes(t, 2, 1, dataChunkCount, false)
   448  	testDeliveryFromNodes(t, 4, 1, dataChunkCount, true)
   449  	testDeliveryFromNodes(t, 4, 1, dataChunkCount, false)
   450  	testDeliveryFromNodes(t, 8, 1, dataChunkCount, true)
   451  	testDeliveryFromNodes(t, 8, 1, dataChunkCount, false)
   452  	testDeliveryFromNodes(t, 16, 1, dataChunkCount, true)
   453  	testDeliveryFromNodes(t, 16, 1, dataChunkCount, false)
   454  }
   455  
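        // testDeliveryFromNodes sets up a simulation of `nodes` nodes connected in a chain,
        // uploads a random file whose chunks are distributed round-robin over the stores of
        // all nodes except the pivot node, and then checks that the pivot node can retrieve
        // the complete file through retrieve requests.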
   456  func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) {
   457  	sim := simulation.New(map[string]simulation.ServiceFunc{
   458  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   459  			node := ctx.Config.Node()
   460  			addr := network.NewAddr(node)
   461  			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
   462  			if err != nil {
   463  				return nil, nil, err
   464  			}
   465  			bucket.Store(bucketKeyStore, store)
   466  			cleanup = func() {
   467  				os.RemoveAll(datadir)
   468  				store.Close()
   469  			}
   470  			localStore := store.(*storage.LocalStore)
   471  			netStore, err := storage.NewNetStore(localStore, nil)
   472  			if err != nil {
   473  				return nil, nil, err
   474  			}
   475  
   476  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   477  			delivery := NewDelivery(kad, netStore)
   478  			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
   479  
   480  			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
   481  				SkipCheck: skipCheck,
   482  				Syncing:   SyncingDisabled,
   483  				Retrieval: RetrievalEnabled,
   484  			})
   485  			bucket.Store(bucketKeyRegistry, r)
   486  
   487  			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
   488  			bucket.Store(bucketKeyFileStore, fileStore)
   489  
   490  			return r, cleanup, nil
   491  
   492  		},
   493  	})
   494  	defer sim.Close()
   495  
   496  	log.Info("Adding nodes to simulation")
   497  	_, err := sim.AddNodesAndConnectChain(nodes)
   498  	if err != nil {
   499  		t.Fatal(err)
   500  	}
   501  
   502  	log.Info("Starting simulation")
   503  	ctx := context.Background()
   504  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
   505  		nodeIDs := sim.UpNodeIDs()
   506  		//determine the pivot node to be the first node of the simulation
   507  		sim.SetPivotNode(nodeIDs[0])
   508  		//distribute chunks of a random file into Stores of nodes 1 to nodes
   509  		//we will do this by creating a file store with an underlying round-robin store:
   510  		//the file store will create a hash for the uploaded file, but every chunk will be
   511  		//distributed to different nodes via round-robin scheduling
   512  		log.Debug("Writing file to round-robin file store")
   513  		//to do this, we create a slice of chunk stores with one entry for each node except the pivot node
   514  		stores := make([]storage.ChunkStore, len(nodeIDs)-1)
   515  		//we then need to get all stores from the sim....
   516  		lStores := sim.NodesItems(bucketKeyStore)
   517  		i := 0
   518  		//...iterate the buckets...
   519  		for id, bucketVal := range lStores {
   520  			//...and remove the one which is the pivot node
   521  			if id == *sim.PivotNodeID() {
   522  				continue
   523  			}
   524  			//the other ones are added to the array...
   525  			stores[i] = bucketVal.(storage.ChunkStore)
   526  			i++
   527  		}
   528  		//...which then gets passed to the round-robin file store
   529  		roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams())
   530  		//now we can actually upload a (random) file to the round-robin store
   531  		size := chunkCount * chunkSize
   532  		log.Debug("Storing data to file store")
   533  		fileHash, wait, err := roundRobinFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
   534  		// wait until all chunks stored
   535  		if err != nil {
   536  			return err
   537  		}
   538  		err = wait(ctx)
   539  		if err != nil {
   540  			return err
   541  		}
   542  
   543  		log.Debug("Waiting for kademlia")
   544  		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   545  			return err
   546  		}
   547  
   548  		//get the pivot node's filestore
   549  		item, ok := sim.NodeItem(*sim.PivotNodeID(), bucketKeyFileStore)
   550  		if !ok {
   551  			return fmt.Errorf("No filestore")
   552  		}
   553  		pivotFileStore := item.(*storage.FileStore)
   554  		log.Debug("Starting retrieval routine")
   555  		go func() {
   556  			// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
   557  			// we must wait for the peer connections to have started before requesting
   558  			n, err := readAll(pivotFileStore, fileHash)
   559  			log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
   560  			if err != nil {
   561  				t.Errorf("requesting chunks action error: %v", err)
   562  			}
   563  		}()
   564  
   565  		log.Debug("Watching for disconnections")
   566  		disconnections := sim.PeerEvents(
   567  			context.Background(),
   568  			sim.NodeIDs(),
   569  			simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
   570  		)
   571  
   572  		go func() {
   573  			for d := range disconnections {
   574  				if d.Error != nil {
   575  					log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
   576  					t.Error(d.Error)
   577  				}
   578  			}
   579  		}()
   580  
   581  		//finally check that the pivot node gets all chunks via the root hash
   582  		log.Debug("Check retrieval")
   583  		success := true
   584  		var total int64
   585  		total, err = readAll(pivotFileStore, fileHash)
   586  		if err != nil {
   587  			return err
   588  		}
   589  		log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
   590  		if err != nil || total != int64(size) {
   591  			success = false
   592  		}
   593  
   594  		if !success {
   595  			return fmt.Errorf("Test failed, chunks not available on all nodes")
   596  		}
   597  		log.Debug("Test terminated successfully")
   598  		return nil
   599  	})
   600  	if result.Error != nil {
   601  		t.Fatal(result.Error)
   602  	}
   603  }
   604  
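        // BenchmarkDeliveryFromNodesWithoutCheck benchmarks chunk delivery with SkipCheck
        // enabled, for 2 to 16 nodes and 32 to 128 chunks.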
   605  func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
   606  	for chunks := 32; chunks <= 128; chunks *= 2 {
   607  		for i := 2; i < 32; i *= 2 {
   608  			b.Run(
   609  				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
   610  				func(b *testing.B) {
   611  					benchmarkDeliveryFromNodes(b, i, 1, chunks, true)
   612  				},
   613  			)
   614  		}
   615  	}
   616  }
   617  
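        // BenchmarkDeliveryFromNodesWithCheck runs the same scenarios with SkipCheck disabled.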
   618  func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
   619  	for chunks := 32; chunks <= 128; chunks *= 2 {
   620  		for i := 2; i < 32; i *= 2 {
   621  			b.Run(
   622  				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
   623  				func(b *testing.B) {
   624  					benchmarkDeliveryFromNodes(b, i, 1, chunks, false)
   625  				},
   626  			)
   627  		}
   628  	}
   629  }
   630  
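        // benchmarkDeliveryFromNodes sets up a chain of `nodes` nodes, uploads chunkCount
        // random chunks to the last node in every iteration and measures how long the first
        // (pivot) node takes to fetch all of them through its NetStore.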
   631  func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) {
   632  	sim := simulation.New(map[string]simulation.ServiceFunc{
   633  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   634  			node := ctx.Config.Node()
   635  			addr := network.NewAddr(node)
   636  			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
   637  			if err != nil {
   638  				return nil, nil, err
   639  			}
   640  			bucket.Store(bucketKeyStore, store)
   641  			cleanup = func() {
   642  				os.RemoveAll(datadir)
   643  				store.Close()
   644  			}
   645  			localStore := store.(*storage.LocalStore)
   646  			netStore, err := storage.NewNetStore(localStore, nil)
   647  			if err != nil {
   648  				return nil, nil, err
   649  			}
   650  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   651  			delivery := NewDelivery(kad, netStore)
   652  			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
   653  
   654  			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
   655  				SkipCheck:       skipCheck,
   656  				Syncing:         SyncingDisabled,
   657  				Retrieval:       RetrievalDisabled,
   658  				SyncUpdateDelay: 0,
   659  			})
   660  
   661  			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
   662  			bucket.Store(bucketKeyFileStore, fileStore)
   663  
   664  			return r, cleanup, nil
   665  
   666  		},
   667  	})
   668  	defer sim.Close()
   669  
   670  	log.Info("Initializing test config")
   671  	_, err := sim.AddNodesAndConnectChain(nodes)
   672  	if err != nil {
   673  		b.Fatal(err)
   674  	}
   675  
   676  	ctx := context.Background()
   677  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
   678  		nodeIDs := sim.UpNodeIDs()
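        		// the last node in the chain acts as the uploader, the first node as the downloader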
   679  		node := nodeIDs[len(nodeIDs)-1]
   680  
   681  		item, ok := sim.NodeItem(node, bucketKeyFileStore)
   682  		if !ok {
   683  			b.Fatal("No filestore")
   684  		}
   685  		remoteFileStore := item.(*storage.FileStore)
   686  
   687  		pivotNode := nodeIDs[0]
   688  		item, ok = sim.NodeItem(pivotNode, bucketKeyNetStore)
   689  		if !ok {
   690  			b.Fatal("No netstore")
   691  		}
   692  		netStore := item.(*storage.NetStore)
   693  
   694  		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   695  			return err
   696  		}
   697  
   698  		disconnections := sim.PeerEvents(
   699  			context.Background(),
   700  			sim.NodeIDs(),
   701  			simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
   702  		)
   703  
   704  		go func() {
   705  			for d := range disconnections {
   706  				if d.Error != nil {
   707  					log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
   708  					b.Error(d.Error)
   709  				}
   710  			}
   711  		}()
   712  		// benchmark loop
   713  		b.ResetTimer()
   714  		b.StopTimer()
   715  	Loop:
   716  		for i := 0; i < b.N; i++ {
   717  			// uploading chunkCount random chunks to the last node
   718  			hashes := make([]storage.Address, chunkCount)
   719  			for i := 0; i < chunkCount; i++ {
   720  				// create real chunks of the actual chunk size, filled with random data
   721  				ctx := context.TODO()
   722  				hash, wait, err := remoteFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false)
   723  				if err != nil {
   724  					b.Fatalf("expected no error. got %v", err)
   725  				}
   726  				// wait until all chunks stored
   727  				err = wait(ctx)
   728  				if err != nil {
   729  					b.Fatalf("expected no error. got %v", err)
   730  				}
   731  				// collect the hashes
   732  				hashes[i] = hash
   733  			}
   734  			// now benchmark the actual retrieval
   735  			// netStore.Get is called for each hash in a goroutine and errors are collected
   736  			b.StartTimer()
   737  			errs := make(chan error)
   738  			for _, hash := range hashes {
   739  				go func(h storage.Address) {
   740  					_, err := netStore.Get(ctx, h)
   741  					log.Warn("test check netstore get", "hash", h, "err", err)
   742  					errs <- err
   743  				}(hash)
   744  			}
   745  			// count and report retrieval errors
   746  			// if there are misses then chunk timeout is too low for the distance and volume (?)
   747  			var total, misses int
   748  			for err := range errs {
   749  				if err != nil {
   750  					log.Warn(err.Error())
   751  					misses++
   752  				}
   753  				total++
   754  				if total == chunkCount {
   755  					break
   756  				}
   757  			}
   758  			b.StopTimer()
   759  
   760  			if misses > 0 {
   761  				err = fmt.Errorf("%v chunks not found out of %v", misses, total)
   762  				break Loop
   763  			}
   764  		}
   765  		if err != nil {
   766  			b.Fatal(err)
   767  		}
   768  		return nil
   769  	})
   770  	if result.Error != nil {
   771  		b.Fatal(result.Error)
   772  	}
   773  
   774  }