github.com/shyftnetwork/go-empyrean@v1.8.3-0.20191127201940-fbfca9338f04/swarm/network/stream/delivery_test.go

     1  // Copyright 2018 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package stream
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	"errors"
    23  	"fmt"
    24  	"os"
    25  	"sync"
    26  	"sync/atomic"
    27  	"testing"
    28  	"time"
    29  
    30  	"github.com/ShyftNetwork/go-empyrean/node"
    31  	"github.com/ShyftNetwork/go-empyrean/p2p"
    32  	"github.com/ShyftNetwork/go-empyrean/p2p/enode"
    33  	"github.com/ShyftNetwork/go-empyrean/p2p/protocols"
    34  	"github.com/ShyftNetwork/go-empyrean/p2p/simulations/adapters"
    35  	p2ptest "github.com/ShyftNetwork/go-empyrean/p2p/testing"
    36  	"github.com/ShyftNetwork/go-empyrean/swarm/log"
    37  	"github.com/ShyftNetwork/go-empyrean/swarm/network"
    38  	pq "github.com/ShyftNetwork/go-empyrean/swarm/network/priorityqueue"
    39  	"github.com/ShyftNetwork/go-empyrean/swarm/network/simulation"
    40  	"github.com/ShyftNetwork/go-empyrean/swarm/state"
    41  	"github.com/ShyftNetwork/go-empyrean/swarm/storage"
    42  	"github.com/ShyftNetwork/go-empyrean/swarm/testutil"
    43  )
    44  
    45  //Tests initializing a retrieve request
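        //(RetrievalClientOnly registers only the client side of the retrieval stream,
        //so the messages expected below are the subscription to the peer's
        //RETRIEVE_REQUEST stream and the outgoing retrieve request; no chunks are served)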
    46  func TestStreamerRetrieveRequest(t *testing.T) {
    47  	regOpts := &RegistryOptions{
    48  		Retrieval: RetrievalClientOnly,
    49  		Syncing:   SyncingDisabled,
    50  	}
    51  	tester, streamer, _, teardown, err := newStreamerTester(t, regOpts)
    52  	defer teardown()
    53  	if err != nil {
    54  		t.Fatal(err)
    55  	}
    56  
    57  	node := tester.Nodes[0]
    58  
    59  	ctx := context.Background()
    60  	req := network.NewRequest(
    61  		storage.Address(hash0[:]),
    62  		true,
    63  		&sync.Map{},
    64  	)
    65  	streamer.delivery.RequestFromPeers(ctx, req)
    66  
    67  	stream := NewStream(swarmChunkServerStreamName, "", true)
    68  
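        	//the numeric message codes asserted below index into the stream protocol's
        	//Spec.Messages list (here 4: SubscribeMsg, 5: RetrieveRequestMsg)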
    69  	err = tester.TestExchanges(p2ptest.Exchange{
    70  		Label: "RetrieveRequestMsg",
    71  		Expects: []p2ptest.Expect{
    72  			{ //start expecting a subscription for RETRIEVE_REQUEST due to `RetrievalClientOnly`
    73  				Code: 4,
    74  				Msg: &SubscribeMsg{
    75  					Stream:   stream,
    76  					History:  nil,
    77  					Priority: Top,
    78  				},
    79  				Peer: node.ID(),
    80  			},
    81  			{ //expect a retrieve request message for the given hash
    82  				Code: 5,
    83  				Msg: &RetrieveRequestMsg{
    84  					Addr:      hash0[:],
    85  					SkipCheck: true,
    86  				},
    87  				Peer: node.ID(),
    88  			},
    89  		},
    90  	})
    91  
    92  	if err != nil {
    93  		t.Fatalf("Expected no error, got %v", err)
    94  	}
    95  }
    96  
    97  //Test requesting a chunk from a peer then issuing an "empty" OfferedHashesMsg (no hashes available yet)
    98  //Should time out as the peer does not have the chunk (no syncing happened previously)
    99  func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
   100  	tester, streamer, _, teardown, err := newStreamerTester(t, &RegistryOptions{
   101  		Retrieval: RetrievalEnabled,
   102  		Syncing:   SyncingDisabled, //disable syncing
   103  	})
   104  	defer teardown()
   105  	if err != nil {
   106  		t.Fatal(err)
   107  	}
   108  
   109  	node := tester.Nodes[0]
   110  
   111  	chunk := storage.NewChunk(storage.Address(hash0[:]), nil)
   112  
   113  	peer := streamer.getPeer(node.ID())
   114  
   115  	stream := NewStream(swarmChunkServerStreamName, "", true)
   116  	//simulate pre-subscription to RETRIEVE_REQUEST stream on peer
   117  	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
   118  		Stream:   stream,
   119  		History:  nil,
   120  		Priority: Top,
   121  	})
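        	//(the handled subscription registers the chunk server for the RETRIEVE_REQUEST
        	//stream on this peer, which the delivery handler needs in order to process
        	//the retrieve request triggered below)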
   122  
   123  	//test the exchange
   124  	err = tester.TestExchanges(p2ptest.Exchange{
   125  		Expects: []p2ptest.Expect{
   126  			{ //first expect a subscription to the RETRIEVE_REQUEST stream
   127  				Code: 4,
   128  				Msg: &SubscribeMsg{
   129  					Stream:   stream,
   130  					History:  nil,
   131  					Priority: Top,
   132  				},
   133  				Peer: node.ID(),
   134  			},
   135  		},
   136  	}, p2ptest.Exchange{
   137  		Label: "RetrieveRequestMsg",
   138  		Triggers: []p2ptest.Trigger{
   139  			{ //then the actual RETRIEVE_REQUEST....
   140  				Code: 5,
   141  				Msg: &RetrieveRequestMsg{
   142  					Addr: chunk.Address()[:],
   143  				},
   144  				Peer: node.ID(),
   145  			},
   146  		},
   147  		Expects: []p2ptest.Expect{
   148  			{ //to which the peer responds with offered hashes
   149  				Code: 1,
   150  				Msg: &OfferedHashesMsg{
   151  					HandoverProof: nil,
   152  					Hashes:        nil,
   153  					From:          0,
   154  					To:            0,
   155  				},
   156  				Peer: node.ID(),
   157  			},
   158  		},
   159  	})
   160  
   161  	//should fail with a timeout as the peer we are requesting
   162  	//the chunk from does not have the chunk
   163  	expectedError := `exchange #1 "RetrieveRequestMsg": timed out`
   164  	if err == nil || err.Error() != expectedError {
   165  		t.Fatalf("Expected error %v, got %v", expectedError, err)
   166  	}
   167  }
   168  
   169  // upstream request server receives a retrieve request and responds with
   170  // offered hashes, or with a direct chunk delivery if SkipCheck is set to true
   171  func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
   172  	tester, streamer, localStore, teardown, err := newStreamerTester(t, &RegistryOptions{
   173  		Retrieval: RetrievalEnabled,
   174  		Syncing:   SyncingDisabled,
   175  	})
   176  	defer teardown()
   177  	if err != nil {
   178  		t.Fatal(err)
   179  	}
   180  
   181  	node := tester.Nodes[0]
   182  
   183  	peer := streamer.getPeer(node.ID())
   184  
   185  	stream := NewStream(swarmChunkServerStreamName, "", true)
   186  
   187  	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
   188  		Stream:   stream,
   189  		History:  nil,
   190  		Priority: Top,
   191  	})
   192  
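        	//put a chunk whose data is simply its own 32-byte address into the local
        	//store, so that the server can answer the retrieve request below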
   193  	hash := storage.Address(hash0[:])
   194  	chunk := storage.NewChunk(hash, hash)
   195  	err = localStore.Put(context.TODO(), chunk)
   196  	if err != nil {
   197  		t.Fatalf("Expected no err got %v", err)
   198  	}
   199  
   200  	err = tester.TestExchanges(p2ptest.Exchange{
   201  		Expects: []p2ptest.Expect{
   202  			{
   203  				Code: 4,
   204  				Msg: &SubscribeMsg{
   205  					Stream:   stream,
   206  					History:  nil,
   207  					Priority: Top,
   208  				},
   209  				Peer: node.ID(),
   210  			},
   211  		},
   212  	}, p2ptest.Exchange{
   213  		Label: "RetrieveRequestMsg",
   214  		Triggers: []p2ptest.Trigger{
   215  			{
   216  				Code: 5,
   217  				Msg: &RetrieveRequestMsg{
   218  					Addr: hash,
   219  				},
   220  				Peer: node.ID(),
   221  			},
   222  		},
   223  		Expects: []p2ptest.Expect{
   224  			{
   225  				Code: 1,
   226  				Msg: &OfferedHashesMsg{
   227  					HandoverProof: &HandoverProof{
   228  						Handover: &Handover{},
   229  					},
   230  					Hashes: hash,
   231  					From:   0,
   232  					// TODO: why is this 32???
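        					// (presumably because the offered-hashes batch counter advances by the
        					// byte length of the batch, i.e. a single 32-byte hash: From 0, To 32)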
   233  					To:     32,
   234  					Stream: stream,
   235  				},
   236  				Peer: node.ID(),
   237  			},
   238  		},
   239  	})
   240  
   241  	if err != nil {
   242  		t.Fatal(err)
   243  	}
   244  
   245  	hash = storage.Address(hash1[:])
   246  	chunk = storage.NewChunk(hash, hash1[:])
   247  	err = localStore.Put(context.TODO(), chunk)
   248  	if err != nil {
   249  		t.Fatalf("Expected no err got %v", err)
   250  	}
   251  
   252  	err = tester.TestExchanges(p2ptest.Exchange{
   253  		Label: "RetrieveRequestMsg",
   254  		Triggers: []p2ptest.Trigger{
   255  			{
   256  				Code: 5,
   257  				Msg: &RetrieveRequestMsg{
   258  					Addr:      hash,
   259  					SkipCheck: true,
   260  				},
   261  				Peer: node.ID(),
   262  			},
   263  		},
   264  		Expects: []p2ptest.Expect{
   265  			{
   266  				Code: 6,
   267  				Msg: &ChunkDeliveryMsg{
   268  					Addr:  hash,
   269  					SData: hash,
   270  				},
   271  				Peer: node.ID(),
   272  			},
   273  		},
   274  	})
   275  
   276  	if err != nil {
   277  		t.Fatal(err)
   278  	}
   279  }
   280  
   281  // if there is one peer in the Kademlia, RequestFromPeers should return it
   282  func TestRequestFromPeers(t *testing.T) {
   283  	dummyPeerID := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")
   284  
   285  	addr := network.RandomAddr()
   286  	to := network.NewKademlia(addr.OAddr, network.NewKadParams())
   287  	delivery := NewDelivery(to, nil)
   288  	protocolsPeer := protocols.NewPeer(p2p.NewPeer(dummyPeerID, "dummy", nil), nil, nil)
   289  	peer := network.NewPeer(&network.BzzPeer{
   290  		BzzAddr:   network.RandomAddr(),
   291  		LightNode: false,
   292  		Peer:      protocolsPeer,
   293  	}, to)
   294  	to.On(peer)
   295  	r := NewRegistry(addr.ID(), delivery, nil, nil, nil, nil)
   296  
   297  	// an empty priorityQueue has to be created to prevent a goroutine being called after the test has finished
   298  	sp := &Peer{
   299  		Peer:     protocolsPeer,
   300  		pq:       pq.New(int(PriorityQueue), PriorityQueueCap),
   301  		streamer: r,
   302  	}
   303  	r.setPeer(sp)
   304  	req := network.NewRequest(
   305  		storage.Address(hash0[:]),
   306  		true,
   307  		&sync.Map{},
   308  	)
   309  	ctx := context.Background()
   310  	id, _, err := delivery.RequestFromPeers(ctx, req)
   311  
   312  	if err != nil {
   313  		t.Fatal(err)
   314  	}
   315  	if *id != dummyPeerID {
   316  		t.Fatalf("Expected peer ID %v, got %v", dummyPeerID, id)
   317  	}
   318  }
   319  
   320  // RequestFromPeers should not return light nodes
   321  func TestRequestFromPeersWithLightNode(t *testing.T) {
   322  	dummyPeerID := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")
   323  
   324  	addr := network.RandomAddr()
   325  	to := network.NewKademlia(addr.OAddr, network.NewKadParams())
   326  	delivery := NewDelivery(to, nil)
   327  
   328  	protocolsPeer := protocols.NewPeer(p2p.NewPeer(dummyPeerID, "dummy", nil), nil, nil)
   329  	// setting up a lightnode
   330  	peer := network.NewPeer(&network.BzzPeer{
   331  		BzzAddr:   network.RandomAddr(),
   332  		LightNode: true,
   333  		Peer:      protocolsPeer,
   334  	}, to)
   335  	to.On(peer)
   336  	r := NewRegistry(addr.ID(), delivery, nil, nil, nil, nil)
   337  	// an empty priorityQueue has to be created to prevent a goroutine being called after the test has finished
   338  	sp := &Peer{
   339  		Peer:     protocolsPeer,
   340  		pq:       pq.New(int(PriorityQueue), PriorityQueueCap),
   341  		streamer: r,
   342  	}
   343  	r.setPeer(sp)
   344  
   345  	req := network.NewRequest(
   346  		storage.Address(hash0[:]),
   347  		true,
   348  		&sync.Map{},
   349  	)
   350  
   351  	ctx := context.Background()
   352  	// making a request which should return with "no peer found"
   353  	_, _, err := delivery.RequestFromPeers(ctx, req)
   354  
   355  	expectedError := "no peer found"
   356  	if err == nil || err.Error() != expectedError {
   357  		t.Fatalf("expected '%v', got %v", expectedError, err)
   358  	}
   359  }
   360  
   361  func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
   362  	tester, streamer, localStore, teardown, err := newStreamerTester(t, &RegistryOptions{
   363  		Retrieval: RetrievalDisabled,
   364  		Syncing:   SyncingDisabled,
   365  	})
   366  	defer teardown()
   367  	if err != nil {
   368  		t.Fatal(err)
   369  	}
   370  
   371  	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
   372  		return &testClient{
   373  			t: t,
   374  		}, nil
   375  	})
   376  
   377  	node := tester.Nodes[0]
   378  
   379  	//subscribe to custom stream
   380  	stream := NewStream("foo", "", true)
   381  	err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
   382  	if err != nil {
   383  		t.Fatalf("Expected no error, got %v", err)
   384  	}
   385  
   386  	chunkKey := hash0[:]
   387  	chunkData := hash1[:]
   388  
   389  	err = tester.TestExchanges(p2ptest.Exchange{
   390  		Label: "Subscribe message",
   391  		Expects: []p2ptest.Expect{
   392  			{ //first expect subscription to the custom stream...
   393  				Code: 4,
   394  				Msg: &SubscribeMsg{
   395  					Stream:   stream,
   396  					History:  NewRange(5, 8),
   397  					Priority: Top,
   398  				},
   399  				Peer: node.ID(),
   400  			},
   401  		},
   402  	},
   403  		p2ptest.Exchange{
   404  			Label: "ChunkDelivery message",
   405  			Triggers: []p2ptest.Trigger{
   406  				{ //...then trigger a chunk delivery for the given chunk from the peer so that
   407  					//the local node gets the chunk delivered
   408  					Code: 6,
   409  					Msg: &ChunkDeliveryMsg{
   410  						Addr:  chunkKey,
   411  						SData: chunkData,
   412  					},
   413  					Peer: node.ID(),
   414  				},
   415  			},
   416  		})
   417  
   418  	if err != nil {
   419  		t.Fatalf("Expected no error, got %v", err)
   420  	}
   421  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
   422  	defer cancel()
   423  
   424  	// wait for the chunk to get stored
   425  	storedChunk, err := localStore.Get(ctx, chunkKey)
   426  	for err != nil {
   427  		select {
   428  		case <-ctx.Done():
   429  			t.Fatalf("Chunk is not in localstore after timeout, err: %v", err)
   430  		default:
   431  		}
   432  		storedChunk, err = localStore.Get(ctx, chunkKey)
   433  		time.Sleep(50 * time.Millisecond)
   434  	}
   435  
   436  	if err != nil {
   437  		t.Fatalf("Expected no error, got %v", err)
   438  	}
   439  
   440  	if !bytes.Equal(storedChunk.Data(), chunkData) {
   441  		t.Fatal("Retrieved chunk has different data than original")
   442  	}
   443  
   444  }
   445  
   446  func TestDeliveryFromNodes(t *testing.T) {
   447  	testDeliveryFromNodes(t, 2, dataChunkCount, true)
   448  	testDeliveryFromNodes(t, 2, dataChunkCount, false)
   449  	testDeliveryFromNodes(t, 4, dataChunkCount, true)
   450  	testDeliveryFromNodes(t, 4, dataChunkCount, false)
   451  	testDeliveryFromNodes(t, 8, dataChunkCount, true)
   452  	testDeliveryFromNodes(t, 8, dataChunkCount, false)
   453  	testDeliveryFromNodes(t, 16, dataChunkCount, true)
   454  	testDeliveryFromNodes(t, 16, dataChunkCount, false)
   455  }
   456  
   457  func testDeliveryFromNodes(t *testing.T, nodes, chunkCount int, skipCheck bool) {
   458  	sim := simulation.New(map[string]simulation.ServiceFunc{
   459  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   460  			node := ctx.Config.Node()
   461  			addr := network.NewAddr(node)
   462  			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
   463  			if err != nil {
   464  				return nil, nil, err
   465  			}
   466  			bucket.Store(bucketKeyStore, store)
   467  			cleanup = func() {
   468  				os.RemoveAll(datadir)
   469  				store.Close()
   470  			}
   471  			localStore := store.(*storage.LocalStore)
   472  			netStore, err := storage.NewNetStore(localStore, nil)
   473  			if err != nil {
   474  				return nil, nil, err
   475  			}
   476  
   477  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   478  			delivery := NewDelivery(kad, netStore)
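        			//wire the netstore's fetcher factory to the delivery, so that chunks
        			//missing locally are requested from peers via delivery.RequestFromPeers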
   479  			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
   480  
   481  			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
   482  				SkipCheck: skipCheck,
   483  				Syncing:   SyncingDisabled,
   484  				Retrieval: RetrievalEnabled,
   485  			}, nil)
   486  			bucket.Store(bucketKeyRegistry, r)
   487  
   488  			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
   489  			bucket.Store(bucketKeyFileStore, fileStore)
   490  
   491  			return r, cleanup, nil
   492  
   493  		},
   494  	})
   495  	defer sim.Close()
   496  
   497  	log.Info("Adding nodes to simulation")
   498  	_, err := sim.AddNodesAndConnectChain(nodes)
   499  	if err != nil {
   500  		t.Fatal(err)
   501  	}
   502  
   503  	log.Info("Starting simulation")
   504  	ctx := context.Background()
   505  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
   506  		nodeIDs := sim.UpNodeIDs()
   507  		//determine the pivot node to be the first node of the simulation
   508  		pivot := nodeIDs[0]
   509  
   510  		//distribute chunks of a random file into the Stores of all nodes except the pivot
   511  		//we will do this by creating a file store with an underlying round-robin store:
   512  		//the file store will create a hash for the uploaded file, but every chunk will be
   513  		//distributed to different nodes via round-robin scheduling
   514  		log.Debug("Writing file to round-robin file store")
   515  		//to do this, we create an array of chunkstores (one for every node except the pivot)
   516  		stores := make([]storage.ChunkStore, len(nodeIDs)-1)
   517  		//we then need to get all stores from the sim....
   518  		lStores := sim.NodesItems(bucketKeyStore)
   519  		i := 0
   520  		//...iterate the buckets...
   521  		for id, bucketVal := range lStores {
   522  			//...and skip the one belonging to the pivot node
   523  			if id == pivot {
   524  				continue
   525  			}
   526  			//the other ones are added to the array...
   527  			stores[i] = bucketVal.(storage.ChunkStore)
   528  			i++
   529  		}
   530  		//...which then gets passed to the round-robin file store
   531  		roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams())
   532  		//now we can actually upload a (random) file to the round-robin store
   533  		size := chunkCount * chunkSize
   534  		log.Debug("Storing data to file store")
   535  		fileHash, wait, err := roundRobinFileStore.Store(ctx, testutil.RandomReader(1, size), int64(size), false)
   536  		// wait until all chunks stored
   537  		if err != nil {
   538  			return err
   539  		}
   540  		err = wait(ctx)
   541  		if err != nil {
   542  			return err
   543  		}
   544  
   545  		log.Debug("Waiting for kademlia")
   546  		// TODO this does not seem to be correct usage of the function, as the simulation may have no kademlias
   547  		if _, err := sim.WaitTillHealthy(ctx); err != nil {
   548  			return err
   549  		}
   550  
   551  		//get the pivot node's filestore
   552  		item, ok := sim.NodeItem(pivot, bucketKeyFileStore)
   553  		if !ok {
   554  			return fmt.Errorf("No filestore")
   555  		}
   556  		pivotFileStore := item.(*storage.FileStore)
   557  		log.Debug("Starting retrieval routine")
   558  		retErrC := make(chan error)
   559  		go func() {
   560  			// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
   561  			// we must wait for the peer connections to have started before requesting
   562  			n, err := readAll(pivotFileStore, fileHash)
   563  			log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
   564  			retErrC <- err
   565  		}()
   566  
   567  		log.Debug("Watching for disconnections")
   568  		disconnections := sim.PeerEvents(
   569  			context.Background(),
   570  			sim.NodeIDs(),
   571  			simulation.NewPeerEventsFilter().Drop(),
   572  		)
   573  
   574  		var disconnected atomic.Value
   575  		go func() {
   576  			for d := range disconnections {
   577  				if d.Error != nil {
   578  					log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
   579  					disconnected.Store(true)
   580  				}
   581  			}
   582  		}()
   583  		defer func() {
   584  			if err != nil {
   585  				if yes, ok := disconnected.Load().(bool); ok && yes {
   586  					err = errors.New("disconnect events received")
   587  				}
   588  			}
   589  		}()
   590  
   591  		//finally check that the pivot node gets all chunks via the root hash
   592  		log.Debug("Check retrieval")
   593  		success := true
   594  		var total int64
   595  		total, err = readAll(pivotFileStore, fileHash)
   596  		if err != nil {
   597  			return err
   598  		}
   599  		log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
   600  		if err != nil || total != int64(size) {
   601  			success = false
   602  		}
   603  
   604  		if !success {
   605  			return fmt.Errorf("Test failed, chunks not available on all nodes")
   606  		}
   607  		if err := <-retErrC; err != nil {
   608  			t.Fatalf("requesting chunks: %v", err)
   609  		}
   610  		log.Debug("Test terminated successfully")
   611  		return nil
   612  	})
   613  	if result.Error != nil {
   614  		t.Fatal(result.Error)
   615  	}
   616  }
   617  
   618  func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
   619  	for chunks := 32; chunks <= 128; chunks *= 2 {
   620  		for i := 2; i < 32; i *= 2 {
   621  			b.Run(
   622  				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
   623  				func(b *testing.B) {
   624  					benchmarkDeliveryFromNodes(b, i, chunks, true)
   625  				},
   626  			)
   627  		}
   628  	}
   629  }
   630  
   631  func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
   632  	for chunks := 32; chunks <= 128; chunks *= 2 {
   633  		for i := 2; i < 32; i *= 2 {
   634  			b.Run(
   635  				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
   636  				func(b *testing.B) {
   637  					benchmarkDeliveryFromNodes(b, i, chunks, false)
   638  				},
   639  			)
   640  		}
   641  	}
   642  }
   643  
   644  func benchmarkDeliveryFromNodes(b *testing.B, nodes, chunkCount int, skipCheck bool) {
   645  	sim := simulation.New(map[string]simulation.ServiceFunc{
   646  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   647  			node := ctx.Config.Node()
   648  			addr := network.NewAddr(node)
   649  			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
   650  			if err != nil {
   651  				return nil, nil, err
   652  			}
   653  			bucket.Store(bucketKeyStore, store)
   654  			cleanup = func() {
   655  				os.RemoveAll(datadir)
   656  				store.Close()
   657  			}
   658  			localStore := store.(*storage.LocalStore)
   659  			netStore, err := storage.NewNetStore(localStore, nil)
   660  			if err != nil {
   661  				return nil, nil, err
   662  			}
   663  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   664  			delivery := NewDelivery(kad, netStore)
   665  			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
   666  
   667  			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
   668  				SkipCheck:       skipCheck,
   669  				Syncing:         SyncingDisabled,
   670  				Retrieval:       RetrievalDisabled,
   671  				SyncUpdateDelay: 0,
   672  			}, nil)
   673  
   674  			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
   675  			bucket.Store(bucketKeyFileStore, fileStore)
   676  
   677  			return r, cleanup, nil
   678  
   679  		},
   680  	})
   681  	defer sim.Close()
   682  
   683  	log.Info("Initializing test config")
   684  	_, err := sim.AddNodesAndConnectChain(nodes)
   685  	if err != nil {
   686  		b.Fatal(err)
   687  	}
   688  
   689  	ctx := context.Background()
   690  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
   691  		nodeIDs := sim.UpNodeIDs()
   692  		node := nodeIDs[len(nodeIDs)-1]
   693  
   694  		item, ok := sim.NodeItem(node, bucketKeyFileStore)
   695  		if !ok {
   696  			b.Fatal("No filestore")
   697  		}
   698  		remoteFileStore := item.(*storage.FileStore)
   699  
   700  		pivotNode := nodeIDs[0]
   701  		item, ok = sim.NodeItem(pivotNode, bucketKeyNetStore)
   702  		if !ok {
   703  			b.Fatal("No filestore")
   704  		}
   705  		netStore := item.(*storage.NetStore)
   706  
   707  		if _, err := sim.WaitTillHealthy(ctx); err != nil {
   708  			return err
   709  		}
   710  
   711  		disconnections := sim.PeerEvents(
   712  			context.Background(),
   713  			sim.NodeIDs(),
   714  			simulation.NewPeerEventsFilter().Drop(),
   715  		)
   716  
   717  		var disconnected atomic.Value
   718  		go func() {
   719  			for d := range disconnections {
   720  				if d.Error != nil {
   721  					log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
   722  					disconnected.Store(true)
   723  				}
   724  			}
   725  		}()
   726  		defer func() {
   727  			if err != nil {
   728  				if yes, ok := disconnected.Load().(bool); ok && yes {
   729  					err = errors.New("disconnect events received")
   730  				}
   731  			}
   732  		}()
   733  		// benchmark loop
   734  		b.ResetTimer()
   735  		b.StopTimer()
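        		// the benchmark timer stays stopped while chunks are uploaded; it only
        		// runs around the retrieval section below (StartTimer/StopTimer per iteration)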
   736  	Loop:
   737  		for i := 0; i < b.N; i++ {
   738  			// uploading chunkCount random chunks to the last node
   739  			hashes := make([]storage.Address, chunkCount)
   740  			for i := 0; i < chunkCount; i++ {
   741  				// create real chunks of the actual chunk size
   742  				ctx := context.TODO()
   743  				hash, wait, err := remoteFileStore.Store(ctx, testutil.RandomReader(i, chunkSize), int64(chunkSize), false)
   744  				if err != nil {
   745  					b.Fatalf("expected no error. got %v", err)
   746  				}
   747  				// wait until all chunks stored
   748  				err = wait(ctx)
   749  				if err != nil {
   750  					b.Fatalf("expected no error. got %v", err)
   751  				}
   752  				// collect the hashes
   753  				hashes[i] = hash
   754  			}
   755  			// now benchmark the actual retrieval
   756  			// netstore.Get is called for each hash in a goroutine and errors are collected
   757  			b.StartTimer()
   758  			errs := make(chan error)
   759  			for _, hash := range hashes {
   760  				go func(h storage.Address) {
   761  					_, err := netStore.Get(ctx, h)
   762  					log.Warn("test check netstore get", "hash", h, "err", err)
   763  					errs <- err
   764  				}(hash)
   765  			}
   766  			// count and report retrieval errors
   767  			// if there are misses then chunk timeout is too low for the distance and volume (?)
   768  			var total, misses int
   769  			for err := range errs {
   770  				if err != nil {
   771  					log.Warn(err.Error())
   772  					misses++
   773  				}
   774  				total++
   775  				if total == chunkCount {
   776  					break
   777  				}
   778  			}
   779  			b.StopTimer()
   780  
   781  			if misses > 0 {
   782  				err = fmt.Errorf("%v chunks not found out of %v", misses, total)
   783  				break Loop
   784  			}
   785  		}
   786  		if err != nil {
   787  			b.Fatal(err)
   788  		}
   789  		return nil
   790  	})
   791  	if result.Error != nil {
   792  		b.Fatal(result.Error)
   793  	}
   794  
   795  }