github.com/FUSIONFoundation/efsn@v3.6.2-0.20200916075423-dbb5dd5d2cc7+incompatible/swarm/network/stream/delivery_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"bytes"
	"context"
	crand "crypto/rand"
	"fmt"
	"io"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/FusionFoundation/efsn/node"
	"github.com/FusionFoundation/efsn/p2p"
	"github.com/FusionFoundation/efsn/p2p/simulations/adapters"
	p2ptest "github.com/FusionFoundation/efsn/p2p/testing"
	"github.com/FusionFoundation/efsn/swarm/log"
	"github.com/FusionFoundation/efsn/swarm/network"
	"github.com/FusionFoundation/efsn/swarm/network/simulation"
	"github.com/FusionFoundation/efsn/swarm/state"
	"github.com/FusionFoundation/efsn/swarm/storage"
)

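// TestStreamerRetrieveRequest checks that a call to delivery.RequestFromPeers on the
// local node results in a RetrieveRequestMsg with SkipCheck set being sent to the
// connected test peer.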
func TestStreamerRetrieveRequest(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	peerID := tester.IDs[0]

	ctx := context.Background()
	req := network.NewRequest(
		storage.Address(hash0[:]),
		true,
		&sync.Map{},
	)
	streamer.delivery.RequestFromPeers(ctx, req)

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Expects: []p2ptest.Expect{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr:      hash0[:],
					SkipCheck: true,
				},
				Peer: peerID,
			},
		},
	})

	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}
}

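// TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore verifies that a
// RetrieveRequestMsg for a chunk that is not in the local store produces no
// OfferedHashesMsg response: the exchange is expected to time out.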
func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	peerID := tester.IDs[0]

	chunk := storage.NewChunk(storage.Address(hash0[:]), nil)

	peer := streamer.getPeer(peerID)

	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
		Stream:   NewStream(swarmChunkServerStreamName, "", false),
		History:  nil,
		Priority: Top,
	})

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr: chunk.Address()[:],
				},
				Peer: peerID,
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 1,
				Msg: &OfferedHashesMsg{
					HandoverProof: nil,
					Hashes:        nil,
					From:          0,
					To:            0,
				},
				Peer: peerID,
			},
		},
	})

	expectedError := `exchange #0 "RetrieveRequestMsg": timed out`
	if err == nil || err.Error() != expectedError {
		t.Fatalf("Expected error %v, got %v", expectedError, err)
	}
}

// upstream request server receives a retrieve request and responds with
// offered hashes, or with a delivery if SkipCheck is set to true
func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
	tester, streamer, localStore, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	peerID := tester.IDs[0]
	peer := streamer.getPeer(peerID)

	stream := NewStream(swarmChunkServerStreamName, "", false)

	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
		Stream:   stream,
		History:  nil,
		Priority: Top,
	})

	hash := storage.Address(hash0[:])
	chunk := storage.NewChunk(hash, hash)
	err = localStore.Put(context.TODO(), chunk)
	if err != nil {
		t.Fatalf("Expected no err got %v", err)
	}

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr: hash,
				},
				Peer: peerID,
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 1,
				Msg: &OfferedHashesMsg{
					HandoverProof: &HandoverProof{
						Handover: &Handover{},
					},
					Hashes: hash,
					From:   0,
					// TODO: why is this 32???
					To:     32,
					Stream: stream,
				},
				Peer: peerID,
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}

	hash = storage.Address(hash1[:])
	chunk = storage.NewChunk(hash, hash1[:])
	err = localStore.Put(context.TODO(), chunk)
	if err != nil {
		t.Fatalf("Expected no err got %v", err)
	}

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr:      hash,
					SkipCheck: true,
				},
				Peer: peerID,
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 6,
				Msg: &ChunkDeliveryMsg{
					Addr:  hash,
					SData: hash,
				},
				Peer: peerID,
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}
}

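// TestStreamerDownstreamChunkDeliveryMsgExchange registers a test client, subscribes
// to a stream on the test peer and checks that a chunk received via ChunkDeliveryMsg
// ends up in the local store.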
func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
	tester, streamer, localStore, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
		return &testClient{
			t: t,
		}, nil
	})

	peerID := tester.IDs[0]

	stream := NewStream("foo", "", true)
	err = streamer.Subscribe(peerID, stream, NewRange(5, 8), Top)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	chunkKey := hash0[:]
	chunkData := hash1[:]

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "Subscribe message",
		Expects: []p2ptest.Expect{
			{
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					History:  NewRange(5, 8),
					Priority: Top,
				},
				Peer: peerID,
			},
		},
	},
		p2ptest.Exchange{
			Label: "ChunkDelivery message",
			Triggers: []p2ptest.Trigger{
				{
					Code: 6,
					Msg: &ChunkDeliveryMsg{
						Addr:  chunkKey,
						SData: chunkData,
					},
					Peer: peerID,
				},
			},
		})

	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// wait for the chunk to get stored
	storedChunk, err := localStore.Get(ctx, chunkKey)
	for err != nil {
		select {
		case <-ctx.Done():
			t.Fatalf("Chunk is not in localstore after timeout, err: %v", err)
		default:
		}
		storedChunk, err = localStore.Get(ctx, chunkKey)
		time.Sleep(50 * time.Millisecond)
	}

	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	if !bytes.Equal(storedChunk.Data(), chunkData) {
		t.Fatal("Retrieved chunk has different data than original")
	}

}

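// TestDeliveryFromNodes runs the delivery simulation over chain networks of 2, 4, 8
// and 16 nodes, both with and without SkipCheck.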
func TestDeliveryFromNodes(t *testing.T) {
	testDeliveryFromNodes(t, 2, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 2, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 4, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 4, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 8, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 8, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 16, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 16, 1, dataChunkCount, false)
}

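// testDeliveryFromNodes sets up a simulated chain network of the given number of
// nodes, uploads a random file whose chunks are distributed round-robin over all
// nodes except the pivot, and checks that the pivot node can retrieve the complete
// file through the streamer.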
func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {

			id := ctx.Config.ID
			addr := network.NewAddrFromNodeID(id)
			store, datadir, err := createTestLocalStorageForID(id, addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}

			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

			r := NewRegistry(addr, delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				SkipCheck: skipCheck,
			})
			bucket.Store(bucketKeyRegistry, r)

			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil

		},
	})
	defer sim.Close()

	log.Info("Adding nodes to simulation")
	_, err := sim.AddNodesAndConnectChain(nodes)
	if err != nil {
		t.Fatal(err)
	}

	log.Info("Starting simulation")
	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		//determine the pivot node to be the first node of the simulation
		sim.SetPivotNode(nodeIDs[0])
		//distribute chunks of a random file into Stores of nodes 1 to nodes
		//we will do this by creating a file store with an underlying round-robin store:
		//the file store will create a hash for the uploaded file, but every chunk will be
		//distributed to different nodes via round-robin scheduling
		log.Debug("Writing file to round-robin file store")
		//to do this, we create an array of chunk stores (length is number of nodes minus one, excluding the pivot node)
		stores := make([]storage.ChunkStore, len(nodeIDs)-1)
		//we then need to get all stores from the sim....
		lStores := sim.NodesItems(bucketKeyStore)
		i := 0
		//...iterate the buckets...
		for id, bucketVal := range lStores {
			//...and remove the one which is the pivot node
			if id == *sim.PivotNodeID() {
				continue
			}
			//the other ones are added to the array...
			stores[i] = bucketVal.(storage.ChunkStore)
			i++
		}
		//...which then gets passed to the round-robin file store
		roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams())
		//now we can actually upload a (random) file to the round-robin store
		size := chunkCount * chunkSize
		log.Debug("Storing data to file store")
		fileHash, wait, err := roundRobinFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
		// wait until all chunks stored
		if err != nil {
			return err
		}
		err = wait(ctx)
		if err != nil {
			return err
		}

		log.Debug("Waiting for kademlia")
		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		//each of the nodes (except pivot node) subscribes to the stream of the next node
		for j, node := range nodeIDs[0 : nodes-1] {
			sid := nodeIDs[j+1]
			item, ok := sim.NodeItem(node, bucketKeyRegistry)
			if !ok {
				return fmt.Errorf("No registry")
			}
			registry := item.(*Registry)
			err = registry.Subscribe(sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top)
			if err != nil {
				return err
			}
		}

		//get the pivot node's filestore
		item, ok := sim.NodeItem(*sim.PivotNodeID(), bucketKeyFileStore)
		if !ok {
			return fmt.Errorf("No filestore")
		}
		pivotFileStore := item.(*storage.FileStore)
		log.Debug("Starting retrieval routine")
		go func() {
			// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
			// we must wait for the peer connections to have started before requesting
			n, err := readAll(pivotFileStore, fileHash)
			log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
			if err != nil {
				t.Fatalf("requesting chunks action error: %v", err)
			}
		}()

		log.Debug("Watching for disconnections")
		disconnections := sim.PeerEvents(
			context.Background(),
			sim.NodeIDs(),
			simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
		)

		go func() {
			for d := range disconnections {
				if d.Error != nil {
					log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
					t.Fatal(d.Error)
				}
			}
		}()

		//finally check that the pivot node gets all chunks via the root hash
		log.Debug("Check retrieval")
		success := true
		var total int64
		total, err = readAll(pivotFileStore, fileHash)
		if err != nil {
			return err
		}
		log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
		if err != nil || total != int64(size) {
			success = false
		}

		if !success {
			return fmt.Errorf("Test failed, chunks not available on all nodes")
		}
		log.Debug("Test terminated successfully")
		return nil
	})
	if result.Error != nil {
		t.Fatal(result.Error)
	}
}

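// BenchmarkDeliveryFromNodesWithoutCheck benchmarks delivery with SkipCheck enabled
// for increasing network sizes and chunk counts.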
func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
	for chunks := 32; chunks <= 128; chunks *= 2 {
		for i := 2; i < 32; i *= 2 {
			b.Run(
				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
				func(b *testing.B) {
					benchmarkDeliveryFromNodes(b, i, 1, chunks, true)
				},
			)
		}
	}
}

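// BenchmarkDeliveryFromNodesWithCheck benchmarks delivery with SkipCheck disabled
// for increasing network sizes and chunk counts.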
func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
	for chunks := 32; chunks <= 128; chunks *= 2 {
		for i := 2; i < 32; i *= 2 {
			b.Run(
				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
				func(b *testing.B) {
					benchmarkDeliveryFromNodes(b, i, 1, chunks, false)
				},
			)
		}
	}
}

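// benchmarkDeliveryFromNodes uploads chunkCount random chunks to the last node of a
// simulated chain network and measures how long the pivot node takes to retrieve
// them all through its NetStore.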
func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			id := ctx.Config.ID
			addr := network.NewAddrFromNodeID(id)
			store, datadir, err := createTestLocalStorageForID(id, addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

			r := NewRegistry(addr, delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				SkipCheck:       skipCheck,
				DoSync:          true,
				SyncUpdateDelay: 0,
			})

			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil

		},
	})
	defer sim.Close()

	log.Info("Initializing test config")
	_, err := sim.AddNodesAndConnectChain(nodes)
	if err != nil {
		b.Fatal(err)
	}

	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		node := nodeIDs[len(nodeIDs)-1]

		item, ok := sim.NodeItem(node, bucketKeyFileStore)
		if !ok {
			b.Fatal("No filestore")
		}
		remoteFileStore := item.(*storage.FileStore)

		pivotNode := nodeIDs[0]
		item, ok = sim.NodeItem(pivotNode, bucketKeyNetStore)
		if !ok {
			b.Fatal("No netstore")
		}
		netStore := item.(*storage.NetStore)

		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		disconnections := sim.PeerEvents(
			context.Background(),
			sim.NodeIDs(),
			simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
		)

		go func() {
			for d := range disconnections {
				if d.Error != nil {
					log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
					b.Fatal(d.Error)
				}
			}
		}()
		// benchmark loop
		b.ResetTimer()
		b.StopTimer()
	Loop:
		for i := 0; i < b.N; i++ {
			// uploading chunkCount random chunks to the last node
			hashes := make([]storage.Address, chunkCount)
			for i := 0; i < chunkCount; i++ {
				// create actual size real chunks
				ctx := context.TODO()
				hash, wait, err := remoteFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false)
				if err != nil {
					b.Fatalf("expected no error. got %v", err)
				}
				// wait until all chunks stored
				err = wait(ctx)
				if err != nil {
					b.Fatalf("expected no error. got %v", err)
				}
				// collect the hashes
				hashes[i] = hash
			}
			// now benchmark the actual retrieval
			// netstore.Get is called for each hash in a go routine and errors are collected
			b.StartTimer()
			errs := make(chan error)
			for _, hash := range hashes {
				go func(h storage.Address) {
					_, err := netStore.Get(ctx, h)
					log.Warn("test check netstore get", "hash", h, "err", err)
					errs <- err
				}(hash)
			}
			// count and report retrieval errors
			// if there are misses then chunk timeout is too low for the distance and volume (?)
			var total, misses int
			for err := range errs {
				if err != nil {
					log.Warn(err.Error())
					misses++
				}
				total++
				if total == chunkCount {
					break
				}
			}
			b.StopTimer()

			if misses > 0 {
				err = fmt.Errorf("%v chunk not found out of %v", misses, total)
				break Loop
			}
		}
		if err != nil {
			b.Fatal(err)
		}
		return nil
	})
	if result.Error != nil {
		b.Fatal(result.Error)
	}

}