github.com/Ethersocial/go-esn@v0.3.7/swarm/network/stream/delivery_test.go

     1  // Copyright 2018 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package stream
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	crand "crypto/rand"
    23  	"fmt"
    24  	"io"
    25  	"os"
    26  	"sync"
    27  	"testing"
    28  	"time"
    29  
    30  	"github.com/ethersocial/go-esn/node"
    31  	"github.com/ethersocial/go-esn/p2p"
    32  	"github.com/ethersocial/go-esn/p2p/simulations/adapters"
    33  	p2ptest "github.com/ethersocial/go-esn/p2p/testing"
    34  	"github.com/ethersocial/go-esn/swarm/log"
    35  	"github.com/ethersocial/go-esn/swarm/network"
    36  	"github.com/ethersocial/go-esn/swarm/network/simulation"
    37  	"github.com/ethersocial/go-esn/swarm/state"
    38  	"github.com/ethersocial/go-esn/swarm/storage"
    39  )
    40  
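         // TestStreamerRetrieveRequest checks that calling RequestFromPeers on the
         // delivery results in a single RetrieveRequestMsg, with SkipCheck set, being
         // sent to the connected test peer.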
    41  func TestStreamerRetrieveRequest(t *testing.T) {
    42  	tester, streamer, _, teardown, err := newStreamerTester(t, nil)
    43  	defer teardown()
    44  	if err != nil {
    45  		t.Fatal(err)
    46  	}
    47  
    48  	node := tester.Nodes[0]
    49  
    50  	ctx := context.Background()
    51  	req := network.NewRequest(
    52  		storage.Address(hash0[:]),
    53  		true,
    54  		&sync.Map{},
    55  	)
    56  	streamer.delivery.RequestFromPeers(ctx, req)
    57  
    58  	err = tester.TestExchanges(p2ptest.Exchange{
    59  		Label: "RetrieveRequestMsg",
    60  		Expects: []p2ptest.Expect{
    61  			{
    62  				Code: 5,
    63  				Msg: &RetrieveRequestMsg{
    64  					Addr:      hash0[:],
    65  					SkipCheck: true,
    66  				},
    67  				Peer: node.ID(),
    68  			},
    69  		},
    70  	})
    71  
    72  	if err != nil {
    73  		t.Fatalf("Expected no error, got %v", err)
    74  	}
    75  }
    76  
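         // TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore sends a
         // RetrieveRequestMsg for a chunk that is not in the local store. No
         // OfferedHashesMsg can be sent back, so the exchange is expected to time out.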
    77  func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
    78  	tester, streamer, _, teardown, err := newStreamerTester(t, nil)
    79  	defer teardown()
    80  	if err != nil {
    81  		t.Fatal(err)
    82  	}
    83  
    84  	node := tester.Nodes[0]
    85  
    86  	chunk := storage.NewChunk(storage.Address(hash0[:]), nil)
    87  
    88  	peer := streamer.getPeer(node.ID())
    89  
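         	// register the peer for the retrieval stream by simulating an incoming SubscribeMsg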
    90  	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
    91  		Stream:   NewStream(swarmChunkServerStreamName, "", false),
    92  		History:  nil,
    93  		Priority: Top,
    94  	})
    95  
    96  	err = tester.TestExchanges(p2ptest.Exchange{
    97  		Label: "RetrieveRequestMsg",
    98  		Triggers: []p2ptest.Trigger{
    99  			{
   100  				Code: 5,
   101  				Msg: &RetrieveRequestMsg{
   102  					Addr: chunk.Address()[:],
   103  				},
   104  				Peer: node.ID(),
   105  			},
   106  		},
   107  		Expects: []p2ptest.Expect{
   108  			{
   109  				Code: 1,
   110  				Msg: &OfferedHashesMsg{
   111  					HandoverProof: nil,
   112  					Hashes:        nil,
   113  					From:          0,
   114  					To:            0,
   115  				},
   116  				Peer: node.ID(),
   117  			},
   118  		},
   119  	})
   120  
   121  	expectedError := `exchange #0 "RetrieveRequestMsg": timed out`
   122  	if err == nil || err.Error() != expectedError {
   123  		t.Fatalf("Expected error %v, got %v", expectedError, err)
   124  	}
   125  }
   126  
    127  // The upstream request server receives a RetrieveRequestMsg and responds with
    128  // an OfferedHashesMsg, or with a direct chunk delivery if SkipCheck is set to true.
   129  func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
   130  	tester, streamer, localStore, teardown, err := newStreamerTester(t, nil)
   131  	defer teardown()
   132  	if err != nil {
   133  		t.Fatal(err)
   134  	}
   135  
   136  	node := tester.Nodes[0]
   137  	peer := streamer.getPeer(node.ID())
   138  
   139  	stream := NewStream(swarmChunkServerStreamName, "", false)
   140  
   141  	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
   142  		Stream:   stream,
   143  		History:  nil,
   144  		Priority: Top,
   145  	})
   146  
   147  	hash := storage.Address(hash0[:])
   148  	chunk := storage.NewChunk(hash, hash)
   149  	err = localStore.Put(context.TODO(), chunk)
   150  	if err != nil {
    151  		t.Fatalf("Expected no error, got %v", err)
   152  	}
   153  
   154  	err = tester.TestExchanges(p2ptest.Exchange{
   155  		Label: "RetrieveRequestMsg",
   156  		Triggers: []p2ptest.Trigger{
   157  			{
   158  				Code: 5,
   159  				Msg: &RetrieveRequestMsg{
   160  					Addr: hash,
   161  				},
   162  				Peer: node.ID(),
   163  			},
   164  		},
   165  		Expects: []p2ptest.Expect{
   166  			{
   167  				Code: 1,
   168  				Msg: &OfferedHashesMsg{
   169  					HandoverProof: &HandoverProof{
   170  						Handover: &Handover{},
   171  					},
   172  					Hashes: hash,
   173  					From:   0,
   174  					// TODO: why is this 32???
   175  					To:     32,
   176  					Stream: stream,
   177  				},
   178  				Peer: node.ID(),
   179  			},
   180  		},
   181  	})
   182  
   183  	if err != nil {
   184  		t.Fatal(err)
   185  	}
   186  
   187  	hash = storage.Address(hash1[:])
   188  	chunk = storage.NewChunk(hash, hash1[:])
   189  	err = localStore.Put(context.TODO(), chunk)
   190  	if err != nil {
    191  		t.Fatalf("Expected no error, got %v", err)
   192  	}
   193  
   194  	err = tester.TestExchanges(p2ptest.Exchange{
   195  		Label: "RetrieveRequestMsg",
   196  		Triggers: []p2ptest.Trigger{
   197  			{
   198  				Code: 5,
   199  				Msg: &RetrieveRequestMsg{
   200  					Addr:      hash,
   201  					SkipCheck: true,
   202  				},
   203  				Peer: node.ID(),
   204  			},
   205  		},
   206  		Expects: []p2ptest.Expect{
   207  			{
   208  				Code: 6,
   209  				Msg: &ChunkDeliveryMsg{
   210  					Addr:  hash,
   211  					SData: hash,
   212  				},
   213  				Peer: node.ID(),
   214  			},
   215  		},
   216  	})
   217  
   218  	if err != nil {
   219  		t.Fatal(err)
   220  	}
   221  }
   222  
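         // TestStreamerDownstreamChunkDeliveryMsgExchange subscribes to a stream on
         // the test peer, triggers a ChunkDeliveryMsg from that peer and verifies that
         // the delivered chunk ends up in the local store.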
   223  func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
   224  	tester, streamer, localStore, teardown, err := newStreamerTester(t, nil)
   225  	defer teardown()
   226  	if err != nil {
   227  		t.Fatal(err)
   228  	}
   229  
   230  	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
   231  		return &testClient{
   232  			t: t,
   233  		}, nil
   234  	})
   235  
   236  	node := tester.Nodes[0]
   237  
   238  	stream := NewStream("foo", "", true)
   239  	err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
   240  	if err != nil {
   241  		t.Fatalf("Expected no error, got %v", err)
   242  	}
   243  
   244  	chunkKey := hash0[:]
   245  	chunkData := hash1[:]
   246  
   247  	err = tester.TestExchanges(p2ptest.Exchange{
   248  		Label: "Subscribe message",
   249  		Expects: []p2ptest.Expect{
   250  			{
   251  				Code: 4,
   252  				Msg: &SubscribeMsg{
   253  					Stream:   stream,
   254  					History:  NewRange(5, 8),
   255  					Priority: Top,
   256  				},
   257  				Peer: node.ID(),
   258  			},
   259  		},
   260  	},
   261  		p2ptest.Exchange{
   262  			Label: "ChunkDelivery message",
   263  			Triggers: []p2ptest.Trigger{
   264  				{
   265  					Code: 6,
   266  					Msg: &ChunkDeliveryMsg{
   267  						Addr:  chunkKey,
   268  						SData: chunkData,
   269  					},
   270  					Peer: node.ID(),
   271  				},
   272  			},
   273  		})
   274  
   275  	if err != nil {
   276  		t.Fatalf("Expected no error, got %v", err)
   277  	}
   278  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
   279  	defer cancel()
   280  
   281  	// wait for the chunk to get stored
   282  	storedChunk, err := localStore.Get(ctx, chunkKey)
   283  	for err != nil {
   284  		select {
   285  		case <-ctx.Done():
   286  			t.Fatalf("Chunk is not in localstore after timeout, err: %v", err)
   287  		default:
   288  		}
   289  		storedChunk, err = localStore.Get(ctx, chunkKey)
   290  		time.Sleep(50 * time.Millisecond)
   291  	}
   292  
   293  	if err != nil {
   294  		t.Fatalf("Expected no error, got %v", err)
   295  	}
   296  
   297  	if !bytes.Equal(storedChunk.Data(), chunkData) {
   298  		t.Fatal("Retrieved chunk has different data than original")
   299  	}
   300  
   301  }
   302  
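         // TestDeliveryFromNodes runs the delivery simulation on chains of 2, 4, 8 and
         // 16 nodes, both with and without SkipCheck.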
   303  func TestDeliveryFromNodes(t *testing.T) {
   304  	testDeliveryFromNodes(t, 2, 1, dataChunkCount, true)
   305  	testDeliveryFromNodes(t, 2, 1, dataChunkCount, false)
   306  	testDeliveryFromNodes(t, 4, 1, dataChunkCount, true)
   307  	testDeliveryFromNodes(t, 4, 1, dataChunkCount, false)
   308  	testDeliveryFromNodes(t, 8, 1, dataChunkCount, true)
   309  	testDeliveryFromNodes(t, 8, 1, dataChunkCount, false)
   310  	testDeliveryFromNodes(t, 16, 1, dataChunkCount, true)
   311  	testDeliveryFromNodes(t, 16, 1, dataChunkCount, false)
   312  }
   313  
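         // testDeliveryFromNodes sets up a chain of simulated nodes, distributes the
         // chunks of a random file over all nodes except the pivot (the first node) and
         // checks that the pivot node can retrieve the complete file via the stream
         // protocol.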
   314  func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) {
   315  	sim := simulation.New(map[string]simulation.ServiceFunc{
   316  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   317  			node := ctx.Config.Node()
   318  			addr := network.NewAddr(node)
   319  			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
   320  			if err != nil {
   321  				return nil, nil, err
   322  			}
   323  			bucket.Store(bucketKeyStore, store)
   324  			cleanup = func() {
   325  				os.RemoveAll(datadir)
   326  				store.Close()
   327  			}
   328  			localStore := store.(*storage.LocalStore)
   329  			netStore, err := storage.NewNetStore(localStore, nil)
   330  			if err != nil {
   331  				return nil, nil, err
   332  			}
   333  
   334  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   335  			delivery := NewDelivery(kad, netStore)
   336  			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
   337  
   338  			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
   339  				SkipCheck: skipCheck,
   340  			})
   341  			bucket.Store(bucketKeyRegistry, r)
   342  
   343  			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
   344  			bucket.Store(bucketKeyFileStore, fileStore)
   345  
   346  			return r, cleanup, nil
   347  
   348  		},
   349  	})
   350  	defer sim.Close()
   351  
   352  	log.Info("Adding nodes to simulation")
   353  	_, err := sim.AddNodesAndConnectChain(nodes)
   354  	if err != nil {
   355  		t.Fatal(err)
   356  	}
   357  
   358  	log.Info("Starting simulation")
   359  	ctx := context.Background()
   360  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
   361  		nodeIDs := sim.UpNodeIDs()
   362  		//determine the pivot node to be the first node of the simulation
   363  		sim.SetPivotNode(nodeIDs[0])
   364  		//distribute chunks of a random file into Stores of nodes 1 to nodes
   365  		//we will do this by creating a file store with an underlying round-robin store:
   366  		//the file store will create a hash for the uploaded file, but every chunk will be
   367  		//distributed to different nodes via round-robin scheduling
   368  		log.Debug("Writing file to round-robin file store")
   369  		//to do this, we create an array for chunkstores (length minus one, the pivot node)
   370  		stores := make([]storage.ChunkStore, len(nodeIDs)-1)
   371  		//we then need to get all stores from the sim....
   372  		lStores := sim.NodesItems(bucketKeyStore)
   373  		i := 0
   374  		//...iterate the buckets...
   375  		for id, bucketVal := range lStores {
   376  			//...and remove the one which is the pivot node
   377  			if id == *sim.PivotNodeID() {
   378  				continue
   379  			}
   380  			//the other ones are added to the array...
   381  			stores[i] = bucketVal.(storage.ChunkStore)
   382  			i++
   383  		}
   384  		//...which then gets passed to the round-robin file store
   385  		roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams())
   386  		//now we can actually upload a (random) file to the round-robin store
   387  		size := chunkCount * chunkSize
   388  		log.Debug("Storing data to file store")
   389  		fileHash, wait, err := roundRobinFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
   390  		// wait until all chunks stored
   391  		if err != nil {
   392  			return err
   393  		}
   394  		err = wait(ctx)
   395  		if err != nil {
   396  			return err
   397  		}
   398  
   399  		log.Debug("Waiting for kademlia")
   400  		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   401  			return err
   402  		}
   403  
   404  		//each of the nodes (except pivot node) subscribes to the stream of the next node
   405  		for j, node := range nodeIDs[0 : nodes-1] {
   406  			sid := nodeIDs[j+1]
   407  			item, ok := sim.NodeItem(node, bucketKeyRegistry)
   408  			if !ok {
   409  				return fmt.Errorf("No registry")
   410  			}
   411  			registry := item.(*Registry)
   412  			err = registry.Subscribe(sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top)
   413  			if err != nil {
   414  				return err
   415  			}
   416  		}
   417  
   418  		//get the pivot node's filestore
   419  		item, ok := sim.NodeItem(*sim.PivotNodeID(), bucketKeyFileStore)
   420  		if !ok {
   421  			return fmt.Errorf("No filestore")
   422  		}
   423  		pivotFileStore := item.(*storage.FileStore)
   424  		log.Debug("Starting retrieval routine")
   425  		go func() {
   426  			// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
   427  			// we must wait for the peer connections to have started before requesting
   428  			n, err := readAll(pivotFileStore, fileHash)
   429  			log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
   430  			if err != nil {
    431  				t.Errorf("requesting chunks action error: %v", err) // Fatalf must not be called from a non-test goroutine
   432  			}
   433  		}()
   434  
   435  		log.Debug("Watching for disconnections")
   436  		disconnections := sim.PeerEvents(
   437  			context.Background(),
   438  			sim.NodeIDs(),
   439  			simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
   440  		)
   441  
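         		// an unexpected peer drop while retrieval is running is reported as a test failure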
   442  		go func() {
   443  			for d := range disconnections {
   444  				if d.Error != nil {
   445  					log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
    446  					t.Error(d.Error) // Fatal must not be called from a non-test goroutine
   447  				}
   448  			}
   449  		}()
   450  
   451  		//finally check that the pivot node gets all chunks via the root hash
   452  		log.Debug("Check retrieval")
   453  		success := true
   454  		var total int64
   455  		total, err = readAll(pivotFileStore, fileHash)
   456  		if err != nil {
   457  			return err
   458  		}
   459  		log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
   460  		if err != nil || total != int64(size) {
   461  			success = false
   462  		}
   463  
   464  		if !success {
   465  			return fmt.Errorf("Test failed, chunks not available on all nodes")
   466  		}
   467  		log.Debug("Test terminated successfully")
   468  		return nil
   469  	})
   470  	if result.Error != nil {
   471  		t.Fatal(result.Error)
   472  	}
   473  }
   474  
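         // BenchmarkDeliveryFromNodesWithoutCheck benchmarks retrieval with SkipCheck
         // enabled (chunks are delivered directly, without the offered/wanted hashes
         // round trip) for 2 to 16 nodes and 32 to 128 chunks.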
   475  func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
   476  	for chunks := 32; chunks <= 128; chunks *= 2 {
   477  		for i := 2; i < 32; i *= 2 {
   478  			b.Run(
   479  				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
   480  				func(b *testing.B) {
   481  					benchmarkDeliveryFromNodes(b, i, 1, chunks, true)
   482  				},
   483  			)
   484  		}
   485  	}
   486  }
   487  
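         // BenchmarkDeliveryFromNodesWithCheck benchmarks the same retrieval with
         // SkipCheck disabled, so every chunk goes through the offered/wanted hashes
         // exchange before it is delivered.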
   488  func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
   489  	for chunks := 32; chunks <= 128; chunks *= 2 {
   490  		for i := 2; i < 32; i *= 2 {
   491  			b.Run(
   492  				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
   493  				func(b *testing.B) {
   494  					benchmarkDeliveryFromNodes(b, i, 1, chunks, false)
   495  				},
   496  			)
   497  		}
   498  	}
   499  }
   500  
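         // benchmarkDeliveryFromNodes sets up a chain of simulated nodes, uploads
         // chunkCount random chunks to the last node of the chain and measures how long
         // the pivot node (the first node) needs to fetch all of them through its
         // NetStore.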
   501  func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) {
   502  	sim := simulation.New(map[string]simulation.ServiceFunc{
   503  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   504  			node := ctx.Config.Node()
   505  			addr := network.NewAddr(node)
   506  			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
   507  			if err != nil {
   508  				return nil, nil, err
   509  			}
   510  			bucket.Store(bucketKeyStore, store)
   511  			cleanup = func() {
   512  				os.RemoveAll(datadir)
   513  				store.Close()
   514  			}
   515  			localStore := store.(*storage.LocalStore)
   516  			netStore, err := storage.NewNetStore(localStore, nil)
   517  			if err != nil {
   518  				return nil, nil, err
   519  			}
   520  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   521  			delivery := NewDelivery(kad, netStore)
   522  			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
   523  
   524  			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
   525  				SkipCheck:       skipCheck,
   526  				DoSync:          true,
   527  				SyncUpdateDelay: 0,
   528  			})
   529  
   530  			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
   531  			bucket.Store(bucketKeyFileStore, fileStore)
   532  
   533  			return r, cleanup, nil
   534  
   535  		},
   536  	})
   537  	defer sim.Close()
   538  
   539  	log.Info("Initializing test config")
   540  	_, err := sim.AddNodesAndConnectChain(nodes)
   541  	if err != nil {
   542  		b.Fatal(err)
   543  	}
   544  
   545  	ctx := context.Background()
   546  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
   547  		nodeIDs := sim.UpNodeIDs()
   548  		node := nodeIDs[len(nodeIDs)-1]
   549  
   550  		item, ok := sim.NodeItem(node, bucketKeyFileStore)
   551  		if !ok {
   552  			b.Fatal("No filestore")
   553  		}
   554  		remoteFileStore := item.(*storage.FileStore)
   555  
   556  		pivotNode := nodeIDs[0]
   557  		item, ok = sim.NodeItem(pivotNode, bucketKeyNetStore)
   558  		if !ok {
    559  			b.Fatal("No netstore")
   560  		}
   561  		netStore := item.(*storage.NetStore)
   562  
   563  		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   564  			return err
   565  		}
   566  
   567  		disconnections := sim.PeerEvents(
   568  			context.Background(),
   569  			sim.NodeIDs(),
   570  			simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
   571  		)
   572  
   573  		go func() {
   574  			for d := range disconnections {
   575  				if d.Error != nil {
   576  					log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
    577  					b.Error(d.Error) // Fatal must not be called from a non-test goroutine
   578  				}
   579  			}
   580  		}()
   581  		// benchmark loop
   582  		b.ResetTimer()
   583  		b.StopTimer()
   584  	Loop:
   585  		for i := 0; i < b.N; i++ {
   586  			// uploading chunkCount random chunks to the last node
   587  			hashes := make([]storage.Address, chunkCount)
   588  			for i := 0; i < chunkCount; i++ {
   589  				// create actual size real chunks
   590  				ctx := context.TODO()
   591  				hash, wait, err := remoteFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false)
   592  				if err != nil {
   593  					b.Fatalf("expected no error. got %v", err)
   594  				}
   595  				// wait until all chunks stored
   596  				err = wait(ctx)
   597  				if err != nil {
   598  					b.Fatalf("expected no error. got %v", err)
   599  				}
   600  				// collect the hashes
   601  				hashes[i] = hash
   602  			}
   603  			// now benchmark the actual retrieval
   604  			// netstore.Get is called for each hash in a go routine and errors are collected
   605  			b.StartTimer()
   606  			errs := make(chan error)
   607  			for _, hash := range hashes {
   608  				go func(h storage.Address) {
   609  					_, err := netStore.Get(ctx, h)
   610  					log.Warn("test check netstore get", "hash", h, "err", err)
   611  					errs <- err
   612  				}(hash)
   613  			}
   614  			// count and report retrieval errors
   615  			// if there are misses then chunk timeout is too low for the distance and volume (?)
   616  			var total, misses int
   617  			for err := range errs {
   618  				if err != nil {
   619  					log.Warn(err.Error())
   620  					misses++
   621  				}
   622  				total++
   623  				if total == chunkCount {
   624  					break
   625  				}
   626  			}
   627  			b.StopTimer()
   628  
   629  			if misses > 0 {
    630  				err = fmt.Errorf("%v chunks not found out of %v", misses, total)
   631  				break Loop
   632  			}
   633  		}
   634  		if err != nil {
   635  			b.Fatal(err)
   636  		}
   637  		return nil
   638  	})
   639  	if result.Error != nil {
   640  		b.Fatal(result.Error)
   641  	}
   642  
   643  }