github.com/divan/go-ethereum@v1.8.14-0.20180820134928-1de9ada4016d/swarm/network/stream/delivery_test.go

     1  // Copyright 2018 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package stream
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	crand "crypto/rand"
    23  	"fmt"
    24  	"io"
    25  	"os"
    26  	"sync"
    27  	"testing"
    28  	"time"
    29  
    30  	"github.com/ethereum/go-ethereum/node"
    31  	"github.com/ethereum/go-ethereum/p2p"
    32  	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
    33  	p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
    34  	"github.com/ethereum/go-ethereum/swarm/log"
    35  	"github.com/ethereum/go-ethereum/swarm/network"
    36  	"github.com/ethereum/go-ethereum/swarm/network/simulation"
    37  	"github.com/ethereum/go-ethereum/swarm/state"
    38  	"github.com/ethereum/go-ethereum/swarm/storage"
    39  )
    40  
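         // TestStreamerRetrieveRequest checks that a RequestFromPeers call on the delivery
         // makes the streamer send a RetrieveRequestMsg with SkipCheck set to the connected test peer.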
    41  func TestStreamerRetrieveRequest(t *testing.T) {
    42  	tester, streamer, _, teardown, err := newStreamerTester(t)
    43  	defer teardown()
    44  	if err != nil {
    45  		t.Fatal(err)
    46  	}
    47  
    48  	peerID := tester.IDs[0]
    49  
    50  	streamer.delivery.RequestFromPeers(context.TODO(), hash0[:], true)
    51  
    52  	err = tester.TestExchanges(p2ptest.Exchange{
    53  		Label: "RetrieveRequestMsg",
    54  		Expects: []p2ptest.Expect{
    55  			{
    56  				Code: 5,
    57  				Msg: &RetrieveRequestMsg{
    58  					Addr:      hash0[:],
    59  					SkipCheck: true,
    60  				},
    61  				Peer: peerID,
    62  			},
    63  		},
    64  	})
    65  
    66  	if err != nil {
    67  		t.Fatalf("Expected no error, got %v", err)
    68  	}
    69  }
    70  
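         // TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore checks that a retrieve request
         // for a chunk that is not in the local store gets no OfferedHashesMsg reply, so the
         // expected exchange times out.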
    71  func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
    72  	tester, streamer, _, teardown, err := newStreamerTester(t)
    73  	defer teardown()
    74  	if err != nil {
    75  		t.Fatal(err)
    76  	}
    77  
    78  	peerID := tester.IDs[0]
    79  
    80  	chunk := storage.NewChunk(storage.Address(hash0[:]), nil)
    81  
    82  	peer := streamer.getPeer(peerID)
    83  
    84  	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
    85  		Stream:   NewStream(swarmChunkServerStreamName, "", false),
    86  		History:  nil,
    87  		Priority: Top,
    88  	})
    89  
    90  	err = tester.TestExchanges(p2ptest.Exchange{
    91  		Label: "RetrieveRequestMsg",
    92  		Triggers: []p2ptest.Trigger{
    93  			{
    94  				Code: 5,
    95  				Msg: &RetrieveRequestMsg{
    96  					Addr: chunk.Addr[:],
    97  				},
    98  				Peer: peerID,
    99  			},
   100  		},
   101  		Expects: []p2ptest.Expect{
   102  			{
   103  				Code: 1,
   104  				Msg: &OfferedHashesMsg{
   105  					HandoverProof: nil,
   106  					Hashes:        nil,
   107  					From:          0,
   108  					To:            0,
   109  				},
   110  				Peer: peerID,
   111  			},
   112  		},
   113  	})
   114  
   115  	expectedError := `exchange #0 "RetrieveRequestMsg": timed out`
   116  	if err == nil || err.Error() != expectedError {
   117  		t.Fatalf("Expected error %v, got %v", expectedError, err)
   118  	}
   119  }
   120  
    121  // TestStreamerUpstreamRetrieveRequestMsgExchange checks that an upstream request server receives a
    122  // retrieve request and responds with offered hashes, or with a direct chunk delivery when SkipCheck is set.
   123  func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
   124  	tester, streamer, localStore, teardown, err := newStreamerTester(t)
   125  	defer teardown()
   126  	if err != nil {
   127  		t.Fatal(err)
   128  	}
   129  
   130  	peerID := tester.IDs[0]
   131  	peer := streamer.getPeer(peerID)
   132  
   133  	stream := NewStream(swarmChunkServerStreamName, "", false)
   134  
   135  	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
   136  		Stream:   stream,
   137  		History:  nil,
   138  		Priority: Top,
   139  	})
   140  
   141  	hash := storage.Address(hash0[:])
   142  	chunk := storage.NewChunk(hash, nil)
   143  	chunk.SData = hash
   144  	localStore.Put(context.TODO(), chunk)
   145  	chunk.WaitToStore()
   146  
   147  	err = tester.TestExchanges(p2ptest.Exchange{
   148  		Label: "RetrieveRequestMsg",
   149  		Triggers: []p2ptest.Trigger{
   150  			{
   151  				Code: 5,
   152  				Msg: &RetrieveRequestMsg{
   153  					Addr: hash,
   154  				},
   155  				Peer: peerID,
   156  			},
   157  		},
   158  		Expects: []p2ptest.Expect{
   159  			{
   160  				Code: 1,
   161  				Msg: &OfferedHashesMsg{
   162  					HandoverProof: &HandoverProof{
   163  						Handover: &Handover{},
   164  					},
   165  					Hashes: hash,
   166  					From:   0,
   167  					// TODO: why is this 32???
   168  					To:     32,
   169  					Stream: stream,
   170  				},
   171  				Peer: peerID,
   172  			},
   173  		},
   174  	})
   175  
   176  	if err != nil {
   177  		t.Fatal(err)
   178  	}
   179  
   180  	hash = storage.Address(hash1[:])
   181  	chunk = storage.NewChunk(hash, nil)
   182  	chunk.SData = hash1[:]
   183  	localStore.Put(context.TODO(), chunk)
   184  	chunk.WaitToStore()
   185  
   186  	err = tester.TestExchanges(p2ptest.Exchange{
   187  		Label: "RetrieveRequestMsg",
   188  		Triggers: []p2ptest.Trigger{
   189  			{
   190  				Code: 5,
   191  				Msg: &RetrieveRequestMsg{
   192  					Addr:      hash,
   193  					SkipCheck: true,
   194  				},
   195  				Peer: peerID,
   196  			},
   197  		},
   198  		Expects: []p2ptest.Expect{
   199  			{
   200  				Code: 6,
   201  				Msg: &ChunkDeliveryMsg{
   202  					Addr:  hash,
   203  					SData: hash,
   204  				},
   205  				Peer: peerID,
   206  			},
   207  		},
   208  	})
   209  
   210  	if err != nil {
   211  		t.Fatal(err)
   212  	}
   213  }
   214  
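         // TestStreamerDownstreamChunkDeliveryMsgExchange checks that an incoming ChunkDeliveryMsg
         // stores the delivered chunk data locally and fulfils the pending retrieve request.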
   215  func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
   216  	tester, streamer, localStore, teardown, err := newStreamerTester(t)
   217  	defer teardown()
   218  	if err != nil {
   219  		t.Fatal(err)
   220  	}
   221  
   222  	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
   223  		return &testClient{
   224  			t: t,
   225  		}, nil
   226  	})
   227  
   228  	peerID := tester.IDs[0]
   229  
   230  	stream := NewStream("foo", "", true)
   231  	err = streamer.Subscribe(peerID, stream, NewRange(5, 8), Top)
   232  	if err != nil {
   233  		t.Fatalf("Expected no error, got %v", err)
   234  	}
   235  
   236  	chunkKey := hash0[:]
   237  	chunkData := hash1[:]
   238  	chunk, created := localStore.GetOrCreateRequest(context.TODO(), chunkKey)
   239  
   240  	if !created {
   241  		t.Fatal("chunk already exists")
   242  	}
   243  	select {
   244  	case <-chunk.ReqC:
   245  		t.Fatal("chunk is already received")
   246  	default:
   247  	}
   248  
   249  	err = tester.TestExchanges(p2ptest.Exchange{
   250  		Label: "Subscribe message",
   251  		Expects: []p2ptest.Expect{
   252  			{
   253  				Code: 4,
   254  				Msg: &SubscribeMsg{
   255  					Stream:   stream,
   256  					History:  NewRange(5, 8),
   257  					Priority: Top,
   258  				},
   259  				Peer: peerID,
   260  			},
   261  		},
   262  	},
   263  		p2ptest.Exchange{
   264  			Label: "ChunkDeliveryRequest message",
   265  			Triggers: []p2ptest.Trigger{
   266  				{
   267  					Code: 6,
   268  					Msg: &ChunkDeliveryMsg{
   269  						Addr:  chunkKey,
   270  						SData: chunkData,
   271  					},
   272  					Peer: peerID,
   273  				},
   274  			},
   275  		})
   276  
   277  	if err != nil {
   278  		t.Fatalf("Expected no error, got %v", err)
   279  	}
   280  
   281  	timeout := time.NewTimer(1 * time.Second)
   282  
   283  	select {
   284  	case <-timeout.C:
   285  		t.Fatal("timeout receiving chunk")
   286  	case <-chunk.ReqC:
   287  	}
   288  
   289  	storedChunk, err := localStore.Get(context.TODO(), chunkKey)
   290  	if err != nil {
   291  		t.Fatalf("Expected no error, got %v", err)
   292  	}
   293  
   294  	if !bytes.Equal(storedChunk.SData, chunkData) {
   295  		t.Fatal("Retrieved chunk has different data than original")
   296  	}
   297  
   298  }
   299  
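         // TestDeliveryFromNodes runs the delivery test over chains of 2, 4, 8 and 16 nodes,
         // both with and without SkipCheck.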
   300  func TestDeliveryFromNodes(t *testing.T) {
   301  	testDeliveryFromNodes(t, 2, 1, dataChunkCount, true)
   302  	testDeliveryFromNodes(t, 2, 1, dataChunkCount, false)
   303  	testDeliveryFromNodes(t, 4, 1, dataChunkCount, true)
   304  	testDeliveryFromNodes(t, 4, 1, dataChunkCount, false)
   305  	testDeliveryFromNodes(t, 8, 1, dataChunkCount, true)
   306  	testDeliveryFromNodes(t, 8, 1, dataChunkCount, false)
   307  	testDeliveryFromNodes(t, 16, 1, dataChunkCount, true)
   308  	testDeliveryFromNodes(t, 16, 1, dataChunkCount, false)
   309  }
   310  
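         // testDeliveryFromNodes sets up a chain of nodes running the streamer service, uploads a
         // random file whose chunks are spread round-robin over all nodes except the pivot node,
         // and then checks that the pivot node can retrieve the complete file via retrieve requests.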
   311  func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) {
   312  	sim := simulation.New(map[string]simulation.ServiceFunc{
   313  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   314  
   315  			id := ctx.Config.ID
   316  			addr := network.NewAddrFromNodeID(id)
   317  			store, datadir, err := createTestLocalStorageForID(id, addr)
   318  			if err != nil {
   319  				return nil, nil, err
   320  			}
   321  			bucket.Store(bucketKeyStore, store)
   322  			cleanup = func() {
   323  				os.RemoveAll(datadir)
   324  				store.Close()
   325  			}
   326  			localStore := store.(*storage.LocalStore)
   327  			db := storage.NewDBAPI(localStore)
   328  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   329  			delivery := NewDelivery(kad, db)
   330  
   331  			r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
   332  				SkipCheck: skipCheck,
   333  			})
   334  			bucket.Store(bucketKeyRegistry, r)
   335  
   336  			retrieveFunc := func(ctx context.Context, chunk *storage.Chunk) error {
   337  				return delivery.RequestFromPeers(ctx, chunk.Addr[:], skipCheck)
   338  			}
   339  			netStore := storage.NewNetStore(localStore, retrieveFunc)
   340  			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
   341  			bucket.Store(bucketKeyFileStore, fileStore)
   342  
   343  			return r, cleanup, nil
   344  
   345  		},
   346  	})
   347  	defer sim.Close()
   348  
   349  	log.Info("Adding nodes to simulation")
   350  	_, err := sim.AddNodesAndConnectChain(nodes)
   351  	if err != nil {
   352  		t.Fatal(err)
   353  	}
   354  
   355  	log.Info("Starting simulation")
   356  	ctx := context.Background()
   357  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
   358  		nodeIDs := sim.UpNodeIDs()
   359  		//determine the pivot node to be the first node of the simulation
   360  		sim.SetPivotNode(nodeIDs[0])
   361  		//distribute chunks of a random file into Stores of nodes 1 to nodes
   362  		//we will do this by creating a file store with an underlying round-robin store:
   363  		//the file store will create a hash for the uploaded file, but every chunk will be
   364  		//distributed to different nodes via round-robin scheduling
   365  		log.Debug("Writing file to round-robin file store")
    366  		//to do this, we create an array of chunk stores (one for every node except the pivot)
   367  		stores := make([]storage.ChunkStore, len(nodeIDs)-1)
   368  		//we then need to get all stores from the sim....
   369  		lStores := sim.NodesItems(bucketKeyStore)
   370  		i := 0
   371  		//...iterate the buckets...
   372  		for id, bucketVal := range lStores {
   373  			//...and remove the one which is the pivot node
   374  			if id == *sim.PivotNodeID() {
   375  				continue
   376  			}
   377  			//the other ones are added to the array...
   378  			stores[i] = bucketVal.(storage.ChunkStore)
   379  			i++
   380  		}
   381  		//...which then gets passed to the round-robin file store
   382  		roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams())
   383  		//now we can actually upload a (random) file to the round-robin store
   384  		size := chunkCount * chunkSize
   385  		log.Debug("Storing data to file store")
   386  		fileHash, wait, err := roundRobinFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
   387  		// wait until all chunks stored
   388  		if err != nil {
   389  			return err
   390  		}
   391  		err = wait(ctx)
   392  		if err != nil {
   393  			return err
   394  		}
   395  
   396  		log.Debug("Waiting for kademlia")
   397  		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   398  			return err
   399  		}
   400  
    401  		//each node (except the last one) subscribes to the stream of the next node in the chain
   402  		for j, node := range nodeIDs[0 : nodes-1] {
   403  			sid := nodeIDs[j+1]
   404  			item, ok := sim.NodeItem(node, bucketKeyRegistry)
   405  			if !ok {
   406  				return fmt.Errorf("No registry")
   407  			}
   408  			registry := item.(*Registry)
   409  			err = registry.Subscribe(sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top)
   410  			if err != nil {
   411  				return err
   412  			}
   413  		}
   414  
   415  		//get the pivot node's filestore
   416  		item, ok := sim.NodeItem(*sim.PivotNodeID(), bucketKeyFileStore)
   417  		if !ok {
   418  			return fmt.Errorf("No filestore")
   419  		}
   420  		pivotFileStore := item.(*storage.FileStore)
   421  		log.Debug("Starting retrieval routine")
   422  		go func() {
   423  			// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
   424  			// we must wait for the peer connections to have started before requesting
   425  			n, err := readAll(pivotFileStore, fileHash)
   426  			log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
   427  			if err != nil {
    428  				t.Errorf("requesting chunks action error: %v", err) // Fatalf must not be called from a goroutine other than the test goroutine
   429  			}
   430  		}()
   431  
   432  		log.Debug("Watching for disconnections")
   433  		disconnections := sim.PeerEvents(
   434  			context.Background(),
   435  			sim.NodeIDs(),
   436  			simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
   437  		)
   438  
   439  		go func() {
   440  			for d := range disconnections {
   441  				if d.Error != nil {
   442  					log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
    443  					t.Error(d.Error) // Fatal must not be called from a goroutine other than the test goroutine
   444  				}
   445  			}
   446  		}()
   447  
   448  		//finally check that the pivot node gets all chunks via the root hash
   449  		log.Debug("Check retrieval")
   450  		success := true
   451  		var total int64
   452  		total, err = readAll(pivotFileStore, fileHash)
   453  		if err != nil {
   454  			return err
   455  		}
   456  		log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
    457  		if total != int64(size) { // err is always nil here, it was checked above
   458  			success = false
   459  		}
   460  
   461  		if !success {
   462  			return fmt.Errorf("Test failed, chunks not available on all nodes")
   463  		}
   464  		log.Debug("Test terminated successfully")
   465  		return nil
   466  	})
   467  	if result.Error != nil {
   468  		t.Fatal(result.Error)
   469  	}
   470  }
   471  
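         // BenchmarkDeliveryFromNodesWithoutCheck benchmarks chunk delivery with SkipCheck enabled
         // for chains of 2 to 16 nodes and 32 to 128 chunks per run.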
   472  func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
   473  	for chunks := 32; chunks <= 128; chunks *= 2 {
   474  		for i := 2; i < 32; i *= 2 {
   475  			b.Run(
   476  				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
   477  				func(b *testing.B) {
   478  					benchmarkDeliveryFromNodes(b, i, 1, chunks, true)
   479  				},
   480  			)
   481  		}
   482  	}
   483  }
   484  
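         // BenchmarkDeliveryFromNodesWithCheck benchmarks chunk delivery with SkipCheck disabled
         // for the same node and chunk counts.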
   485  func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
   486  	for chunks := 32; chunks <= 128; chunks *= 2 {
   487  		for i := 2; i < 32; i *= 2 {
   488  			b.Run(
   489  				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
   490  				func(b *testing.B) {
   491  					benchmarkDeliveryFromNodes(b, i, 1, chunks, false)
   492  				},
   493  			)
   494  		}
   495  	}
   496  }
   497  
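         // benchmarkDeliveryFromNodes uploads chunkCount random chunks to the last node of a
         // simulated chain and measures how long the pivot node needs to fetch them all through
         // its NetStore.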
   498  func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) {
   499  	sim := simulation.New(map[string]simulation.ServiceFunc{
   500  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   501  
   502  			id := ctx.Config.ID
   503  			addr := network.NewAddrFromNodeID(id)
   504  			store, datadir, err := createTestLocalStorageForID(id, addr)
   505  			if err != nil {
   506  				return nil, nil, err
   507  			}
   508  			bucket.Store(bucketKeyStore, store)
   509  			cleanup = func() {
   510  				os.RemoveAll(datadir)
   511  				store.Close()
   512  			}
   513  			localStore := store.(*storage.LocalStore)
   514  			db := storage.NewDBAPI(localStore)
   515  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   516  			delivery := NewDelivery(kad, db)
   517  
   518  			r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
   519  				SkipCheck:       skipCheck,
   520  				DoSync:          true,
   521  				SyncUpdateDelay: 0,
   522  			})
   523  
   524  			retrieveFunc := func(ctx context.Context, chunk *storage.Chunk) error {
   525  				return delivery.RequestFromPeers(ctx, chunk.Addr[:], skipCheck)
   526  			}
   527  			netStore := storage.NewNetStore(localStore, retrieveFunc)
   528  			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
   529  			bucket.Store(bucketKeyFileStore, fileStore)
   530  
   531  			return r, cleanup, nil
   532  
   533  		},
   534  	})
   535  	defer sim.Close()
   536  
   537  	log.Info("Initializing test config")
   538  	_, err := sim.AddNodesAndConnectChain(nodes)
   539  	if err != nil {
   540  		b.Fatal(err)
   541  	}
   542  
   543  	ctx := context.Background()
   544  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
   545  		nodeIDs := sim.UpNodeIDs()
   546  		node := nodeIDs[len(nodeIDs)-1]
   547  
   548  		item, ok := sim.NodeItem(node, bucketKeyFileStore)
   549  		if !ok {
    550  			return fmt.Errorf("No filestore") // propagate the error to result.Error instead of calling Fatal here
   551  		}
   552  		remoteFileStore := item.(*storage.FileStore)
   553  
   554  		pivotNode := nodeIDs[0]
   555  		item, ok = sim.NodeItem(pivotNode, bucketKeyNetStore)
   556  		if !ok {
    557  			return fmt.Errorf("No netstore")
   558  		}
   559  		netStore := item.(*storage.NetStore)
   560  
   561  		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   562  			return err
   563  		}
   564  
   565  		disconnections := sim.PeerEvents(
   566  			context.Background(),
   567  			sim.NodeIDs(),
   568  			simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
   569  		)
   570  
   571  		go func() {
   572  			for d := range disconnections {
   573  				if d.Error != nil {
   574  					log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
    575  					b.Error(d.Error) // Fatal must not be called from a goroutine other than the benchmark goroutine
   576  				}
   577  			}
   578  		}()
   579  		// benchmark loop
   580  		b.ResetTimer()
   581  		b.StopTimer()
   582  	Loop:
   583  		for i := 0; i < b.N; i++ {
   584  			// uploading chunkCount random chunks to the last node
   585  			hashes := make([]storage.Address, chunkCount)
   586  			for i := 0; i < chunkCount; i++ {
    587  				// create chunks of the actual chunk size from random data
   588  				ctx := context.TODO()
   589  				hash, wait, err := remoteFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false)
   590  				if err != nil {
    591  					return fmt.Errorf("expected no error. got %v", err)
   592  				}
   593  				// wait until all chunks stored
   594  				err = wait(ctx)
   595  				if err != nil {
    596  					return fmt.Errorf("expected no error. got %v", err)
   597  				}
   598  				// collect the hashes
   599  				hashes[i] = hash
   600  			}
   601  			// now benchmark the actual retrieval
   602  			// netstore.Get is called for each hash in a go routine and errors are collected
   603  			b.StartTimer()
   604  			errs := make(chan error)
   605  			for _, hash := range hashes {
   606  				go func(h storage.Address) {
   607  					_, err := netStore.Get(ctx, h)
   608  					log.Warn("test check netstore get", "hash", h, "err", err)
   609  					errs <- err
   610  				}(hash)
   611  			}
   612  			// count and report retrieval errors
   613  			// if there are misses then chunk timeout is too low for the distance and volume (?)
   614  			var total, misses int
   615  			for err := range errs {
   616  				if err != nil {
   617  					log.Warn(err.Error())
   618  					misses++
   619  				}
   620  				total++
   621  				if total == chunkCount {
   622  					break
   623  				}
   624  			}
   625  			b.StopTimer()
   626  
   627  			if misses > 0 {
    628  				err = fmt.Errorf("%v chunks not found out of %v", misses, total)
   629  				break Loop
   630  			}
   631  		}
   632  		if err != nil {
    633  			return err
   634  		}
   635  		return nil
   636  	})
   637  	if result.Error != nil {
   638  		b.Fatal(result.Error)
   639  	}
   640  
   641  }