github.com/linapex/ethereum-dpos-chinese@v0.0.0-20190316121959-b78b3a4a1ece/swarm/network/stream/delivery_test.go

//<developer>
//    <name>linapex 曹一峰</name>
//    <email>linapex@163.com</email>
//    <wx>superexc</wx>
//    <qqgroup>128148617</qqgroup>
//    <url>https://jsq.ink</url>
//    <role>pku engineer</role>
//    <date>2019-03-16 12:09:48</date>
//</624342675281154048>

package stream

import (
	"bytes"
	"context"
	crand "crypto/rand"
	"fmt"
	"io"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

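// TestStreamerRetrieveRequest checks that a call to RequestFromPeers results in
// a RetrieveRequestMsg with SkipCheck set being sent to the connected peer.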
func TestStreamerRetrieveRequest(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	peerID := tester.IDs[0]

	streamer.delivery.RequestFromPeers(context.TODO(), hash0[:], true)

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Expects: []p2ptest.Expect{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr:      hash0[:],
					SkipCheck: true,
				},
				Peer: peerID,
			},
		},
	})

	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}
}

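// TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore checks that a
// retrieve request for a chunk that is not in the local store produces no
// OfferedHashesMsg, so the expected exchange times out.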
func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	peerID := tester.IDs[0]

	chunk := storage.NewChunk(storage.Address(hash0[:]), nil)

	peer := streamer.getPeer(peerID)

	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
		Stream:   NewStream(swarmChunkServerStreamName, "", false),
		History:  nil,
		Priority: Top,
	})

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr: chunk.Addr[:],
				},
				Peer: peerID,
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 1,
				Msg: &OfferedHashesMsg{
					HandoverProof: nil,
					Hashes:        nil,
					From:          0,
					To:            0,
				},
				Peer: peerID,
			},
		},
	})

	expectedError := `exchange #0 "RetrieveRequestMsg": timed out`
	if err == nil || err.Error() != expectedError {
		t.Fatalf("Expected error %v, got %v", expectedError, err)
	}
}

// TestStreamerUpstreamRetrieveRequestMsgExchange checks that a retrieve request
// for a chunk present in the local store is answered with an OfferedHashesMsg,
// and that a request with SkipCheck set is answered directly with a
// ChunkDeliveryMsg carrying the chunk data.
func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
	tester, streamer, localStore, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	peerID := tester.IDs[0]
	peer := streamer.getPeer(peerID)

	stream := NewStream(swarmChunkServerStreamName, "", false)

	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
		Stream:   stream,
		History:  nil,
		Priority: Top,
	})

	hash := storage.Address(hash0[:])
	chunk := storage.NewChunk(hash, nil)
	chunk.SData = hash
	localStore.Put(context.TODO(), chunk)
	chunk.WaitToStore()

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr: hash,
				},
				Peer: peerID,
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 1,
				Msg: &OfferedHashesMsg{
					HandoverProof: &HandoverProof{
						Handover: &Handover{},
					},
					Hashes: hash,
					From:   0,
					To:     32,
					Stream: stream,
				},
				Peer: peerID,
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}

	hash = storage.Address(hash1[:])
	chunk = storage.NewChunk(hash, nil)
	chunk.SData = hash1[:]
	localStore.Put(context.TODO(), chunk)
	chunk.WaitToStore()

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr:      hash,
					SkipCheck: true,
				},
				Peer: peerID,
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 6,
				Msg: &ChunkDeliveryMsg{
					Addr:  hash,
					SData: hash,
				},
				Peer: peerID,
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}
}

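// TestStreamerDownstreamChunkDeliveryMsgExchange checks that an incoming
// ChunkDeliveryMsg resolves a pending chunk request and that the delivered
// data ends up in the local store.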
func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
	tester, streamer, localStore, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
		return &testClient{
			t: t,
		}, nil
	})

	peerID := tester.IDs[0]

	stream := NewStream("foo", "", true)
	err = streamer.Subscribe(peerID, stream, NewRange(5, 8), Top)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	chunkKey := hash0[:]
	chunkData := hash1[:]
	chunk, created := localStore.GetOrCreateRequest(context.TODO(), chunkKey)

	if !created {
		t.Fatal("chunk already exists")
	}
	select {
	case <-chunk.ReqC:
		t.Fatal("chunk is already received")
	default:
	}

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "Subscribe message",
		Expects: []p2ptest.Expect{
			{
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					History:  NewRange(5, 8),
					Priority: Top,
				},
				Peer: peerID,
			},
		},
	},
		p2ptest.Exchange{
			Label: "ChunkDeliveryRequest message",
			Triggers: []p2ptest.Trigger{
				{
					Code: 6,
					Msg: &ChunkDeliveryMsg{
						Addr:  chunkKey,
						SData: chunkData,
					},
					Peer: peerID,
				},
			},
		})

	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	timeout := time.NewTimer(1 * time.Second)

	select {
	case <-timeout.C:
		t.Fatal("timeout receiving chunk")
	case <-chunk.ReqC:
	}

	storedChunk, err := localStore.Get(context.TODO(), chunkKey)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	if !bytes.Equal(storedChunk.SData, chunkData) {
		t.Fatal("Retrieved chunk has different data than original")
	}

}

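// TestDeliveryFromNodes runs the delivery simulation for chains of 2, 4, 8 and
// 16 nodes, both with and without SkipCheck.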
func TestDeliveryFromNodes(t *testing.T) {
	testDeliveryFromNodes(t, 2, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 2, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 4, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 4, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 8, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 8, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 16, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 16, 1, dataChunkCount, false)
}

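// testDeliveryFromNodes sets up a chain of simulation nodes running the
// streamer, uploads a random file to the chunk stores of all nodes except the
// pivot node, and verifies that the pivot node can retrieve the complete file
// through chunk delivery from its peers.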
func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {

			id := ctx.Config.ID
			addr := network.NewAddrFromNodeID(id)
			store, datadir, err := createTestLocalStorageForID(id, addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			db := storage.NewDBAPI(localStore)
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, db)

			r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
				SkipCheck: skipCheck,
			})
			bucket.Store(bucketKeyRegistry, r)

			retrieveFunc := func(ctx context.Context, chunk *storage.Chunk) error {
				return delivery.RequestFromPeers(ctx, chunk.Addr[:], skipCheck)
			}
			netStore := storage.NewNetStore(localStore, retrieveFunc)
			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil

		},
	})
	defer sim.Close()

	log.Info("Adding nodes to simulation")
	_, err := sim.AddNodesAndConnectChain(nodes)
	if err != nil {
		t.Fatal(err)
	}

	log.Info("Starting simulation")
	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		// the first node is the pivot node; it receives none of the uploaded
		// chunks and has to retrieve them from its peers
		sim.SetPivotNode(nodeIDs[0])
		// upload the file to the chunk stores of all nodes except the pivot,
		// distributing the chunks round-robin over those stores
		log.Debug("Writing file to round-robin file store")
		stores := make([]storage.ChunkStore, len(nodeIDs)-1)
		// collect the chunk stores of all non-pivot nodes
		lStores := sim.NodesItems(bucketKeyStore)
		i := 0
		for id, bucketVal := range lStores {
			// skip the pivot node's store
			if id == *sim.PivotNodeID() {
				continue
			}
			stores[i] = bucketVal.(storage.ChunkStore)
			i++
		}
		roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams())
		// store a random file of chunkCount chunks and wait until it is persisted
		size := chunkCount * chunkSize
		log.Debug("Storing data to file store")
		fileHash, wait, err := roundRobinFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
		if err != nil {
			return err
		}
		err = wait(ctx)
		if err != nil {
			return err
		}

		log.Debug("Waiting for kademlia")
		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		// every node subscribes to the swarm chunk server stream of the next
		// node in the chain
		for j, node := range nodeIDs[0 : nodes-1] {
			sid := nodeIDs[j+1]
			item, ok := sim.NodeItem(node, bucketKeyRegistry)
			if !ok {
				return fmt.Errorf("No registry")
			}
			registry := item.(*Registry)
			err = registry.Subscribe(sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top)
			if err != nil {
				return err
			}
		}

		// retrieve the file on the pivot node; this spawns retrieve requests
		// for every chunk that is missing from its local store
		item, ok := sim.NodeItem(*sim.PivotNodeID(), bucketKeyFileStore)
		if !ok {
			return fmt.Errorf("No filestore")
		}
		pivotFileStore := item.(*storage.FileStore)
		log.Debug("Starting retrieval routine")
		go func() {
			n, err := readAll(pivotFileStore, fileHash)
			log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
			if err != nil {
				t.Fatalf("requesting chunks action error: %v", err)
			}
		}()

		log.Debug("Watching for disconnections")
		disconnections := sim.PeerEvents(
			context.Background(),
			sim.NodeIDs(),
			simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
		)

		go func() {
			for d := range disconnections {
				if d.Error != nil {
					log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
					t.Fatal(d.Error)
				}
			}
		}()

		// read the file again from the pivot node to verify that all chunks
		// have been delivered
		log.Debug("Check retrieval")
		success := true
		var total int64
		total, err = readAll(pivotFileStore, fileHash)
		if err != nil {
			return err
		}
		log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
		if err != nil || total != int64(size) {
			success = false
		}

		if !success {
			return fmt.Errorf("Test failed, chunks not available on all nodes")
		}
		log.Debug("Test terminated successfully")
		return nil
	})
	if result.Error != nil {
		t.Fatal(result.Error)
	}
}

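// BenchmarkDeliveryFromNodesWithoutCheck benchmarks chunk delivery with
// SkipCheck enabled, for 2 to 16 nodes and 32 to 128 chunks.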
func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
	for chunks := 32; chunks <= 128; chunks *= 2 {
		for i := 2; i < 32; i *= 2 {
			b.Run(
				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
				func(b *testing.B) {
					benchmarkDeliveryFromNodes(b, i, 1, chunks, true)
				},
			)
		}
	}
}

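// BenchmarkDeliveryFromNodesWithCheck benchmarks the same scenarios with
// SkipCheck disabled.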
func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
	for chunks := 32; chunks <= 128; chunks *= 2 {
		for i := 2; i < 32; i *= 2 {
			b.Run(
				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
				func(b *testing.B) {
					benchmarkDeliveryFromNodes(b, i, 1, chunks, false)
				},
			)
		}
	}
}

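// benchmarkDeliveryFromNodes uploads chunkCount random chunks to the last node
// of a simulated chain and measures how long the first (pivot) node needs to
// retrieve all of them through its NetStore.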
func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {

			id := ctx.Config.ID
			addr := network.NewAddrFromNodeID(id)
			store, datadir, err := createTestLocalStorageForID(id, addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			db := storage.NewDBAPI(localStore)
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, db)

			r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
				SkipCheck:       skipCheck,
				DoSync:          true,
				SyncUpdateDelay: 0,
			})

			retrieveFunc := func(ctx context.Context, chunk *storage.Chunk) error {
				return delivery.RequestFromPeers(ctx, chunk.Addr[:], skipCheck)
			}
			netStore := storage.NewNetStore(localStore, retrieveFunc)
			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
			// the pivot node's NetStore is looked up via bucketKeyNetStore
			// below, so it has to be stored in the bucket as well
			bucket.Store(bucketKeyNetStore, netStore)
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil

		},
	})
	defer sim.Close()

	log.Info("Initializing test config")
	_, err := sim.AddNodesAndConnectChain(nodes)
	if err != nil {
		b.Fatal(err)
	}

	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		node := nodeIDs[len(nodeIDs)-1]

		item, ok := sim.NodeItem(node, bucketKeyFileStore)
		if !ok {
			b.Fatal("No filestore")
		}
		remoteFileStore := item.(*storage.FileStore)

		pivotNode := nodeIDs[0]
		item, ok = sim.NodeItem(pivotNode, bucketKeyNetStore)
		if !ok {
			b.Fatal("No netstore")
		}
		netStore := item.(*storage.NetStore)

		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		disconnections := sim.PeerEvents(
			context.Background(),
			sim.NodeIDs(),
			simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
		)

		go func() {
			for d := range disconnections {
				if d.Error != nil {
					log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
					b.Fatal(d.Error)
				}
			}
		}()
		// benchmark loop: every iteration uploads chunkCount chunks to the
		// last node of the chain and measures how long the pivot node needs
		// to fetch them all through its NetStore
		b.ResetTimer()
		b.StopTimer()
	Loop:
		for i := 0; i < b.N; i++ {
			// upload chunkCount random chunks to the remote node while the
			// timer is stopped
			hashes := make([]storage.Address, chunkCount)
			for i := 0; i < chunkCount; i++ {
				ctx := context.TODO()
				hash, wait, err := remoteFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false)
				if err != nil {
					b.Fatalf("expected no error. got %v", err)
				}
				// wait until the chunk is stored before continuing
				err = wait(ctx)
				if err != nil {
					b.Fatalf("expected no error. got %v", err)
				}
				hashes[i] = hash
			}
			// measure only the retrieval: fetch every uploaded chunk through
			// the pivot node's NetStore concurrently
			b.StartTimer()
			errs := make(chan error)
			for _, hash := range hashes {
				go func(h storage.Address) {
					_, err := netStore.Get(ctx, h)
					log.Warn("test check netstore get", "hash", h, "err", err)
					errs <- err
				}(hash)
			}
			// wait for all retrievals to finish and count the misses
			var total, misses int
			for err := range errs {
				if err != nil {
					log.Warn(err.Error())
					misses++
				}
				total++
				if total == chunkCount {
					break
				}
			}
			b.StopTimer()

			if misses > 0 {
				err = fmt.Errorf("%v chunks not found out of %v", misses, total)
				break Loop
			}
		}
		if err != nil {
			b.Fatal(err)
		}
		return nil
	})
	if result.Error != nil {
		b.Fatal(result.Error)
	}

}