github.com/yinchengtsinghua/golang-Eos-dpos-Ethereum@v0.0.0-20190121132951-92cc4225ed8e/swarm/network/stream/delivery_test.go

//This source code was translated, analyzed and modified by Yin Cheng of Tsinghua University
//Yin Cheng's QQ: 77025077
//Yin Cheng's WeChat: 18510341407
//Yin Cheng's QQ group: 721929980
//Yin Cheng's email: yinc13@mails.tsinghua.edu.cn
//Yin Cheng graduated from Tsinghua University and is a Microsoft Most Valuable Professional (MVP) in the blockchain field
//https://mvp.microsoft.com/zh-cn/PublicProfile/4033620

package stream

import (
	"bytes"
	"context"
	crand "crypto/rand"
	"fmt"
	"io"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

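// TestStreamerRetrieveRequest checks that calling RequestFromPeers on the
// delivery handler results in a RetrieveRequestMsg for the requested chunk
// address being sent to the connected test peer.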
func TestStreamerRetrieveRequest(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	peerID := tester.IDs[0]

	streamer.delivery.RequestFromPeers(context.TODO(), hash0[:], true)

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Expects: []p2ptest.Expect{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr:      hash0[:],
					SkipCheck: true,
				},
				Peer: peerID,
			},
		},
	})

	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}
}

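// TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore sends a
// RetrieveRequestMsg for a chunk that is not in the local store and expects
// the exchange to time out, since the server has nothing to offer.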
func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	peerID := tester.IDs[0]

	chunk := storage.NewChunk(storage.Address(hash0[:]), nil)

	peer := streamer.getPeer(peerID)

	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
		Stream:   NewStream(swarmChunkServerStreamName, "", false),
		History:  nil,
		Priority: Top,
	})

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr: chunk.Addr[:],
				},
				Peer: peerID,
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 1,
				Msg: &OfferedHashesMsg{
					HandoverProof: nil,
					Hashes:        nil,
					From:          0,
					To:            0,
				},
				Peer: peerID,
			},
		},
	})

	expectedError := `exchange #0 "RetrieveRequestMsg": timed out`
	if err == nil || err.Error() != expectedError {
		t.Fatalf("Expected error %v, got %v", expectedError, err)
	}
}

// TestStreamerUpstreamRetrieveRequestMsgExchange checks that a RetrieveRequestMsg
// for a chunk that is in the local store is answered with an OfferedHashesMsg,
// and with a ChunkDeliveryMsg when SkipCheck is set.
func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
	tester, streamer, localStore, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	peerID := tester.IDs[0]
	peer := streamer.getPeer(peerID)

	stream := NewStream(swarmChunkServerStreamName, "", false)

	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
		Stream:   stream,
		History:  nil,
		Priority: Top,
	})

	hash := storage.Address(hash0[:])
	chunk := storage.NewChunk(hash, nil)
	chunk.SData = hash
	localStore.Put(context.TODO(), chunk)
	chunk.WaitToStore()

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr: hash,
				},
				Peer: peerID,
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 1,
				Msg: &OfferedHashesMsg{
					HandoverProof: &HandoverProof{
						Handover: &Handover{},
					},
					Hashes: hash,
					From:   0,
					To:     32,
					Stream: stream,
				},
				Peer: peerID,
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}

	hash = storage.Address(hash1[:])
	chunk = storage.NewChunk(hash, nil)
	chunk.SData = hash1[:]
	localStore.Put(context.TODO(), chunk)
	chunk.WaitToStore()

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr:      hash,
					SkipCheck: true,
				},
				Peer: peerID,
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 6,
				Msg: &ChunkDeliveryMsg{
					Addr:  hash,
					SData: hash,
				},
				Peer: peerID,
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}
}

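// TestStreamerDownstreamChunkDeliveryMsgExchange subscribes to a stream,
// delivers a chunk to the downstream peer via ChunkDeliveryMsg and verifies
// that the chunk ends up in the local store with the expected data.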
func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
	tester, streamer, localStore, teardown, err := newStreamerTester(t)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
		return &testClient{
			t: t,
		}, nil
	})

	peerID := tester.IDs[0]

	stream := NewStream("foo", "", true)
	err = streamer.Subscribe(peerID, stream, NewRange(5, 8), Top)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	chunkKey := hash0[:]
	chunkData := hash1[:]
	chunk, created := localStore.GetOrCreateRequest(context.TODO(), chunkKey)

	if !created {
		t.Fatal("chunk already exists")
	}
	select {
	case <-chunk.ReqC:
		t.Fatal("chunk is already received")
	default:
	}

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "Subscribe message",
		Expects: []p2ptest.Expect{
			{
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					History:  NewRange(5, 8),
					Priority: Top,
				},
				Peer: peerID,
			},
		},
	},
		p2ptest.Exchange{
			Label: "ChunkDeliveryRequest message",
			Triggers: []p2ptest.Trigger{
				{
					Code: 6,
					Msg: &ChunkDeliveryMsg{
						Addr:  chunkKey,
						SData: chunkData,
					},
					Peer: peerID,
				},
			},
		})

	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	timeout := time.NewTimer(1 * time.Second)

	select {
	case <-timeout.C:
		t.Fatal("timeout receiving chunk")
	case <-chunk.ReqC:
	}

	storedChunk, err := localStore.Get(context.TODO(), chunkKey)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	if !bytes.Equal(storedChunk.SData, chunkData) {
		t.Fatal("Retrieved chunk has different data than original")
	}
}

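// TestDeliveryFromNodes runs the delivery simulation over chain topologies of
// 2, 4, 8 and 16 nodes, both with and without SkipCheck.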
func TestDeliveryFromNodes(t *testing.T) {
	testDeliveryFromNodes(t, 2, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 2, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 4, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 4, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 8, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 8, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 16, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 16, 1, dataChunkCount, false)
}

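// testDeliveryFromNodes sets up a chain of nodes running the streamer service,
// uploads a random file whose chunks are spread over all nodes except the
// pivot node, and then checks that the pivot node can retrieve the complete
// file through chunk delivery.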
func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {

			id := ctx.Config.ID
			addr := network.NewAddrFromNodeID(id)
			store, datadir, err := createTestLocalStorageForID(id, addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			db := storage.NewDBAPI(localStore)
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, db)

			r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
				SkipCheck: skipCheck,
			})
			bucket.Store(bucketKeyRegistry, r)

			retrieveFunc := func(ctx context.Context, chunk *storage.Chunk) error {
				return delivery.RequestFromPeers(ctx, chunk.Addr[:], skipCheck)
			}
			netStore := storage.NewNetStore(localStore, retrieveFunc)
			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	log.Info("Adding nodes to simulation")
	_, err := sim.AddNodesAndConnectChain(nodes)
	if err != nil {
		t.Fatal(err)
	}

	log.Info("Starting simulation")
	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		// the first node is used as the pivot node
		sim.SetPivotNode(nodeIDs[0])
		// upload a random file to a round-robin file store backed by the local
		// stores of all nodes except the pivot, so that the chunks end up
		// distributed over the other nodes
		log.Debug("Writing file to round-robin file store")
		stores := make([]storage.ChunkStore, len(nodeIDs)-1)
		lStores := sim.NodesItems(bucketKeyStore)
		i := 0
		for id, bucketVal := range lStores {
			// skip the pivot node's store
			if id == *sim.PivotNodeID() {
				continue
			}
			stores[i] = bucketVal.(storage.ChunkStore)
			i++
		}
		roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams())
		// store a random file of chunkCount chunks and wait until it is persisted
		size := chunkCount * chunkSize
		log.Debug("Storing data to file store")
		fileHash, wait, err := roundRobinFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
		if err != nil {
			return err
		}
		err = wait(ctx)
		if err != nil {
			return err
		}

		log.Debug("Waiting for kademlia")
		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		// each node subscribes to the RETRIEVE_REQUEST stream of the next node in the chain
		for j, node := range nodeIDs[0 : nodes-1] {
			sid := nodeIDs[j+1]
			item, ok := sim.NodeItem(node, bucketKeyRegistry)
			if !ok {
				return fmt.Errorf("No registry")
			}
			registry := item.(*Registry)
			err = registry.Subscribe(sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top)
			if err != nil {
				return err
			}
		}

		// get the pivot node's file store
		item, ok := sim.NodeItem(*sim.PivotNodeID(), bucketKeyFileStore)
		if !ok {
			return fmt.Errorf("No filestore")
		}
		pivotFileStore := item.(*storage.FileStore)
		log.Debug("Starting retrieval routine")
		go func() {
			// start reading the file from the pivot node; its chunks are not in
			// the local store, so this triggers retrieve requests to the peers
			n, err := readAll(pivotFileStore, fileHash)
			log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
			if err != nil {
				t.Fatalf("requesting chunks action error: %v", err)
			}
		}()

		log.Debug("Watching for disconnections")
		disconnections := sim.PeerEvents(
			context.Background(),
			sim.NodeIDs(),
			simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
		)

		go func() {
			for d := range disconnections {
				if d.Error != nil {
					log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
					t.Fatal(d.Error)
				}
			}
		}()

		// finally, check that the whole file can be read back through the pivot node
		log.Debug("Check retrieval")
		success := true
		var total int64
		total, err = readAll(pivotFileStore, fileHash)
		if err != nil {
			return err
		}
		log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
		if err != nil || total != int64(size) {
			success = false
		}

		if !success {
			return fmt.Errorf("Test failed, chunks not available on all nodes")
		}
		log.Debug("Test terminated successfully")
		return nil
	})
	if result.Error != nil {
		t.Fatal(result.Error)
	}
}

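// BenchmarkDeliveryFromNodesWithoutCheck benchmarks chunk delivery with
// SkipCheck enabled for increasing numbers of nodes and chunks.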
func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
	for chunks := 32; chunks <= 128; chunks *= 2 {
		for i := 2; i < 32; i *= 2 {
			b.Run(
				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
				func(b *testing.B) {
					benchmarkDeliveryFromNodes(b, i, 1, chunks, true)
				},
			)
		}
	}
}

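// BenchmarkDeliveryFromNodesWithCheck benchmarks chunk delivery with the
// offered-hashes check enabled (SkipCheck disabled).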
func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
	for chunks := 32; chunks <= 128; chunks *= 2 {
		for i := 2; i < 32; i *= 2 {
			b.Run(
				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
				func(b *testing.B) {
					benchmarkDeliveryFromNodes(b, i, 1, chunks, false)
				},
			)
		}
	}
}

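// benchmarkDeliveryFromNodes sets up a chain of nodes, uploads chunks to the
// last node and measures how long the first (pivot) node takes to retrieve
// them through its net store.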
func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {

			id := ctx.Config.ID
			addr := network.NewAddrFromNodeID(id)
			store, datadir, err := createTestLocalStorageForID(id, addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			db := storage.NewDBAPI(localStore)
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, db)

			r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
				SkipCheck:       skipCheck,
				DoSync:          true,
				SyncUpdateDelay: 0,
			})

			retrieveFunc := func(ctx context.Context, chunk *storage.Chunk) error {
				return delivery.RequestFromPeers(ctx, chunk.Addr[:], skipCheck)
			}
			netStore := storage.NewNetStore(localStore, retrieveFunc)
			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	log.Info("Initializing test config")
	_, err := sim.AddNodesAndConnectChain(nodes)
	if err != nil {
		b.Fatal(err)
	}

	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		node := nodeIDs[len(nodeIDs)-1]

		item, ok := sim.NodeItem(node, bucketKeyFileStore)
		if !ok {
			b.Fatal("No filestore")
		}
		remoteFileStore := item.(*storage.FileStore)

		pivotNode := nodeIDs[0]
		item, ok = sim.NodeItem(pivotNode, bucketKeyNetStore)
		if !ok {
			b.Fatal("No netstore")
		}
		netStore := item.(*storage.NetStore)

		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		disconnections := sim.PeerEvents(
			context.Background(),
			sim.NodeIDs(),
			simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
		)

		go func() {
			for d := range disconnections {
				if d.Error != nil {
					log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
					b.Fatal(d.Error)
				}
			}
		}()

		// benchmark loop: each iteration uploads chunkCount chunks to the last
		// node and measures retrieving them through the pivot node's net store
		b.ResetTimer()
		b.StopTimer()
	Loop:
		for i := 0; i < b.N; i++ {
			// upload chunkCount random chunks to the last node (not timed)
			hashes := make([]storage.Address, chunkCount)
			for i := 0; i < chunkCount; i++ {
				ctx := context.TODO()
				hash, wait, err := remoteFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false)
				if err != nil {
					b.Fatalf("expected no error. got %v", err)
				}
				// wait until the chunk is stored
				err = wait(ctx)
				if err != nil {
					b.Fatalf("expected no error. got %v", err)
				}
				hashes[i] = hash
			}
			// measure only the retrieval of the uploaded chunks via the pivot
			// node's net store
			b.StartTimer()
			errs := make(chan error)
			for _, hash := range hashes {
				go func(h storage.Address) {
					_, err := netStore.Get(ctx, h)
					log.Warn("test check netstore get", "hash", h, "err", err)
					errs <- err
				}(hash)
			}
			// count the results; a non-nil error counts as a missed chunk
			var total, misses int
			for err := range errs {
				if err != nil {
					log.Warn(err.Error())
					misses++
				}
				total++
				if total == chunkCount {
					break
				}
			}
			b.StopTimer()

			if misses > 0 {
				err = fmt.Errorf("%v chunks not found out of %v", misses, total)
				break Loop
			}
		}
		if err != nil {
			b.Fatal(err)
		}
		return nil
	})
	if result.Error != nil {
		b.Fatal(result.Error)
	}
}