gitee.com/liu-zhao234568/cntest@v1.0.0/eth/handler_eth_test.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
	"fmt"
	"math/big"
	"math/rand"
	"sync/atomic"
	"testing"
	"time"

	"gitee.com/liu-zhao234568/cntest/common"
	"gitee.com/liu-zhao234568/cntest/consensus/ethash"
	"gitee.com/liu-zhao234568/cntest/core"
	"gitee.com/liu-zhao234568/cntest/core/forkid"
	"gitee.com/liu-zhao234568/cntest/core/rawdb"
	"gitee.com/liu-zhao234568/cntest/core/types"
	"gitee.com/liu-zhao234568/cntest/core/vm"
	"gitee.com/liu-zhao234568/cntest/eth/downloader"
	"gitee.com/liu-zhao234568/cntest/eth/protocols/eth"
	"gitee.com/liu-zhao234568/cntest/event"
	"gitee.com/liu-zhao234568/cntest/p2p"
	"gitee.com/liu-zhao234568/cntest/p2p/enode"
	"gitee.com/liu-zhao234568/cntest/params"
	"gitee.com/liu-zhao234568/cntest/trie"
)

// testEthHandler is a mock event handler to listen for inbound network requests
// on the `eth` protocol and convert them into a more easily testable form.
type testEthHandler struct {
	blockBroadcasts event.Feed
	txAnnounces     event.Feed
	txBroadcasts    event.Feed
}

func (h *testEthHandler) Chain() *core.BlockChain              { panic("no backing chain") }
func (h *testEthHandler) StateBloom() *trie.SyncBloom          { panic("no backing state bloom") }
func (h *testEthHandler) TxPool() eth.TxPool                   { panic("no backing tx pool") }
func (h *testEthHandler) AcceptTxs() bool                      { return true }
func (h *testEthHandler) RunPeer(*eth.Peer, eth.Handler) error { panic("not used in tests") }
func (h *testEthHandler) PeerInfo(enode.ID) interface{}        { panic("not used in tests") }

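// Handle converts the inbound broadcasts and announcements into events on the
// handler's feeds, so individual tests can subscribe and assert on exactly
// what was received.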
func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
	switch packet := packet.(type) {
	case *eth.NewBlockPacket:
		h.blockBroadcasts.Send(packet.Block)
		return nil

	case *eth.NewPooledTransactionHashesPacket:
		h.txAnnounces.Send(([]common.Hash)(*packet))
		return nil

	case *eth.TransactionsPacket:
		h.txBroadcasts.Send(([]*types.Transaction)(*packet))
		return nil

	case *eth.PooledTransactionsPacket:
		h.txBroadcasts.Send(([]*types.Transaction)(*packet))
		return nil

	default:
		panic(fmt.Sprintf("unexpected eth packet type in tests: %T", packet))
	}
}

// Tests that peers are correctly accepted (or rejected) based on the advertised
// fork IDs in the protocol handshake.
func TestForkIDSplit65(t *testing.T) { testForkIDSplit(t, eth.ETH65) }
func TestForkIDSplit66(t *testing.T) { testForkIDSplit(t, eth.ETH66) }

func testForkIDSplit(t *testing.T, protocol uint) {
	t.Parallel()

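	// Build two chains that share a genesis but whose configurations diverge
	// from block 2 onwards (EIP150/155/158), so the advertised fork IDs only
	// split once the chains progress past Homestead.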
	var (
		engine = ethash.NewFaker()

		configNoFork  = &params.ChainConfig{HomesteadBlock: big.NewInt(1)}
		configProFork = &params.ChainConfig{
			HomesteadBlock: big.NewInt(1),
			EIP150Block:    big.NewInt(2),
			EIP155Block:    big.NewInt(2),
			EIP158Block:    big.NewInt(2),
			ByzantiumBlock: big.NewInt(3),
		}
		dbNoFork  = rawdb.NewMemoryDatabase()
		dbProFork = rawdb.NewMemoryDatabase()

		gspecNoFork  = &core.Genesis{Config: configNoFork}
		gspecProFork = &core.Genesis{Config: configProFork}

		genesisNoFork  = gspecNoFork.MustCommit(dbNoFork)
		genesisProFork = gspecProFork.MustCommit(dbProFork)

		chainNoFork, _  = core.NewBlockChain(dbNoFork, nil, configNoFork, engine, vm.Config{}, nil, nil)
		chainProFork, _ = core.NewBlockChain(dbProFork, nil, configProFork, engine, vm.Config{}, nil, nil)

		blocksNoFork, _  = core.GenerateChain(configNoFork, genesisNoFork, engine, dbNoFork, 2, nil)
		blocksProFork, _ = core.GenerateChain(configProFork, genesisProFork, engine, dbProFork, 2, nil)

		ethNoFork, _ = newHandler(&handlerConfig{
			Database:   dbNoFork,
			Chain:      chainNoFork,
			TxPool:     newTestTxPool(),
			Network:    1,
			Sync:       downloader.FullSync,
			BloomCache: 1,
		})
		ethProFork, _ = newHandler(&handlerConfig{
			Database:   dbProFork,
			Chain:      chainProFork,
			TxPool:     newTestTxPool(),
			Network:    1,
			Sync:       downloader.FullSync,
			BloomCache: 1,
		})
	)
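	// Start both handlers; the argument caps the number of allowed peers.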
	ethNoFork.Start(1000)
	ethProFork.Start(1000)

	// Clean up everything after ourselves
	defer chainNoFork.Stop()
	defer chainProFork.Stop()

	defer ethNoFork.Stop()
	defer ethProFork.Stop()

	// Both nodes should allow the other to connect (same genesis, next fork is the same)
	p2pNoFork, p2pProFork := p2p.MsgPipe()
	defer p2pNoFork.Close()
	defer p2pProFork.Close()

	peerNoFork := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pNoFork), p2pNoFork, nil)
	peerProFork := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pProFork), p2pProFork, nil)
	defer peerNoFork.Close()
	defer peerProFork.Close()

	errc := make(chan error, 2)
	go func(errc chan error) {
		errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil })
	}(errc)
	go func(errc chan error) {
		errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil })
	}(errc)

	for i := 0; i < 2; i++ {
		select {
		case err := <-errc:
			if err != nil {
				t.Fatalf("frontier nofork <-> profork failed: %v", err)
			}
		case <-time.After(250 * time.Millisecond):
			t.Fatalf("frontier nofork <-> profork handler timeout")
		}
	}
	// Progress into Homestead. Forks match, so we don't care what the future holds
	chainNoFork.InsertChain(blocksNoFork[:1])
	chainProFork.InsertChain(blocksProFork[:1])

	p2pNoFork, p2pProFork = p2p.MsgPipe()
	defer p2pNoFork.Close()
	defer p2pProFork.Close()

	peerNoFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
	peerProFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)
	defer peerNoFork.Close()
	defer peerProFork.Close()

	errc = make(chan error, 2)
	go func(errc chan error) {
		errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil })
	}(errc)
	go func(errc chan error) {
		errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil })
	}(errc)

	for i := 0; i < 2; i++ {
		select {
		case err := <-errc:
			if err != nil {
				t.Fatalf("homestead nofork <-> profork failed: %v", err)
			}
		case <-time.After(250 * time.Millisecond):
			t.Fatalf("homestead nofork <-> profork handler timeout")
		}
	}
	// Progress into Spurious. Forks mismatch, signalling differing chains, reject
	chainNoFork.InsertChain(blocksNoFork[1:2])
	chainProFork.InsertChain(blocksProFork[1:2])

	p2pNoFork, p2pProFork = p2p.MsgPipe()
	defer p2pNoFork.Close()
	defer p2pProFork.Close()

	peerNoFork = eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pNoFork), p2pNoFork, nil)
	peerProFork = eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pProFork), p2pProFork, nil)
	defer peerNoFork.Close()
	defer peerProFork.Close()

	errc = make(chan error, 2)
	go func(errc chan error) {
		errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil })
	}(errc)
	go func(errc chan error) {
		errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil })
	}(errc)

	var successes int
	for i := 0; i < 2; i++ {
		select {
		case err := <-errc:
			if err == nil {
				successes++
				if successes == 2 { // Only one side disconnects
					t.Fatalf("fork ID rejection didn't happen")
				}
			}
		case <-time.After(250 * time.Millisecond):
			t.Fatalf("split peers not rejected")
		}
	}
}

// Tests that received transactions are added to the local pool.
func TestRecvTransactions65(t *testing.T) { testRecvTransactions(t, eth.ETH65) }
func TestRecvTransactions66(t *testing.T) { testRecvTransactions(t, eth.ETH66) }

func testRecvTransactions(t *testing.T, protocol uint) {
	t.Parallel()

	// Create a message handler, configure it to accept transactions and watch them
	handler := newTestHandler()
	defer handler.close()

	handler.handler.acceptTxs = 1 // mark synced to accept transactions

	txs := make(chan core.NewTxsEvent)
	sub := handler.txpool.SubscribeNewTxsEvent(txs)
	defer sub.Unsubscribe()

	// Create a source peer to send messages through and a sink handler to receive them
	p2pSrc, p2pSink := p2p.MsgPipe()
	defer p2pSrc.Close()
	defer p2pSink.Close()

	src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, handler.txpool)
	sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, handler.txpool)
	defer src.Close()
	defer sink.Close()

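	// Run the sink side of the pipe through the full protocol handler so
	// inbound transactions get processed and scheduled for pool insertion.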
	go handler.handler.runEthPeer(sink, func(peer *eth.Peer) error {
		return eth.Handle((*ethHandler)(handler.handler), peer)
	})
	// Run the handshake locally to avoid spinning up a source handler
	var (
		genesis = handler.chain.Genesis()
		head    = handler.chain.CurrentBlock()
		td      = handler.chain.GetTd(head.Hash(), head.NumberU64())
	)
	if err := src.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {
		t.Fatalf("failed to run protocol handshake: %v", err)
	}
	// Send the transaction to the sink and verify that it's added to the tx pool
	tx := types.NewTransaction(0, common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil)
	tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)

	if err := src.SendTransactions([]*types.Transaction{tx}); err != nil {
		t.Fatalf("failed to send transaction: %v", err)
	}
	select {
	case event := <-txs:
		if len(event.Txs) != 1 {
			t.Errorf("wrong number of added transactions: got %d, want 1", len(event.Txs))
		} else if event.Txs[0].Hash() != tx.Hash() {
			t.Errorf("added wrong tx hash: got %v, want %v", event.Txs[0].Hash(), tx.Hash())
		}
	case <-time.After(2 * time.Second):
		t.Errorf("no NewTxsEvent received within 2 seconds")
	}
}

// Tests that pending transactions already in the pool are sent to a newly
// joined peer.
func TestSendTransactions65(t *testing.T) { testSendTransactions(t, eth.ETH65) }
func TestSendTransactions66(t *testing.T) { testSendTransactions(t, eth.ETH66) }

func testSendTransactions(t *testing.T, protocol uint) {
	t.Parallel()

	// Create a message handler and fill the pool with big transactions
	handler := newTestHandler()
	defer handler.close()

	insert := make([]*types.Transaction, 100)
	for nonce := range insert {
		tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), make([]byte, txsyncPackSize/10))
		tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)

		insert[nonce] = tx
	}
	go handler.txpool.AddRemotes(insert) // Need goroutine to not block on feed
	time.Sleep(250 * time.Millisecond)   // Wait until tx events get out of the system (can't use events, tx broadcaster races with peer join)

	// Create a source handler to send messages through and a sink peer to receive them
	p2pSrc, p2pSink := p2p.MsgPipe()
	defer p2pSrc.Close()
	defer p2pSink.Close()

	src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, handler.txpool)
	sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, handler.txpool)
	defer src.Close()
	defer sink.Close()

	go handler.handler.runEthPeer(src, func(peer *eth.Peer) error {
		return eth.Handle((*ethHandler)(handler.handler), peer)
	})
	// Run the handshake locally to avoid spinning up a sink handler
	var (
		genesis = handler.chain.Genesis()
		head    = handler.chain.CurrentBlock()
		td      = handler.chain.GetTd(head.Hash(), head.NumberU64())
	)
	if err := sink.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {
		t.Fatalf("failed to run protocol handshake: %v", err)
	}
	// After the handshake completes, the source handler should stream the
	// transactions to the sink; subscribe to all inbound network events to
	// verify that they arrive as announcements.
	backend := new(testEthHandler)

	anns := make(chan []common.Hash)
	annSub := backend.txAnnounces.Subscribe(anns)
	defer annSub.Unsubscribe()

	bcasts := make(chan []*types.Transaction)
	bcastSub := backend.txBroadcasts.Subscribe(bcasts)
	defer bcastSub.Unsubscribe()

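	// Attach the sink peer to the mock backend and start delivering its
	// inbound messages onto the feeds above.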
	go eth.Handle(backend, sink)

	// Make sure we get all the transactions on the correct channels
	seen := make(map[common.Hash]struct{})
	for len(seen) < len(insert) {
		switch protocol {
		case 65, 66:
			select {
			case hashes := <-anns:
				for _, hash := range hashes {
					if _, ok := seen[hash]; ok {
						t.Errorf("duplicate transaction announced: %x", hash)
					}
					seen[hash] = struct{}{}
				}
			case <-bcasts:
				t.Errorf("initial tx broadcast received on eth/65 or newer, expected announcements only")
			}

		default:
			panic("unsupported protocol, please extend test")
		}
	}
	for _, tx := range insert {
		if _, ok := seen[tx.Hash()]; !ok {
			t.Errorf("missing transaction: %x", tx.Hash())
		}
	}
}

// Tests that transactions get propagated to all attached peers, either via direct
// broadcasts or via announcements/retrievals.
func TestTransactionPropagation65(t *testing.T) { testTransactionPropagation(t, eth.ETH65) }
func TestTransactionPropagation66(t *testing.T) { testTransactionPropagation(t, eth.ETH66) }

func testTransactionPropagation(t *testing.T, protocol uint) {
	t.Parallel()

	// Create a source handler to send transactions from and a number of sinks
	// to receive them. We need multiple sinks since a one-to-one peering would
	// broadcast all transactions without announcement.
	source := newTestHandler()
	defer source.close()

	sinks := make([]*testHandler, 10)
	for i := 0; i < len(sinks); i++ {
		sinks[i] = newTestHandler()
		defer sinks[i].close()

		sinks[i].handler.acceptTxs = 1 // mark synced to accept transactions
	}
	// Interconnect all the sink handlers with the source handler
	for i, sink := range sinks {
		sink := sink // Closure for goroutine below

		sourcePipe, sinkPipe := p2p.MsgPipe()
		defer sourcePipe.Close()
		defer sinkPipe.Close()

		sourcePeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, source.txpool)
		sinkPeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, sink.txpool)
		defer sourcePeer.Close()
		defer sinkPeer.Close()

		go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error {
			return eth.Handle((*ethHandler)(source.handler), peer)
		})
		go sink.handler.runEthPeer(sinkPeer, func(peer *eth.Peer) error {
			return eth.Handle((*ethHandler)(sink.handler), peer)
		})
	}
	// Subscribe to all the transaction pools
	txChs := make([]chan core.NewTxsEvent, len(sinks))
	for i := 0; i < len(sinks); i++ {
		txChs[i] = make(chan core.NewTxsEvent, 1024)

		sub := sinks[i].txpool.SubscribeNewTxsEvent(txChs[i])
		defer sub.Unsubscribe()
	}
	// Fill the source pool with transactions and wait for them at the sinks
	txs := make([]*types.Transaction, 1024)
	for nonce := range txs {
		tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil)
		tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)

		txs[nonce] = tx
	}
	source.txpool.AddRemotes(txs)

	// Iterate through all the sinks and ensure they all got the transactions
	for i := range sinks {
		for arrived, timeout := 0, false; arrived < len(txs) && !timeout; {
			select {
			case event := <-txChs[i]:
				arrived += len(event.Txs)
			case <-time.After(time.Second):
				t.Errorf("sink %d: transaction propagation timed out: have %d, want %d", i, arrived, len(txs))
				timeout = true
			}
		}
	}
}

// Tests that post eth protocol handshake, clients perform a mutual checkpoint
// challenge to validate each other's chains. Hash mismatches, or missing
// responses during a fast sync, should lead to the peer getting dropped.
func TestCheckpointChallenge(t *testing.T) {
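	// Each case toggles the local sync mode, whether a checkpoint is configured
	// locally, and how the remote answers the challenge (timeout, empty reply,
	// matching or mismatching header), then checks the drop decision.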
	tests := []struct {
		syncmode   downloader.SyncMode
		checkpoint bool
		timeout    bool
		empty      bool
		match      bool
		drop       bool
	}{
		// If checkpointing is not enabled locally, don't challenge and don't drop
		{downloader.FullSync, false, false, false, false, false},
		{downloader.FastSync, false, false, false, false, false},

		// If checkpointing is enabled locally and remote response is empty, only drop during fast sync
		{downloader.FullSync, true, false, true, false, false},
		{downloader.FastSync, true, false, true, false, true}, // Special case, fast sync, unsynced peer

		// If checkpointing is enabled locally and remote response mismatches, always drop
		{downloader.FullSync, true, false, false, false, true},
		{downloader.FastSync, true, false, false, false, true},

		// If checkpointing is enabled locally and remote response matches, never drop
		{downloader.FullSync, true, false, false, true, false},
		{downloader.FastSync, true, false, false, true, false},

		// If checkpointing is enabled locally and remote times out, always drop
		{downloader.FullSync, true, true, false, true, true},
		{downloader.FastSync, true, true, false, true, true},
	}
	for _, tt := range tests {
		t.Run(fmt.Sprintf("sync %v checkpoint %v timeout %v empty %v match %v", tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match), func(t *testing.T) {
			testCheckpointChallenge(t, tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match, tt.drop)
		})
	}
}

func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpoint bool, timeout bool, empty bool, match bool, drop bool) {
	t.Parallel()

	// Reduce the checkpoint handshake challenge timeout
	defer func(old time.Duration) { syncChallengeTimeout = old }(syncChallengeTimeout)
	syncChallengeTimeout = 250 * time.Millisecond

	// Create a test handler and inject a CHT into it. The injection is a bit
	// ugly, but it beats assembling everything manually just to avoid reaching
	// into the internals.
	handler := newTestHandler()
	defer handler.close()

	if syncmode == downloader.FastSync {
		atomic.StoreUint32(&handler.handler.fastSync, 1)
	} else {
		atomic.StoreUint32(&handler.handler.fastSync, 0)
	}
	var response *types.Header
	if checkpoint {
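		// Pick the last block of a random CHT section as the checkpoint block.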
		number := (uint64(rand.Intn(500))+1)*params.CHTFrequency - 1
		response = &types.Header{Number: big.NewInt(int64(number)), Extra: []byte("valid")}

		handler.handler.checkpointNumber = number
		handler.handler.checkpointHash = response.Hash()
	}

	// Create a challenger peer and a challenged one.
	p2pLocal, p2pRemote := p2p.MsgPipe()
	defer p2pLocal.Close()
	defer p2pRemote.Close()

	local := eth.NewPeer(eth.ETH65, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pLocal), p2pLocal, handler.txpool)
	remote := eth.NewPeer(eth.ETH65, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pRemote), p2pRemote, handler.txpool)
	defer local.Close()
	defer remote.Close()

	handlerDone := make(chan struct{})
	go func() {
		defer close(handlerDone)
		handler.handler.runEthPeer(local, func(peer *eth.Peer) error {
			return eth.Handle((*ethHandler)(handler.handler), peer)
		})
	}()

	// Run the handshake locally to avoid spinning up a remote handler.
	var (
		genesis = handler.chain.Genesis()
		head    = handler.chain.CurrentBlock()
		td      = handler.chain.GetTd(head.Hash(), head.NumberU64())
	)
	if err := remote.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {
		t.Fatalf("failed to run protocol handshake: %v", err)
	}

	// Connect a new peer and check that we receive the checkpoint challenge.
	if checkpoint {
		if err := remote.ExpectRequestHeadersByNumber(response.Number.Uint64(), 1, 0, false); err != nil {
			t.Fatalf("challenge mismatch: %v", err)
		}
		// Reply to the challenge unless a timeout is being simulated.
		if !timeout {
			if empty {
				if err := remote.SendBlockHeaders([]*types.Header{}); err != nil {
					t.Fatalf("failed to answer challenge: %v", err)
				}
			} else if match {
				if err := remote.SendBlockHeaders([]*types.Header{response}); err != nil {
					t.Fatalf("failed to answer challenge: %v", err)
				}
			} else {
				if err := remote.SendBlockHeaders([]*types.Header{{Number: response.Number}}); err != nil {
					t.Fatalf("failed to answer challenge: %v", err)
				}
			}
		}
	}

	// Wait until the challenge timeout passes to ensure proper cleanup
	time.Sleep(syncChallengeTimeout + 300*time.Millisecond)

	// Verify that the remote peer is maintained or dropped.
	if drop {
		<-handlerDone
		if peers := handler.handler.peers.len(); peers != 0 {
			t.Fatalf("peer count mismatch: have %d, want %d", peers, 0)
		}
	} else {
		if peers := handler.handler.peers.len(); peers != 1 {
			t.Fatalf("peer count mismatch: have %d, want %d", peers, 1)
		}
	}
}

// Tests that blocks are broadcast to a sqrt number of peers only.
func TestBroadcastBlock1Peer(t *testing.T)    { testBroadcastBlock(t, 1, 1) }
func TestBroadcastBlock2Peers(t *testing.T)   { testBroadcastBlock(t, 2, 1) }
func TestBroadcastBlock3Peers(t *testing.T)   { testBroadcastBlock(t, 3, 1) }
func TestBroadcastBlock4Peers(t *testing.T)   { testBroadcastBlock(t, 4, 2) }
func TestBroadcastBlock5Peers(t *testing.T)   { testBroadcastBlock(t, 5, 2) }
func TestBroadcastBlock9Peers(t *testing.T)   { testBroadcastBlock(t, 9, 3) }
func TestBroadcastBlock12Peers(t *testing.T)  { testBroadcastBlock(t, 12, 3) }
func TestBroadcastBlock16Peers(t *testing.T)  { testBroadcastBlock(t, 16, 4) }
func TestBroadcastBlock26Peers(t *testing.T)  { testBroadcastBlock(t, 26, 5) }
func TestBroadcastBlock100Peers(t *testing.T) { testBroadcastBlock(t, 100, 10) }
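// The expected broadcast counts above follow int(math.Sqrt(peers)), the subset
// size BroadcastBlock propagates full blocks to.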

func testBroadcastBlock(t *testing.T, peers, bcasts int) {
	t.Parallel()

	// Create a source handler to broadcast blocks from and a number of sinks
	// to receive them.
	source := newTestHandlerWithBlocks(1)
	defer source.close()

	sinks := make([]*testEthHandler, peers)
	for i := 0; i < len(sinks); i++ {
		sinks[i] = new(testEthHandler)
	}
	// Interconnect all the sink handlers with the source handler
	var (
		genesis = source.chain.Genesis()
		td      = source.chain.GetTd(genesis.Hash(), genesis.NumberU64())
	)
	for i, sink := range sinks {
		sink := sink // Closure for goroutine below

		sourcePipe, sinkPipe := p2p.MsgPipe()
		defer sourcePipe.Close()
		defer sinkPipe.Close()

		sourcePeer := eth.NewPeer(eth.ETH65, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil)
		sinkPeer := eth.NewPeer(eth.ETH65, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil)
		defer sourcePeer.Close()
		defer sinkPeer.Close()

		go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error {
			return eth.Handle((*ethHandler)(source.handler), peer)
		})
		if err := sinkPeer.Handshake(1, td, genesis.Hash(), genesis.Hash(), forkid.NewIDWithChain(source.chain), forkid.NewFilter(source.chain)); err != nil {
			t.Fatalf("failed to run protocol handshake: %v", err)
		}
		go eth.Handle(sink, sinkPeer)
	}
	// Subscribe to the block broadcast feeds of all the sinks
	blockChs := make([]chan *types.Block, len(sinks))
	for i := 0; i < len(sinks); i++ {
		blockChs[i] = make(chan *types.Block, 1)
		defer close(blockChs[i])

		sub := sinks[i].blockBroadcasts.Subscribe(blockChs[i])
		defer sub.Unsubscribe()
	}
	// Initiate a block propagation across the peers
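	// (the short sleep lets the peer registrations settle; propagate=true sends
	// the full block to a sqrt subset instead of just announcing the hash)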
	time.Sleep(100 * time.Millisecond)
	source.handler.BroadcastBlock(source.chain.CurrentBlock(), true)

	// Iterate through all the sinks and ensure the correct number got the block
	done := make(chan struct{}, peers)
	for _, ch := range blockChs {
		ch := ch
		go func() {
			<-ch
			done <- struct{}{}
		}()
	}
	var received int
	for {
		select {
		case <-done:
			received++

		case <-time.After(100 * time.Millisecond):
			if received != bcasts {
				t.Errorf("broadcast count mismatch: have %d, want %d", received, bcasts)
			}
			return
		}
	}
}

// Tests that a propagated malformed block (uncles or transactions don't match
// the hashes in the header) gets discarded and not broadcast forward.
func TestBroadcastMalformedBlock65(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH65) }
func TestBroadcastMalformedBlock66(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH66) }

func testBroadcastMalformedBlock(t *testing.T, protocol uint) {
	t.Parallel()

	// Create a source handler to broadcast blocks from and a sink peer to
	// receive them.
	source := newTestHandlerWithBlocks(1)
	defer source.close()

	// Create a message pipe between the source handler and the sink peer
	p2pSrc, p2pSink := p2p.MsgPipe()
	defer p2pSrc.Close()
	defer p2pSink.Close()

	src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, source.txpool)
	sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, source.txpool)
	defer src.Close()
	defer sink.Close()

	go source.handler.runEthPeer(src, func(peer *eth.Peer) error {
		return eth.Handle((*ethHandler)(source.handler), peer)
	})
	// Run the handshake locally to avoid spinning up a sink handler
	var (
		genesis = source.chain.Genesis()
		td      = source.chain.GetTd(genesis.Hash(), genesis.NumberU64())
	)
	if err := sink.Handshake(1, td, genesis.Hash(), genesis.Hash(), forkid.NewIDWithChain(source.chain), forkid.NewFilter(source.chain)); err != nil {
		t.Fatalf("failed to run protocol handshake: %v", err)
	}
	// After the handshake completes, the source handler should stream the
	// blocks to the sink; subscribe to inbound network events to catch any
	// block that gets forwarded.
	backend := new(testEthHandler)

	blocks := make(chan *types.Block, 1)
	sub := backend.blockBroadcasts.Subscribe(blocks)
	defer sub.Unsubscribe()

	go eth.Handle(backend, sink)

	// Create various combinations of malformed blocks
	head := source.chain.CurrentBlock()

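	// Tamper with a single byte of each commitment hash so the header no
	// longer matches the body it ships with.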
	malformedUncles := head.Header()
	malformedUncles.UncleHash[0]++
	malformedTransactions := head.Header()
	malformedTransactions.TxHash[0]++
	malformedEverything := head.Header()
	malformedEverything.UncleHash[0]++
	malformedEverything.TxHash[0]++

	// Try to broadcast all malformations and ensure they all get discarded
	for _, header := range []*types.Header{malformedUncles, malformedTransactions, malformedEverything} {
		block := types.NewBlockWithHeader(header).WithBody(head.Transactions(), head.Uncles())
		if err := src.SendNewBlock(block, big.NewInt(131136)); err != nil {
			t.Fatalf("failed to broadcast block: %v", err)
		}
		select {
		case <-blocks:
			t.Fatalf("malformed block forwarded")
		case <-time.After(100 * time.Millisecond):
		}
	}
}