github.com/dominant-strategies/go-quai@v0.28.2/eth/handler_eth_test.go (about)

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package eth
    18  
    19  import (
    20  	"fmt"
    21  	"math/big"
    22  	"math/rand"
    23  	"sync/atomic"
    24  	"testing"
    25  	"time"
    26  
    27  	"github.com/dominant-strategies/go-quai/common"
    28  	"github.com/dominant-strategies/go-quai/consensus/blake3pow"
    29  	"github.com/dominant-strategies/go-quai/core"
    30  	"github.com/dominant-strategies/go-quai/core/forkid"
    31  	"github.com/dominant-strategies/go-quai/core/rawdb"
    32  	"github.com/dominant-strategies/go-quai/core/types"
    33  	"github.com/dominant-strategies/go-quai/core/vm"
    34  	"github.com/dominant-strategies/go-quai/eth/downloader"
    35  	"github.com/dominant-strategies/go-quai/eth/protocols/eth"
    36  	"github.com/dominant-strategies/go-quai/event"
    37  	"github.com/dominant-strategies/go-quai/p2p"
    38  	"github.com/dominant-strategies/go-quai/p2p/enode"
    39  	"github.com/dominant-strategies/go-quai/params"
    40  	"github.com/dominant-strategies/go-quai/trie"
    41  )
    42  
// testEthHandler is a mock event handler to listen for inbound network requests
// on the `eth` protocol and convert them into a more easily testable form.
type testEthHandler struct {
	blockBroadcasts event.Feed // delivers the *types.Block of each NewBlockPacket received
	txAnnounces     event.Feed // delivers the []common.Hash of each NewPooledTransactionHashesPacket
	txBroadcasts    event.Feed // delivers []*types.Transaction for direct broadcasts and pooled retrievals
}
    50  
// The methods below satisfy the eth handler backend interface. Only AcceptTxs
// is expected to be invoked by these tests; the others panic loudly so that a
// test accidentally exercising them fails immediately.
func (h *testEthHandler) Chain() *core.BlockChain              { panic("no backing chain") }
func (h *testEthHandler) StateBloom() *trie.SyncBloom          { panic("no backing state bloom") }
func (h *testEthHandler) TxPool() eth.TxPool                   { panic("no backing tx pool") }
func (h *testEthHandler) AcceptTxs() bool                      { return true } // always report synced
func (h *testEthHandler) RunPeer(*eth.Peer, eth.Handler) error { panic("not used in tests") }
func (h *testEthHandler) PeerInfo(enode.ID) interface{}        { panic("not used in tests") }
    57  
    58  func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
    59  	switch packet := packet.(type) {
    60  	case *eth.NewBlockPacket:
    61  		h.blockBroadcasts.Send(packet.Block)
    62  		return nil
    63  
    64  	case *eth.NewPooledTransactionHashesPacket:
    65  		h.txAnnounces.Send(([]common.Hash)(*packet))
    66  		return nil
    67  
    68  	case *eth.TransactionsPacket:
    69  		h.txBroadcasts.Send(([]*types.Transaction)(*packet))
    70  		return nil
    71  
    72  	case *eth.PooledTransactionsPacket:
    73  		h.txBroadcasts.Send(([]*types.Transaction)(*packet))
    74  		return nil
    75  
    76  	default:
    77  		panic(fmt.Sprintf("unexpected eth packet type in tests: %T", packet))
    78  	}
    79  }
    80  
// Tests that peers are correctly accepted (or rejected) based on the advertised
// fork IDs in the protocol handshake, for both supported wire protocol versions.
func TestForkIDSplit65(t *testing.T) { testForkIDSplit(t, eth.ETH65) }
func TestForkIDSplit66(t *testing.T) { testForkIDSplit(t, eth.ETH66) }
    85  
    86  func testForkIDSplit(t *testing.T, protocol uint) {
    87  	t.Parallel()
    88  
    89  	var (
    90  		engine = blake3pow.NewFaker()
    91  
    92  		configNoFork  = &params.ChainConfig{}
    93  		configProFork = &params.ChainConfig{}
    94  		dbNoFork      = rawdb.NewMemoryDatabase()
    95  		dbProFork     = rawdb.NewMemoryDatabase()
    96  
    97  		gspecNoFork  = &core.Genesis{Config: configNoFork}
    98  		gspecProFork = &core.Genesis{Config: configProFork}
    99  
   100  		genesisNoFork  = gspecNoFork.MustCommit(dbNoFork)
   101  		genesisProFork = gspecProFork.MustCommit(dbProFork)
   102  
   103  		chainNoFork, _  = core.NewBlockChain(dbNoFork, nil, configNoFork, engine, vm.Config{}, nil, nil)
   104  		chainProFork, _ = core.NewBlockChain(dbProFork, nil, configProFork, engine, vm.Config{}, nil, nil)
   105  
   106  		blocksNoFork, _  = core.GenerateChain(configNoFork, genesisNoFork, engine, dbNoFork, 2, nil)
   107  		blocksProFork, _ = core.GenerateChain(configProFork, genesisProFork, engine, dbProFork, 2, nil)
   108  
   109  		ethNoFork, _ = newHandler(&handlerConfig{
   110  			Database:   dbNoFork,
   111  			Chain:      chainNoFork,
   112  			TxPool:     newTestTxPool(),
   113  			Network:    1,
   114  			Sync:       downloader.FullSync,
   115  			BloomCache: 1,
   116  		})
   117  		ethProFork, _ = newHandler(&handlerConfig{
   118  			Database:   dbProFork,
   119  			Chain:      chainProFork,
   120  			TxPool:     newTestTxPool(),
   121  			Network:    1,
   122  			Sync:       downloader.FullSync,
   123  			BloomCache: 1,
   124  		})
   125  	)
   126  	ethNoFork.Start(1000)
   127  	ethProFork.Start(1000)
   128  
   129  	// Clean up everything after ourselves
   130  	defer chainNoFork.Stop()
   131  	defer chainProFork.Stop()
   132  
   133  	defer ethNoFork.Stop()
   134  	defer ethProFork.Stop()
   135  
   136  	// Both nodes should allow the other to connect (same genesis, next fork is the same)
   137  	p2pNoFork, p2pProFork := p2p.MsgPipe()
   138  	defer p2pNoFork.Close()
   139  	defer p2pProFork.Close()
   140  
   141  	peerNoFork := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pNoFork), p2pNoFork, nil)
   142  	peerProFork := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pProFork), p2pProFork, nil)
   143  	defer peerNoFork.Close()
   144  	defer peerProFork.Close()
   145  
   146  	errc := make(chan error, 2)
   147  	go func(errc chan error) {
   148  		errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil })
   149  	}(errc)
   150  	go func(errc chan error) {
   151  		errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil })
   152  	}(errc)
   153  
   154  	for i := 0; i < 2; i++ {
   155  		select {
   156  		case err := <-errc:
   157  			if err != nil {
   158  				t.Fatalf(" nofork <-> profork failed: %v", err)
   159  			}
   160  		case <-time.After(250 * time.Millisecond):
   161  			t.Fatalf(" nofork <-> profork handler timeout")
   162  		}
   163  	}
   164  
   165  	chainNoFork.InsertChain(blocksNoFork[:1])
   166  	chainProFork.InsertChain(blocksProFork[:1])
   167  
   168  	p2pNoFork, p2pProFork = p2p.MsgPipe()
   169  	defer p2pNoFork.Close()
   170  	defer p2pProFork.Close()
   171  
   172  	peerNoFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
   173  	peerProFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)
   174  	defer peerNoFork.Close()
   175  	defer peerProFork.Close()
   176  
   177  	errc = make(chan error, 2)
   178  	go func(errc chan error) {
   179  		errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil })
   180  	}(errc)
   181  	go func(errc chan error) {
   182  		errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil })
   183  	}(errc)
   184  
   185  	for i := 0; i < 2; i++ {
   186  		select {
   187  		case err := <-errc:
   188  			if err != nil {
   189  				t.Fatalf("nofork <-> profork failed: %v", err)
   190  			}
   191  		case <-time.After(250 * time.Millisecond):
   192  			t.Fatalf("nofork <-> profork handler timeout")
   193  		}
   194  	}
   195  	// Progress into _. Forks mismatch, signalling differing chains, reject
   196  	chainNoFork.InsertChain(blocksNoFork[1:2])
   197  	chainProFork.InsertChain(blocksProFork[1:2])
   198  
   199  	p2pNoFork, p2pProFork = p2p.MsgPipe()
   200  	defer p2pNoFork.Close()
   201  	defer p2pProFork.Close()
   202  
   203  	peerNoFork = eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pNoFork), p2pNoFork, nil)
   204  	peerProFork = eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pProFork), p2pProFork, nil)
   205  	defer peerNoFork.Close()
   206  	defer peerProFork.Close()
   207  
   208  	errc = make(chan error, 2)
   209  	go func(errc chan error) {
   210  		errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil })
   211  	}(errc)
   212  	go func(errc chan error) {
   213  		errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil })
   214  	}(errc)
   215  
   216  	var successes int
   217  	for i := 0; i < 2; i++ {
   218  		select {
   219  		case err := <-errc:
   220  			if err == nil {
   221  				successes++
   222  				if successes == 2 { // Only one side disconnects
   223  					t.Fatalf("fork ID rejection didn't happen")
   224  				}
   225  			}
   226  		case <-time.After(250 * time.Millisecond):
   227  			t.Fatalf("split peers not rejected")
   228  		}
   229  	}
   230  }
   231  
// Tests that received transactions are added to the local pool, for both
// supported wire protocol versions.
func TestRecvTransactions65(t *testing.T) { testRecvTransactions(t, eth.ETH65) }
func TestRecvTransactions66(t *testing.T) { testRecvTransactions(t, eth.ETH66) }
   235  
   236  func testRecvTransactions(t *testing.T, protocol uint) {
   237  	t.Parallel()
   238  
   239  	// Create a message handler, configure it to accept transactions and watch them
   240  	handler := newTestHandler()
   241  	defer handler.close()
   242  
   243  	handler.handler.acceptTxs = 1 // mark synced to accept transactions
   244  
   245  	txs := make(chan core.NewTxsEvent)
   246  	sub := handler.txpool.SubscribeNewTxsEvent(txs)
   247  	defer sub.Unsubscribe()
   248  
   249  	// Create a source peer to send messages through and a sink handler to receive them
   250  	p2pSrc, p2pSink := p2p.MsgPipe()
   251  	defer p2pSrc.Close()
   252  	defer p2pSink.Close()
   253  
   254  	src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, handler.txpool)
   255  	sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, handler.txpool)
   256  	defer src.Close()
   257  	defer sink.Close()
   258  
   259  	go handler.handler.runEthPeer(sink, func(peer *eth.Peer) error {
   260  		return eth.Handle((*ethHandler)(handler.handler), peer)
   261  	})
   262  	// Run the handshake locally to avoid spinning up a source handler
   263  	var (
   264  		genesis = handler.chain.Genesis()
   265  		head    = handler.chain.CurrentBlock()
   266  		td      = handler.chain.GetTd(head.Hash(), head.NumberU64())
   267  	)
   268  	if err := src.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {
   269  		t.Fatalf("failed to run protocol handshake")
   270  	}
   271  	// Send the transaction to the sink and verify that it's added to the tx pool
   272  	tx := types.NewTransaction(0, common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil)
   273  	tx, _ = types.SignTx(tx, types.LatestSigner(params.RopstenChainConfig), testKey)
   274  
   275  	if err := src.SendTransactions([]*types.Transaction{tx}); err != nil {
   276  		t.Fatalf("failed to send transaction: %v", err)
   277  	}
   278  	select {
   279  	case event := <-txs:
   280  		if len(event.Txs) != 1 {
   281  			t.Errorf("wrong number of added transactions: got %d, want 1", len(event.Txs))
   282  		} else if event.Txs[0].Hash() != tx.Hash() {
   283  			t.Errorf("added wrong tx hash: got %v, want %v", event.Txs[0].Hash(), tx.Hash())
   284  		}
   285  	case <-time.After(2 * time.Second):
   286  		t.Errorf("no NewTxsEvent received within 2 seconds")
   287  	}
   288  }
   289  
// This test checks that pending transactions are sent to a newly connected
// peer, for both supported wire protocol versions.
func TestSendTransactions65(t *testing.T) { testSendTransactions(t, eth.ETH65) }
func TestSendTransactions66(t *testing.T) { testSendTransactions(t, eth.ETH66) }
   293  
   294  func testSendTransactions(t *testing.T, protocol uint) {
   295  	t.Parallel()
   296  
   297  	// Create a message handler and fill the pool with big transactions
   298  	handler := newTestHandler()
   299  	defer handler.close()
   300  
   301  	insert := make([]*types.Transaction, 100)
   302  	for nonce := range insert {
   303  		tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), make([]byte, txsyncPackSize/10))
   304  		tx, _ = types.SignTx(tx, types.LatestSigner(params.RopstenChainConfig), testKey)
   305  
   306  		insert[nonce] = tx
   307  	}
   308  	go handler.txpool.AddRemotes(insert) // Need goroutine to not block on feed
   309  	time.Sleep(250 * time.Millisecond)   // Wait until tx events get out of the system (can't use events, tx broadcaster races with peer join)
   310  
   311  	// Create a source handler to send messages through and a sink peer to receive them
   312  	p2pSrc, p2pSink := p2p.MsgPipe()
   313  	defer p2pSrc.Close()
   314  	defer p2pSink.Close()
   315  
   316  	src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, handler.txpool)
   317  	sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, handler.txpool)
   318  	defer src.Close()
   319  	defer sink.Close()
   320  
   321  	go handler.handler.runEthPeer(src, func(peer *eth.Peer) error {
   322  		return eth.Handle((*ethHandler)(handler.handler), peer)
   323  	})
   324  	// Run the handshake locally to avoid spinning up a source handler
   325  	var (
   326  		genesis = handler.chain.Genesis()
   327  		head    = handler.chain.CurrentBlock()
   328  		td      = handler.chain.GetTd(head.Hash(), head.NumberU64())
   329  	)
   330  	if err := sink.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {
   331  		t.Fatalf("failed to run protocol handshake")
   332  	}
   333  	// After the handshake completes, the source handler should stream the sink
   334  	// the transactions, subscribe to all inbound network events
   335  	backend := new(testEthHandler)
   336  
   337  	anns := make(chan []common.Hash)
   338  	annSub := backend.txAnnounces.Subscribe(anns)
   339  	defer annSub.Unsubscribe()
   340  
   341  	bcasts := make(chan []*types.Transaction)
   342  	bcastSub := backend.txBroadcasts.Subscribe(bcasts)
   343  	defer bcastSub.Unsubscribe()
   344  
   345  	go eth.Handle(backend, sink)
   346  
   347  	// Make sure we get all the transactions on the correct channels
   348  	seen := make(map[common.Hash]struct{})
   349  	for len(seen) < len(insert) {
   350  		switch protocol {
   351  		case 65, 66:
   352  			select {
   353  			case hashes := <-anns:
   354  				for _, hash := range hashes {
   355  					if _, ok := seen[hash]; ok {
   356  						t.Errorf("duplicate transaction announced: %x", hash)
   357  					}
   358  					seen[hash] = struct{}{}
   359  				}
   360  			case <-bcasts:
   361  				t.Errorf("initial tx broadcast received on post eth/65")
   362  			}
   363  
   364  		default:
   365  			panic("unsupported protocol, please extend test")
   366  		}
   367  	}
   368  	for _, tx := range insert {
   369  		if _, ok := seen[tx.Hash()]; !ok {
   370  			t.Errorf("missing transaction: %x", tx.Hash())
   371  		}
   372  	}
   373  }
   374  
// Tests that transactions get propagated to all attached peers, either via direct
// broadcasts or via announcements/retrievals, for both supported wire protocols.
func TestTransactionPropagation65(t *testing.T) { testTransactionPropagation(t, eth.ETH65) }
func TestTransactionPropagation66(t *testing.T) { testTransactionPropagation(t, eth.ETH66) }
   379  
   380  func testTransactionPropagation(t *testing.T, protocol uint) {
   381  	t.Parallel()
   382  
   383  	// Create a source handler to send transactions from and a number of sinks
   384  	// to receive them. We need multiple sinks since a one-to-one peering would
   385  	// broadcast all transactions without announcement.
   386  	source := newTestHandler()
   387  	defer source.close()
   388  
   389  	sinks := make([]*testHandler, 10)
   390  	for i := 0; i < len(sinks); i++ {
   391  		sinks[i] = newTestHandler()
   392  		defer sinks[i].close()
   393  
   394  		sinks[i].handler.acceptTxs = 1 // mark synced to accept transactions
   395  	}
   396  	// Interconnect all the sink handlers with the source handler
   397  	for i, sink := range sinks {
   398  		sink := sink // Closure for gorotuine below
   399  
   400  		sourcePipe, sinkPipe := p2p.MsgPipe()
   401  		defer sourcePipe.Close()
   402  		defer sinkPipe.Close()
   403  
   404  		sourcePeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, source.txpool)
   405  		sinkPeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, sink.txpool)
   406  		defer sourcePeer.Close()
   407  		defer sinkPeer.Close()
   408  
   409  		go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error {
   410  			return eth.Handle((*ethHandler)(source.handler), peer)
   411  		})
   412  		go sink.handler.runEthPeer(sinkPeer, func(peer *eth.Peer) error {
   413  			return eth.Handle((*ethHandler)(sink.handler), peer)
   414  		})
   415  	}
   416  	// Subscribe to all the transaction pools
   417  	txChs := make([]chan core.NewTxsEvent, len(sinks))
   418  	for i := 0; i < len(sinks); i++ {
   419  		txChs[i] = make(chan core.NewTxsEvent, 1024)
   420  
   421  		sub := sinks[i].txpool.SubscribeNewTxsEvent(txChs[i])
   422  		defer sub.Unsubscribe()
   423  	}
   424  	// Fill the source pool with transactions and wait for them at the sinks
   425  	txs := make([]*types.Transactions, 1024)
   426  	for nonce := range txs {
   427  		tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil)
   428  		tx, _ = types.SignTx(tx, types.LatestSigner(params.RopstenChainConfig), testKey)
   429  
   430  		txs[nonce] = tx
   431  	}
   432  	source.txpool.AddRemotes(txs)
   433  
   434  	// Iterate through all the sinks and ensure they all got the transactions
   435  	for i := range sinks {
   436  		for arrived := 0; arrived < len(txs); {
   437  			select {
   438  			case event := <-txChs[i]:
   439  				arrived += len(event.Txs)
   440  			case <-time.NewTimer(time.Second).C:
   441  				t.Errorf("sink %d: transaction propagation timed out: have %d, want %d", i, arrived, len(txs))
   442  			}
   443  		}
   444  	}
   445  }
   446  
// Tests that post eth protocol handshake, clients perform a mutual checkpoint
// challenge to validate each other's chains. Hash mismatches, or missing ones
// during a fast sync should lead to the peer getting dropped.
func TestCheckpointChallenge(t *testing.T) {
	tests := []struct {
		syncmode   downloader.SyncMode // local sync mode (full or fast)
		checkpoint bool                // whether a local checkpoint is configured
		timeout    bool                // whether the remote never answers the challenge
		empty      bool                // whether the remote answers with zero headers
		match      bool                // whether the remote's header matches the checkpoint
		drop       bool                // expected outcome: peer should be dropped
	}{
		// If checkpointing is not enabled locally, don't challenge and don't drop
		{downloader.FullSync, false, false, false, false, false},
		{downloader.FastSync, false, false, false, false, false},

		// If checkpointing is enabled locally and remote response is empty, only drop during fast sync
		{downloader.FullSync, true, false, true, false, false},
		{downloader.FastSync, true, false, true, false, true}, // Special case, fast sync, unsynced peer

		// If checkpointing is enabled locally and remote response mismatches, always drop
		{downloader.FullSync, true, false, false, false, true},
		{downloader.FastSync, true, false, false, false, true},

		// If checkpointing is enabled locally and remote response matches, never drop
		{downloader.FullSync, true, false, false, true, false},
		{downloader.FastSync, true, false, false, true, false},

		// If checkpointing is enabled locally and remote times out, always drop
		{downloader.FullSync, true, true, false, true, true},
		{downloader.FastSync, true, true, false, true, true},
	}
	for _, tt := range tests {
		t.Run(fmt.Sprintf("sync %v checkpoint %v timeout %v empty %v match %v", tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match), func(t *testing.T) {
			testCheckpointChallenge(t, tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match, tt.drop)
		})
	}
}
   485  
   486  func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpoint bool, timeout bool, empty bool, match bool, drop bool) {
   487  	t.Parallel()
   488  
   489  	// Reduce the checkpoint handshake challenge timeout
   490  	defer func(old time.Duration) { syncChallengeTimeout = old }(syncChallengeTimeout)
   491  	syncChallengeTimeout = 250 * time.Millisecond
   492  
   493  	// Create a test handler and inject a CHT into it. The injection is a bit
   494  	// ugly, but it beats creating everything manually just to avoid reaching
   495  	// into the internals a bit.
   496  	handler := newTestHandler()
   497  	defer handler.close()
   498  
   499  	if syncmode == downloader.FastSync {
   500  		atomic.StoreUint32(&handler.handler.fastSync, 1)
   501  	} else {
   502  		atomic.StoreUint32(&handler.handler.fastSync, 0)
   503  	}
   504  	var response *types.Header
   505  	if checkpoint {
   506  		number := (uint64(rand.Intn(500))+1)*params.CHTFrequency - 1
   507  		response = &types.Header{Number: big.NewInt(int64(number)), Extra: []byte("valid")}
   508  
   509  		handler.handler.checkpointNumber = number
   510  		handler.handler.checkpointHash = response.Hash()
   511  	}
   512  
   513  	// Create a challenger peer and a challenged one.
   514  	p2pLocal, p2pRemote := p2p.MsgPipe()
   515  	defer p2pLocal.Close()
   516  	defer p2pRemote.Close()
   517  
   518  	local := eth.NewPeer(eth.ETH65, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pLocal), p2pLocal, handler.txpool)
   519  	remote := eth.NewPeer(eth.ETH65, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pRemote), p2pRemote, handler.txpool)
   520  	defer local.Close()
   521  	defer remote.Close()
   522  
   523  	handlerDone := make(chan struct{})
   524  	go func() {
   525  		defer close(handlerDone)
   526  		handler.handler.runEthPeer(local, func(peer *eth.Peer) error {
   527  			return eth.Handle((*ethHandler)(handler.handler), peer)
   528  		})
   529  	}()
   530  
   531  	// Run the handshake locally to avoid spinning up a remote handler.
   532  	var (
   533  		genesis = handler.chain.Genesis()
   534  		head    = handler.chain.CurrentBlock()
   535  		td      = handler.chain.GetTd(head.Hash(), head.NumberU64())
   536  	)
   537  	if err := remote.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {
   538  		t.Fatalf("failed to run protocol handshake")
   539  	}
   540  
   541  	// Connect a new peer and check that we receive the checkpoint challenge.
   542  	if checkpoint {
   543  		if err := remote.ExpectRequestHeadersByNumber(response.Number().Uint64(), 1, 0, false); err != nil {
   544  			t.Fatalf("challenge mismatch: %v", err)
   545  		}
   546  		// Create a block to reply to the challenge if no timeout is simulated.
   547  		if !timeout {
   548  			if empty {
   549  				if err := remote.SendBlockHeaders([]*types.Header{}); err != nil {
   550  					t.Fatalf("failed to answer challenge: %v", err)
   551  				}
   552  			} else if match {
   553  				if err := remote.SendBlockHeaders([]*types.Header{response}); err != nil {
   554  					t.Fatalf("failed to answer challenge: %v", err)
   555  				}
   556  			} else {
   557  				if err := remote.SendBlockHeaders([]*types.Header{{Number: response.Number}}); err != nil {
   558  					t.Fatalf("failed to answer challenge: %v", err)
   559  				}
   560  			}
   561  		}
   562  	}
   563  
   564  	// Wait until the test timeout passes to ensure proper cleanup
   565  	time.Sleep(syncChallengeTimeout + 300*time.Millisecond)
   566  
   567  	// Verify that the remote peer is maintained or dropped.
   568  	if drop {
   569  		<-handlerDone
   570  		if peers := handler.handler.peers.len(); peers != 0 {
   571  			t.Fatalf("peer count mismatch: have %d, want %d", peers, 0)
   572  		}
   573  	} else {
   574  		if peers := handler.handler.peers.len(); peers != 1 {
   575  			t.Fatalf("peer count mismatch: have %d, want %d", peers, 1)
   576  		}
   577  	}
   578  }
   579  
   580  // Tests that blocks are broadcast to a sqrt number of peers only.
   581  func TestBroadcastBlock1Peer(t *testing.T)    { testBroadcastBlock(t, 1, 1) }
   582  func TestBroadcastBlock2Peers(t *testing.T)   { testBroadcastBlock(t, 2, 1) }
   583  func TestBroadcastBlock3Peers(t *testing.T)   { testBroadcastBlock(t, 3, 1) }
   584  func TestBroadcastBlock4Peers(t *testing.T)   { testBroadcastBlock(t, 4, 2) }
   585  func TestBroadcastBlock5Peers(t *testing.T)   { testBroadcastBlock(t, 5, 2) }
   586  func TestBroadcastBlock8Peers(t *testing.T)   { testBroadcastBlock(t, 9, 3) }
   587  func TestBroadcastBlock12Peers(t *testing.T)  { testBroadcastBlock(t, 12, 3) }
   588  func TestBroadcastBlock16Peers(t *testing.T)  { testBroadcastBlock(t, 16, 4) }
   589  func TestBroadcastBloc26Peers(t *testing.T)   { testBroadcastBlock(t, 26, 5) }
   590  func TestBroadcastBlock100Peers(t *testing.T) { testBroadcastBlock(t, 100, 10) }
   591  
   592  func testBroadcastBlock(t *testing.T, peers, bcasts int) {
   593  	t.Parallel()
   594  
   595  	// Create a source handler to broadcast blocks from and a number of sinks
   596  	// to receive them.
   597  	source := newTestHandlerWithBlocks(1)
   598  	defer source.close()
   599  
   600  	sinks := make([]*testEthHandler, peers)
   601  	for i := 0; i < len(sinks); i++ {
   602  		sinks[i] = new(testEthHandler)
   603  	}
   604  	// Interconnect all the sink handlers with the source handler
   605  	var (
   606  		genesis = source.chain.Genesis()
   607  		td      = source.chain.GetTd(genesis.Hash(), genesis.NumberU64())
   608  	)
   609  	for i, sink := range sinks {
   610  		sink := sink // Closure for gorotuine below
   611  
   612  		sourcePipe, sinkPipe := p2p.MsgPipe()
   613  		defer sourcePipe.Close()
   614  		defer sinkPipe.Close()
   615  
   616  		sourcePeer := eth.NewPeer(eth.ETH65, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil)
   617  		sinkPeer := eth.NewPeer(eth.ETH65, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil)
   618  		defer sourcePeer.Close()
   619  		defer sinkPeer.Close()
   620  
   621  		go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error {
   622  			return eth.Handle((*ethHandler)(source.handler), peer)
   623  		})
   624  		if err := sinkPeer.Handshake(1, td, genesis.Hash(), genesis.Hash(), forkid.NewIDWithChain(source.chain), forkid.NewFilter(source.chain)); err != nil {
   625  			t.Fatalf("failed to run protocol handshake")
   626  		}
   627  		go eth.Handle(sink, sinkPeer)
   628  	}
   629  	// Subscribe to all the transaction pools
   630  	blockChs := make([]chan *types.Block, len(sinks))
   631  	for i := 0; i < len(sinks); i++ {
   632  		blockChs[i] = make(chan *types.Block, 1)
   633  		defer close(blockChs[i])
   634  
   635  		sub := sinks[i].blockBroadcasts.Subscribe(blockChs[i])
   636  		defer sub.Unsubscribe()
   637  	}
   638  	// Initiate a block propagation across the peers
   639  	time.Sleep(100 * time.Millisecond)
   640  	source.handler.BroadcastBlock(source.chain.CurrentBlock(), true)
   641  
   642  	// Iterate through all the sinks and ensure the correct number got the block
   643  	done := make(chan struct{}, peers)
   644  	for _, ch := range blockChs {
   645  		ch := ch
   646  		go func() {
   647  			<-ch
   648  			done <- struct{}{}
   649  		}()
   650  	}
   651  	var received int
   652  	for {
   653  		select {
   654  		case <-done:
   655  			received++
   656  
   657  		case <-time.After(100 * time.Millisecond):
   658  			if received != bcasts {
   659  				t.Errorf("broadcast count mismatch: have %d, want %d", received, bcasts)
   660  			}
   661  			return
   662  		}
   663  	}
   664  }
   665  
// Tests that a propagated malformed block (uncles or transactions don't match
// with the hashes in the header) gets discarded and not broadcast forward,
// for both supported wire protocol versions.
func TestBroadcastMalformedBlock65(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH65) }
func TestBroadcastMalformedBlock66(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH66) }
   670  
   671  func testBroadcastMalformedBlock(t *testing.T, protocol uint) {
   672  	t.Parallel()
   673  
   674  	// Create a source handler to broadcast blocks from and a number of sinks
   675  	// to receive them.
   676  	source := newTestHandlerWithBlocks(1)
   677  	defer source.close()
   678  
   679  	// Create a source handler to send messages through and a sink peer to receive them
   680  	p2pSrc, p2pSink := p2p.MsgPipe()
   681  	defer p2pSrc.Close()
   682  	defer p2pSink.Close()
   683  
   684  	src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, source.txpool)
   685  	sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, source.txpool)
   686  	defer src.Close()
   687  	defer sink.Close()
   688  
   689  	go source.handler.runEthPeer(src, func(peer *eth.Peer) error {
   690  		return eth.Handle((*ethHandler)(source.handler), peer)
   691  	})
   692  	// Run the handshake locally to avoid spinning up a sink handler
   693  	var (
   694  		genesis = source.chain.Genesis()
   695  		td      = source.chain.GetTd(genesis.Hash(), genesis.NumberU64())
   696  	)
   697  	if err := sink.Handshake(1, td, genesis.Hash(), genesis.Hash(), forkid.NewIDWithChain(source.chain), forkid.NewFilter(source.chain)); err != nil {
   698  		t.Fatalf("failed to run protocol handshake")
   699  	}
   700  	// After the handshake completes, the source handler should stream the sink
   701  	// the blocks, subscribe to inbound network events
   702  	backend := new(testEthHandler)
   703  
   704  	blocks := make(chan *types.Block, 1)
   705  	sub := backend.blockBroadcasts.Subscribe(blocks)
   706  	defer sub.Unsubscribe()
   707  
   708  	go eth.Handle(backend, sink)
   709  
   710  	// Create various combinations of malformed blocks
   711  	head := source.chain.CurrentBlock()
   712  
   713  	malformedUncles := head.Header()
   714  	malformedUncles.UncleHash[0]++
   715  	malformedTransactions := head.Header()
   716  	malformedTransactions.TxHash[0]++
   717  	malformedEverything := head.Header()
   718  	malformedEverything.UncleHash[0]++
   719  	malformedEverything.TxHash[0]++
   720  
   721  	// Try to broadcast all malformations and ensure they all get discarded
   722  	for _, header := range []*types.Header{malformedUncles, malformedTransactions, malformedEverything} {
   723  		block := types.NewBlockWithHeader(header).WithBody(head.Transactions(), head.Uncles())
   724  		if err := src.SendNewBlock(block, big.NewInt(131136)); err != nil {
   725  			t.Fatalf("failed to broadcast block: %v", err)
   726  		}
   727  		select {
   728  		case <-blocks:
   729  			t.Fatalf("malformed block forwarded")
   730  		case <-time.After(100 * time.Millisecond):
   731  		}
   732  	}
   733  }