github.com/ethxdao/go-ethereum@v0.0.0-20221218102228-5ae34a9cc189/les/downloader/downloader_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	ethereum "github.com/ethxdao/go-ethereum" // for ethereum.SyncProgress
	"github.com/ethxdao/go-ethereum/common"
	"github.com/ethxdao/go-ethereum/core/rawdb"
	"github.com/ethxdao/go-ethereum/core/state/snapshot"
	"github.com/ethxdao/go-ethereum/core/types"
	"github.com/ethxdao/go-ethereum/eth/protocols/eth"
	"github.com/ethxdao/go-ethereum/ethdb"
	"github.com/ethxdao/go-ethereum/event"
	"github.com/ethxdao/go-ethereum/trie"
)

// Reduce some of the parameters to make the tester faster.
func init() {
	fullMaxForkAncestry = 10000
	lightMaxForkAncestry = 10000
	blockCacheMaxItems = 1024
	fsHeaderContCheck = 500 * time.Millisecond
}

// downloadTester is a test simulator for mocking out the local block chain.
type downloadTester struct {
	downloader *Downloader

	genesis *types.Block   // Genesis block used by the tester and peers
	stateDb ethdb.Database // Database used by the tester for syncing from peers
	peerDb  ethdb.Database // Database of the peers containing all data
	peers   map[string]*downloadTesterPeer

	ownHashes   []common.Hash                  // Hash chain belonging to the tester
	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain

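	// The ancient* maps stand in for the freezer: InsertReceiptChain migrates
	// blocks at or below the ancient limit out of the own* maps and into these.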
	ancientHeaders  map[common.Hash]*types.Header  // Ancient headers belonging to the tester
	ancientBlocks   map[common.Hash]*types.Block   // Ancient blocks belonging to the tester
	ancientReceipts map[common.Hash]types.Receipts // Ancient receipts belonging to the tester
	ancientChainTd  map[common.Hash]*big.Int       // Ancient total difficulties of the blocks in the local chain

	lock sync.RWMutex
}

// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	tester := &downloadTester{
		genesis:     testGenesis,
		peerDb:      testDB,
		peers:       make(map[string]*downloadTesterPeer),
		ownHashes:   []common.Hash{testGenesis.Hash()},
		ownHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
		ownBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
		ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
		ownChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},

		// Initialize ancient store with test genesis block
		ancientHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
		ancientBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
		ancientReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
		ancientChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
	}
	tester.stateDb = rawdb.NewMemoryDatabase()
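	// Seed the state database with the genesis root. The value is a dummy
	// placeholder; the tester only ever checks key existence, never content.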
	tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})

	tester.downloader = New(0, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)
	return tester
}

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
}

// sync starts synchronising with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	dl.lock.RLock()
	hash := dl.peers[id].chain.headBlock().Hash()
	// If no particular TD was requested, load from the peer's blockchain
	if td == nil {
		td = dl.peers[id].chain.td(hash)
	}
	dl.lock.RUnlock()

	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, hash, td, mode)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}

// HasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
	return dl.GetHeaderByHash(hash) != nil
}

// HasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
	return dl.GetBlockByHash(hash) != nil
}

// HasFastBlock checks if a block's receipts are present in the tester's
// canonical chain (i.e. the block has been fast synced).
func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	if _, ok := dl.ancientReceipts[hash]; ok {
		return true
	}
	_, ok := dl.ownReceipts[hash]
	return ok
}

// GetHeaderByHash retrieves a header from the tester's canonical chain.
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()
	return dl.getHeaderByHash(hash)
}

// getHeaderByHash returns the header if found either within ancients or own blocks.
// This method assumes that the caller holds at least the read-lock (dl.lock).
func (dl *downloadTester) getHeaderByHash(hash common.Hash) *types.Header {
	header := dl.ancientHeaders[hash]
	if header != nil {
		return header
	}
	return dl.ownHeaders[hash]
}

// GetBlockByHash retrieves a block from the tester's canonical chain.
func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	block := dl.ancientBlocks[hash]
	if block != nil {
		return block
	}
	return dl.ownBlocks[hash]
}

// CurrentHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) CurrentHeader() *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if header := dl.ancientHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
	}
	return dl.genesis.Header()
}

// CurrentBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) CurrentBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
			// Ancient blocks are considered final, return them regardless of
			// whether their state is present in the active database.
			return block
		}
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
		}
	}
	return dl.genesis
}

// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) CurrentFastBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
	}
	return dl.genesis
}

// FastSyncCommitHead manually sets the head block to a given hash.
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
	// For now only check that the state trie is correct
	if block := dl.GetBlockByHash(hash); block != nil {
		_, err := trie.NewStateTrie(common.Hash{}, block.Root(), trie.NewDatabase(dl.stateDb))
		return err
	}
	return fmt.Errorf("non existent block: %x", hash[:4])
}

// GetTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.getTd(hash)
}

// getTd retrieves the block's total difficulty if found either within
// ancients or own blocks.
// This method assumes that the caller holds at least the read-lock (dl.lock).
func (dl *downloadTester) getTd(hash common.Hash) *big.Int {
	if td := dl.ancientChainTd[hash]; td != nil {
		return td
	}
	return dl.ownChainTd[hash]
}

// InsertHeaderChain injects a new batch of headers into the simulated chain.
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()
	// Do a quick check, as blockchain.InsertHeaderChain doesn't insert anything in case of errors
	if dl.getHeaderByHash(headers[0].ParentHash) == nil {
		return 0, fmt.Errorf("InsertHeaderChain: unknown parent at first position, parent of number %d", headers[0].Number)
	}
	var hashes []common.Hash
	for i := 1; i < len(headers); i++ {
		hash := headers[i-1].Hash()
		if headers[i].ParentHash != hash {
			return i, fmt.Errorf("non-contiguous import at position %d", i)
		}
		hashes = append(hashes, hash)
	}
	hashes = append(hashes, headers[len(headers)-1].Hash())
	// Do a full insert if pre-checks passed
	for i, header := range headers {
		hash := hashes[i]
		if dl.getHeaderByHash(hash) != nil {
			continue
		}
		if dl.getHeaderByHash(header.ParentHash) == nil {
			// This _should_ be impossible, due to precheck and induction
			return i, fmt.Errorf("InsertHeaderChain: unknown parent at position %d", i)
		}
		dl.ownHashes = append(dl.ownHashes, hash)
		dl.ownHeaders[hash] = header

		td := dl.getTd(header.ParentHash)
		dl.ownChainTd[hash] = new(big.Int).Add(td, header.Difficulty)
	}
	return len(headers), nil
}

// InsertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()
	for i, block := range blocks {
		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
			return i, fmt.Errorf("InsertChain: unknown parent at position %d / %d", i, len(blocks))
		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
			return i, fmt.Errorf("InsertChain: unknown parent state %x: %v", parent.Root(), err)
		}
		if hdr := dl.getHeaderByHash(block.Hash()); hdr == nil {
			dl.ownHashes = append(dl.ownHashes, block.Hash())
			dl.ownHeaders[block.Hash()] = block.Header()
		}
		dl.ownBlocks[block.Hash()] = block
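		// Register an empty receipt set so the block also counts as fast synced
		// (HasFastBlock keys on receipt presence).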
		dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
		td := dl.getTd(block.ParentHash())
		dl.ownChainTd[block.Hash()] = new(big.Int).Add(td, block.Difficulty())
	}
	return len(blocks), nil
}

// InsertReceiptChain injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := 0; i < len(blocks) && i < len(receipts); i++ {
		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
			return i, errors.New("unknown owner")
		}
		if _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok {
			if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
				return i, errors.New("InsertReceiptChain: unknown parent")
			}
		}
		if blocks[i].NumberU64() <= ancientLimit {
			dl.ancientBlocks[blocks[i].Hash()] = blocks[i]
			dl.ancientReceipts[blocks[i].Hash()] = receipts[i]

			// Migrate from active db to ancient db
			dl.ancientHeaders[blocks[i].Hash()] = blocks[i].Header()
			dl.ancientChainTd[blocks[i].Hash()] = new(big.Int).Add(dl.ancientChainTd[blocks[i].ParentHash()], blocks[i].Difficulty())
			delete(dl.ownHeaders, blocks[i].Hash())
			delete(dl.ownChainTd, blocks[i].Hash())
		} else {
			dl.ownBlocks[blocks[i].Hash()] = blocks[i]
			dl.ownReceipts[blocks[i].Hash()] = receipts[i]
		}
	}
	return len(blocks), nil
}

// SetHead rewinds the local chain to a new head.
func (dl *downloadTester) SetHead(head uint64) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	// Find the hash of the head to reset to
	var hash common.Hash
	for h, header := range dl.ownHeaders {
		if header.Number.Uint64() == head {
			hash = h
		}
	}
	for h, header := range dl.ancientHeaders {
		if header.Number.Uint64() == head {
			hash = h
		}
	}
	if hash == (common.Hash{}) {
		return fmt.Errorf("unknown head to set: %d", head)
	}
	// Find the offset in the header chain
	var offset int
	for o, h := range dl.ownHashes {
		if h == hash {
			offset = o
			break
		}
	}
	// Remove all the hashes and associated data afterwards
	for i := offset + 1; i < len(dl.ownHashes); i++ {
		delete(dl.ownChainTd, dl.ownHashes[i])
		delete(dl.ownHeaders, dl.ownHashes[i])
		delete(dl.ownReceipts, dl.ownHashes[i])
		delete(dl.ownBlocks, dl.ownHashes[i])

		delete(dl.ancientChainTd, dl.ownHashes[i])
		delete(dl.ancientHeaders, dl.ownHashes[i])
		delete(dl.ancientReceipts, dl.ownHashes[i])
		delete(dl.ancientBlocks, dl.ownHashes[i])
	}
	dl.ownHashes = dl.ownHashes[:offset+1]
	return nil
}

// Rollback removes some recently added elements from the chain. It is a no-op
// for the download tester.
func (dl *downloadTester) Rollback(hashes []common.Hash) {
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version uint, chain *testChain) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	peer := &downloadTesterPeer{dl: dl, id: id, chain: chain}
	dl.peers[id] = peer
	return dl.downloader.RegisterPeer(id, version, peer)
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peers, id)
	dl.downloader.UnregisterPeer(id)
}

// Snapshots implements the BlockChain interface for the downloader, but is a noop.
func (dl *downloadTester) Snapshots() *snapshot.Tree {
	return nil
}

type downloadTesterPeer struct {
	dl            *downloadTester
	id            string
	chain         *testChain
	missingStates map[common.Hash]bool // State entries that fast sync should not return
}

// Head returns the peer's current head hash and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	b := dlp.chain.headBlock()
	return b.Hash(), dlp.chain.td(b.Hash())
}

// RequestHeadersByHash serves a GetBlockHeaders request based on a hashed
// origin, fetching the headers from the peer's test chain and delivering
// them asynchronously to the downloader.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
	result := dlp.chain.headersByHash(origin, amount, skip, reverse)
	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	return nil
}

// RequestHeadersByNumber serves a GetBlockHeaders request based on a numbered
// origin, fetching the headers from the peer's test chain and delivering
// them asynchronously to the downloader.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
	result := dlp.chain.headersByNumber(origin, amount, skip, reverse)
	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	return nil
}

// RequestBodies serves a GetBlockBodies request, fetching the requested block
// bodies from the peer's test chain and delivering them asynchronously to the
// downloader.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
	txs, uncles := dlp.chain.bodies(hashes)
	go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles)
	return nil
}

// RequestReceipts serves a GetReceipts request, fetching the requested block
// receipts from the peer's test chain and delivering them asynchronously to
// the downloader.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
	receipts := dlp.chain.receipts(hashes)
	go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts)
	return nil
}

// RequestNodeData serves a GetNodeData request, fetching the requested state
// trie nodes from the shared peer database (skipping any deliberately missing
// entries) and delivering them asynchronously to the downloader.
func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	results := make([][]byte, 0, len(hashes))
	for _, hash := range hashes {
		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
			if !dlp.missingStates[hash] {
				results = append(results, data)
			}
		}
	}
	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
	return nil
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	assertOwnForkedChain(t, tester, 1, []int{length})
}

// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
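// For example, with common == 4 and lengths == []int{10, 8}, the expected
// header count is 10 + (8 - 4) = 14. The genesis appears in both the own and
// ancient maps, which the -1 in each check below corrects for.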
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	// Initialize the counters for the first fork
	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]

	// Update the counters for each subsequent fork
	for _, length := range lengths[1:] {
		headers += length - common
		blocks += length - common
		receipts += length - common
	}
	if tester.downloader.getMode() == LightSync {
		blocks, receipts = 1, 1
	}
	if hs := len(tester.ownHeaders) + len(tester.ancientHeaders) - 1; hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := len(tester.ownBlocks) + len(tester.ancientBlocks) - 1; bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := len(tester.ownReceipts) + len(tester.ancientReceipts) - 1; rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
}

func TestCanonicalSynchronisation66Full(t *testing.T)  { testCanonSync(t, eth.ETH66, FullSync) }
func TestCanonicalSynchronisation66Fast(t *testing.T)  { testCanonSync(t, eth.ETH66, FastSync) }
func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) }

func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheMaxItems - 15)
	tester.newPeer("peer", protocol, chain)

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that if a large batch of blocks is being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) }
func TestThrottling66Fast(t *testing.T) { testThrottling(t, eth.ETH66, FastSync) }

func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()
	tester := newTester()

	// Create a long block chain to download and the tester
	targetBlocks := testChainBase.len() - 1
	tester.newPeer("peer", protocol, testChainBase)

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
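	// The hook above records how many results are pending import and then
	// parks the importer until the test drains it via the proceed channel,
	// letting the test observe the queue in its throttled state.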
	// Start a synchronisation concurrently
	errc := make(chan error, 1)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (the reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			tester.downloader.queue.resultCache.lock.Lock()
			{
				cached = tester.downloader.queue.resultCache.countCompleted()
				frozen = int(atomic.LoadUint32(&blocked))
				retrieved = len(tester.ownBlocks)
			}
			tester.downloader.queue.resultCache.lock.Unlock()
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			if cached == blockCacheMaxItems ||
				cached == blockCacheMaxItems-reorgProtHeaderDelay ||
				retrieved+cached+frozen == targetBlocks+1 ||
				retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
		tester.lock.RLock()
		retrieved = len(tester.ownBlocks)
		tester.lock.RUnlock()
		if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
		}

		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronisation failed: %v", err)
	}
	tester.terminate()
}

// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync66Full(t *testing.T)  { testForkedSync(t, eth.ETH66, FullSync) }
func TestForkedSync66Fast(t *testing.T)  { testForkedSync(t, eth.ETH66, FastSync) }
func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) }

func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
	chainB := testChainForkLightB.shorten(testChainBase.len() + 80)
	tester.newPeer("fork A", protocol, chainA)
	tester.newPeer("fork B", protocol, chainB)
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("fork B", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync66Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FullSync) }
func TestHeavyForkedSync66Fast(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FastSync) }
func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
	chainB := testChainForkHeavy.shorten(testChainBase.len() + 80)
	tester.newPeer("light", protocol, chainA)
	tester.newPeer("heavy", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("light", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("heavy", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync66Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FullSync) }
func TestBoundedForkedSync66Fast(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FastSync) }
func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA
	chainB := testChainForkLightB
	tester.newPeer("original", protocol, chainA)
	tester.newPeer("rewriter", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync66Full(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, FullSync)
}
func TestBoundedHeavyForkedSync66Fast(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, FastSync)
}
func TestBoundedHeavyForkedSync66Light(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, LightSync)
}

func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()
	tester := newTester()

	// Create a long enough forked chain
	chainA := testChainForkLightA
	chainB := testChainForkHeavy
	tester.newPeer("original", protocol, chainA)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	tester.newPeer("heavy-rewriter", protocol, chainB)
	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
	tester.terminate()
}

// Tests that an inactive downloader will not accept incoming block headers,
// bodies and receipts.
func TestInactiveDownloader63(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers nor bodies are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel66Full(t *testing.T)  { testCancel(t, eth.ETH66, FullSync) }
func TestCancel66Fast(t *testing.T)  { testCancel(t, eth.ETH66, FastSync) }
func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) }

func testCancel(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(MaxHeaderFetch)
	tester.newPeer("peer", protocol, chain)

	// Make sure canceling works with a pristine downloader
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisation from multiple peers works as intended (multi-thread sanity test).
func TestMultiSynchronisation66Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, FullSync) }
func TestMultiSynchronisation66Fast(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, FastSync) }
func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create various peers with various parts of the chain
	targetPeers := 8
	chain := testChainBase.shorten(targetPeers * 100)

	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1)))
	}
	if err := tester.sync("peer #0", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation66Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FullSync) }
func TestMultiProtoSynchronisation66Fast(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FastSync) }
func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) }

func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Create peers of every type
	tester.newPeer("peer 66", eth.ETH66, chain)
	//tester.newPeer("peer 65", eth.ETH67, chain)

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())

	// Check that no peers have been dropped off
	for _, version := range []int{66} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peers[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit66Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FullSync) }
func TestEmptyShortCircuit66Fast(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FastSync) }
func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a block chain to download
	chain := testChainBase
	tester.newPeer("peer", protocol, chain)

	// Instrument the downloader to signal body requests
	bodiesHave, receiptsHave := int32(0), int32(0)
	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&bodiesHave, int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&receiptsHave, int32(len(headers)))
	}
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())

	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range chain.blockm {
		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
			bodiesNeeded++
		}
	}
	for _, receipt := range chain.receiptm {
		if mode == FastSync && len(receipt) > 0 {
			receiptsNeeded++
		}
	}
	if int(bodiesHave) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
	}
	if int(receiptsHave) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
	}
}

// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack66Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FullSync) }
func TestMissingHeaderAttack66Fast(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FastSync) }
func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)
	brokenChain := chain.shorten(chain.len())
	delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2])
	tester.newPeer("attack", protocol, brokenChain)

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack66Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FullSync) }
func TestShiftedHeaderAttack66Fast(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FastSync) }
func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) }

func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Attempt a full sync with an attacker feeding shifted headers
	brokenChain := chain.shorten(chain.len())
	delete(brokenChain.headerm, brokenChain.chain[1])
	delete(brokenChain.blockm, brokenChain.chain[1])
	delete(brokenChain.receiptm, brokenChain.chain[1])
	tester.newPeer("attack", protocol, brokenChain)
	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}

	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
func TestInvalidHeaderRollback66Fast(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, FastSync) }

func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()

	// Create a small enough block chain to download
	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
	chain := testChainBase.shorten(targetBlocks)

	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
	// This should result in the last fsHeaderSafetyNet headers being rolled back.
	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
	fastAttackChain := chain.shorten(chain.len())
	delete(fastAttackChain.headerm, fastAttackChain.chain[missing])
	tester.newPeer("fast-attack", protocol, fastAttackChain)

	if err := tester.sync("fast-attack", nil, mode); err == nil {
		t.Fatalf("succeeded fast attacker synchronisation")
	}
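	// The gap sits at fsHeaderSafetyNet+MaxHeaderFetch+1, so after rolling back
	// fsHeaderSafetyNet headers the head must be at most MaxHeaderFetch.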
	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
	}

	// Attempt to sync with an attacker that feeds junk during the block import phase.
	// This should result in both the last fsHeaderSafetyNet number of headers being
	// rolled back, and also the pivot point being reverted to a non-block status.
	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
	blockAttackChain := chain.shorten(chain.len())
	delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in
	delete(blockAttackChain.headerm, blockAttackChain.chain[missing])
	tester.newPeer("block-attack", protocol, blockAttackChain)

	if err := tester.sync("block-attack", nil, mode); err == nil {
		t.Fatalf("succeeded block attacker synchronisation")
	}
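	// Same arithmetic with the gap at 3*fsHeaderSafetyNet+MaxHeaderFetch+1: the
	// rollback should leave the head at most 2*fsHeaderSafetyNet+MaxHeaderFetch.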
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}

	// Attempt to sync with an attacker that withholds promised blocks after the
	// fast sync pivot point. This could be a trial to leave the node with a bad
	// but already imported pivot block.
	withholdAttackChain := chain.shorten(chain.len())
	tester.newPeer("withhold-attack", protocol, withholdAttackChain)
	tester.downloader.syncInitHook = func(uint64, uint64) {
		for i := missing; i < withholdAttackChain.len(); i++ {
			delete(withholdAttackChain.headerm, withholdAttackChain.chain[i])
		}
		tester.downloader.syncInitHook = nil
	}
	if err := tester.sync("withhold-attack", nil, mode); err == nil {
		t.Fatalf("succeeded withholding attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}

	// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
	// should also disable fast syncing for this process, verify that we did a fresh full
	// sync. Note, we can't assert anything about the receipts since we won't purge the
	// database of them, hence we can't use assertOwnChain.
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if hs := len(tester.ownHeaders); hs != chain.len() {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len())
	}
	if mode != LightSync {
		if bs := len(tester.ownBlocks); bs != chain.len() {
			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len())
		}
	}
	tester.terminate()
}

// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack66Full(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, FullSync)
}
func TestHighTDStarvationAttack66Fast(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, FastSync)
}
func TestHighTDStarvationAttack66Light(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, LightSync)
}

func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()

	chain := testChainBase.shorten(1)
	tester.newPeer("attack", protocol, chain)
	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
	}
	tester.terminate()
}

// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) }

func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
	t.Parallel()

	// Define the disconnection requirement for individual hash fetch errors
	tests := []struct {
		result error
		drop   bool
	}{
		{nil, false},                        // Sync succeeded, all is well
		{errBusy, false},                    // Sync is already in progress, no problem
		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
		{errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
		{errNoPeers, false},                 // No peers to download from, soft race, no issue
		{errTimeout, true},                  // No hashes received in due time, drop the peer
		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
	}
	// Run the tests and check disconnection status
	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(1)

	for i, tt := range tests {
		// Register a new peer and ensure its presence
		id := fmt.Sprintf("test %d", i)
		if err := tester.newPeer(id, protocol, chain); err != nil {
			t.Fatalf("test %d: failed to register new peer: %v", i, err)
		}
		if _, ok := tester.peers[id]; !ok {
			t.Fatalf("test %d: registered peer not found", i)
		}
		// Simulate a synchronisation and check the required result
		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }

		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
		if _, ok := tester.peers[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
		}
	}
}

// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
func TestSyncProgress66Full(t *testing.T)  { testSyncProgress(t, eth.ETH66, FullSync) }
func TestSyncProgress66Fast(t *testing.T)  { testSyncProgress(t, eth.ETH66, FastSync) }
func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) }

func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise half the blocks and check initial progress
	tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2))
	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("peer-half", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(chain.len()/2 - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Synchronise all the blocks and check continuation progress
	tester.newPeer("peer-full", protocol, chain)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("peer-full", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		StartingBlock: uint64(chain.len()/2 - 1),
		CurrentBlock:  uint64(chain.len()/2 - 1),
		HighestBlock:  uint64(chain.len() - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(chain.len()/2 - 1),
		CurrentBlock:  uint64(chain.len() - 1),
		HighestBlock:  uint64(chain.len() - 1),
	})
}

func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	p := d.Progress()
	//p.KnownStates, p.PulledStates = 0, 0
	//want.KnownStates, want.PulledStates = 0, 0
	if p != want {
		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
	}
}
  1214  
  1215  // Tests that synchronisation progress (origin block number and highest block
  1216  // number) is tracked and updated correctly in case of a fork (or manual head
  1217  // revertal).
  1218  func TestForkedSyncProgress66Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FullSync) }
  1219  func TestForkedSyncProgress66Fast(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FastSync) }
  1220  func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) }
  1221  
  1222  func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
  1223  	t.Parallel()
  1224  
  1225  	tester := newTester()
  1226  	defer tester.terminate()
  1227  	chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHeaderFetch)
  1228  	chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHeaderFetch)
  1229  
  1230  	// Set a sync init hook to catch progress changes
  1231  	starting := make(chan struct{})
  1232  	progress := make(chan struct{})
  1233  
  1234  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1235  		starting <- struct{}{}
  1236  		<-progress
  1237  	}
  1238  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1239  
  1240  	// Synchronise with one of the forks and check progress
  1241  	tester.newPeer("fork A", protocol, chainA)
  1242  	pending := new(sync.WaitGroup)
  1243  	pending.Add(1)
  1244  	go func() {
  1245  		defer pending.Done()
  1246  		if err := tester.sync("fork A", nil, mode); err != nil {
  1247  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1248  		}
  1249  	}()
  1250  	<-starting
  1251  
  1252  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1253  		HighestBlock: uint64(chainA.len() - 1),
  1254  	})
  1255  	progress <- struct{}{}
  1256  	pending.Wait()
  1257  
  1258  	// Simulate a successful sync above the fork
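        	// by forcing the origin up to the current head; the next cycle is expected
        	// to rewind it to the common ancestor of the two forks.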
  1259  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1260  
  1261  	// Synchronise with the second fork and check progress resets
  1262  	tester.newPeer("fork B", protocol, chainB)
  1263  	pending.Add(1)
  1264  	go func() {
  1265  		defer pending.Done()
  1266  		if err := tester.sync("fork B", nil, mode); err != nil {
  1267  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1268  		}
  1269  	}()
  1270  	<-starting
  1271  	checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
  1272  		StartingBlock: uint64(testChainBase.len()) - 1,
  1273  		CurrentBlock:  uint64(chainA.len() - 1),
  1274  		HighestBlock:  uint64(chainB.len() - 1),
  1275  	})
  1276  
  1277  	// Check final progress after successful sync
  1278  	progress <- struct{}{}
  1279  	pending.Wait()
  1280  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1281  		StartingBlock: uint64(testChainBase.len()) - 1,
  1282  		CurrentBlock:  uint64(chainB.len() - 1),
  1283  		HighestBlock:  uint64(chainB.len() - 1),
  1284  	})
  1285  }
  1286  
  1287  // Tests that if synchronisation is aborted due to some failure, then the progress
  1288  // origin is not updated in the next sync cycle, as it should be considered the
  1289  // continuation of the previous sync and not a new instance.
  1290  func TestFailedSyncProgress66Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FullSync) }
  1291  func TestFailedSyncProgress66Fast(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FastSync) }
  1292  func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) }
  1293  
  1294  func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
  1295  	t.Parallel()
  1296  
  1297  	tester := newTester()
  1298  	defer tester.terminate()
  1299  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
  1300  
  1301  	// Set a sync init hook to catch progress changes
  1302  	starting := make(chan struct{})
  1303  	progress := make(chan struct{})
  1304  
  1305  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1306  		starting <- struct{}{}
  1307  		<-progress
  1308  	}
  1309  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1310  
  1311  	// Attempt a sync with a faulty peer
  1312  	brokenChain := chain.shorten(chain.len())
  1313  	missing := brokenChain.len() / 2
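        	// Drop the header, body and receipts of the midpoint block so the peer
        	// cannot serve it and the sync aborts partway through.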
  1314  	delete(brokenChain.headerm, brokenChain.chain[missing])
  1315  	delete(brokenChain.blockm, brokenChain.chain[missing])
  1316  	delete(brokenChain.receiptm, brokenChain.chain[missing])
  1317  	tester.newPeer("faulty", protocol, brokenChain)
  1318  
  1319  	pending := new(sync.WaitGroup)
  1320  	pending.Add(1)
  1321  	go func() {
  1322  		defer pending.Done()
  1323  		if err := tester.sync("faulty", nil, mode); err == nil {
  1324  			panic("succeeded faulty synchronisation")
  1325  		}
  1326  	}()
  1327  	<-starting
  1328  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1329  		HighestBlock: uint64(brokenChain.len() - 1),
  1330  	})
  1331  	progress <- struct{}{}
  1332  	pending.Wait()
  1333  	afterFailedSync := tester.downloader.Progress()
  1334  
  1335  	// Synchronise with a good peer and check that the progress origin remains
  1336  	// the same after the failure
  1337  	tester.newPeer("valid", protocol, chain)
  1338  	pending.Add(1)
  1339  	go func() {
  1340  		defer pending.Done()
  1341  		if err := tester.sync("valid", nil, mode); err != nil {
  1342  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1343  		}
  1344  	}()
  1345  	<-starting
  1346  	checkProgress(t, tester.downloader, "completing", afterFailedSync)
  1347  
  1348  	// Check final progress after successful sync
  1349  	progress <- struct{}{}
  1350  	pending.Wait()
  1351  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1352  		CurrentBlock: uint64(chain.len() - 1),
  1353  		HighestBlock: uint64(chain.len() - 1),
  1354  	})
  1355  }
  1356  
  1357  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1358  // the progress height is successfully reduced at the next sync invocation.
  1359  func TestFakedSyncProgress66Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, FullSync) }
  1360  func TestFakedSyncProgress66Fast(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, FastSync) }
  1361  func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) }
  1362  
  1363  func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
  1364  	t.Parallel()
  1365  
  1366  	tester := newTester()
  1367  	defer tester.terminate()
  1368  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
  1369  
  1370  	// Set a sync init hook to catch progress changes
  1371  	starting := make(chan struct{})
  1372  	progress := make(chan struct{})
  1373  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1374  		starting <- struct{}{}
  1375  		<-progress
  1376  	}
  1377  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1378  
  1379  	// Create and sync with an attacker that promises a higher chain than available.
  1380  	brokenChain := chain.shorten(chain.len())
  1381  	numMissing := 5
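        	// Strip a few headers just below the head, keeping the head itself, so the
        	// attacker advertises the full height without being able to serve it all.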
  1382  	for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- {
  1383  		delete(brokenChain.headerm, brokenChain.chain[i])
  1384  	}
  1385  	tester.newPeer("attack", protocol, brokenChain)
  1386  
  1387  	pending := new(sync.WaitGroup)
  1388  	pending.Add(1)
  1389  	go func() {
  1390  		defer pending.Done()
  1391  		if err := tester.sync("attack", nil, mode); err == nil {
  1392  			panic("succeeded attacker synchronisation")
  1393  		}
  1394  	}()
  1395  	<-starting
  1396  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1397  		HighestBlock: uint64(brokenChain.len() - 1),
  1398  	})
  1399  	progress <- struct{}{}
  1400  	pending.Wait()
  1401  	afterFailedSync := tester.downloader.Progress()
  1402  
  1403  	// Synchronise with a good peer and check that the progress height has been reduced to
  1404  	// the true value.
  1405  	validChain := chain.shorten(chain.len() - numMissing)
  1406  	tester.newPeer("valid", protocol, validChain)
  1407  	pending.Add(1)
  1408  
  1409  	go func() {
  1410  		defer pending.Done()
  1411  		if err := tester.sync("valid", nil, mode); err != nil {
  1412  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1413  		}
  1414  	}()
  1415  	<-starting
  1416  	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
  1417  		CurrentBlock: afterFailedSync.CurrentBlock,
  1418  		HighestBlock: uint64(validChain.len() - 1),
  1419  	})
  1420  
  1421  	// Check final progress after successful sync.
  1422  	progress <- struct{}{}
  1423  	pending.Wait()
  1424  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1425  		CurrentBlock: uint64(validChain.len() - 1),
  1426  		HighestBlock: uint64(validChain.len() - 1),
  1427  	})
  1428  }
  1429  
  1430  // This test reproduces an issue where unexpected deliveries would
  1431  // block indefinitely if they arrived at the right time.
  1432  func TestDeliverHeadersHang66Full(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH66, FullSync) }
  1433  func TestDeliverHeadersHang66Fast(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH66, FastSync) }
  1434  func TestDeliverHeadersHang66Light(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, LightSync) }
  1435  
  1436  func testDeliverHeadersHang(t *testing.T, protocol uint, mode SyncMode) {
  1437  	t.Parallel()
  1438  
  1439  	master := newTester()
  1440  	defer master.terminate()
  1441  	chain := testChainBase.shorten(15)
  1442  
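        	// The original hang was timing-dependent, so run many iterations to make it
        	// likely that at least one hits the problematic interleaving.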
  1443  	for i := 0; i < 200; i++ {
  1444  		tester := newTester()
  1445  		tester.peerDb = master.peerDb
  1446  		tester.newPeer("peer", protocol, chain)
  1447  
  1448  		// Whenever the downloader requests headers, flood it with
  1449  		// a lot of unrequested header deliveries.
  1450  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1451  			peer:   tester.downloader.peers.peers["peer"].peer,
  1452  			tester: tester,
  1453  		}
  1454  		if err := tester.sync("peer", nil, mode); err != nil {
  1455  			t.Errorf("test %d: sync failed: %v", i, err)
  1456  		}
  1457  		tester.terminate()
  1458  	}
  1459  }
  1460  
  1461  type floodingTestPeer struct {
  1462  	peer   Peer
  1463  	tester *downloadTester
  1464  }
  1465  
  1466  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1467  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1468  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1469  }
  1470  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1471  	return ftp.peer.RequestBodies(hashes)
  1472  }
  1473  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1474  	return ftp.peer.RequestReceipts(hashes)
  1475  }
  1476  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1477  	return ftp.peer.RequestNodeData(hashes)
  1478  }
  1479  
  1480  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
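        	// Flood the downloader with unsolicited header deliveries from fake peers;
        	// the genuine request is only forwarded once the first flood response lands.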
  1481  	deliveriesDone := make(chan struct{}, 500)
  1482  	for i := 0; i < cap(deliveriesDone)-1; i++ {
  1483  		peer := fmt.Sprintf("fake-peer%d", i)
  1484  		go func() {
  1485  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1486  			deliveriesDone <- struct{}{}
  1487  		}()
  1488  	}
  1489  
  1490  	// None of the extra deliveries should block.
  1491  	timeout := time.After(60 * time.Second)
  1492  	launched := false
  1493  	for i := 0; i < cap(deliveriesDone); i++ {
  1494  		select {
  1495  		case <-deliveriesDone:
  1496  			if !launched {
  1497  				// Start delivering the requested headers
  1498  				// after one of the flooding responses has arrived.
  1499  				go func() {
  1500  					ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1501  					deliveriesDone <- struct{}{}
  1502  				}()
  1503  				launched = true
  1504  			}
  1505  		case <-timeout:
  1506  			panic("blocked")
  1507  		}
  1508  	}
  1509  	return nil
  1510  }
  1511  
  1512  func TestRemoteHeaderRequestSpan(t *testing.T) {
  1513  	testCases := []struct {
  1514  		remoteHeight uint64
  1515  		localHeight  uint64
  1516  		expected     []int
  1517  	}{
  1518  		// Remote is way higher. We should ask for the remote head and go backwards
  1519  		{1500, 1000,
  1520  			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
  1521  		},
  1522  		{15000, 13006,
  1523  			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
  1524  		},
  1525  		// Remote is pretty close to us. We don't have to fetch as many headers
  1526  		{1200, 1150,
  1527  			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
  1528  		},
  1529  		// Remote is equal to us (so it is on a fork with higher total difficulty).
  1530  		// We should get the closest couple of ancestors
  1531  		{1500, 1500,
  1532  			[]int{1497, 1499},
  1533  		},
  1534  		// We're higher than the remote! Odd
  1535  		{1000, 1500,
  1536  			[]int{997, 999},
  1537  		},
  1538  		// Check some weird edge cases to make sure the span behaves rationally
  1539  		{0, 1500,
  1540  			[]int{0, 2},
  1541  		},
  1542  		{6000000, 0,
  1543  			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
  1544  		},
  1545  		{0, 0,
  1546  			[]int{0, 2},
  1547  		},
  1548  	}
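        	// reqs expands a (from, count, span) request into the concrete block numbers
        	// it covers: count values starting at from, spaced span+1 apart.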
  1549  	reqs := func(from, count, span int) []int {
  1550  		var r []int
  1551  		num := from
  1552  		for len(r) < count {
  1553  			r = append(r, num)
  1554  			num += span + 1
  1555  		}
  1556  		return r
  1557  	}
  1558  	for i, tt := range testCases {
  1559  		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
  1560  		data := reqs(int(from), count, span)
  1561  
  1562  		if max != uint64(data[len(data)-1]) {
  1563  			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
  1564  		}
  1565  		failed := false
  1566  		if len(data) != len(tt.expected) {
  1567  			failed = true
  1568  			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
  1569  		} else {
  1570  			for j, n := range data {
  1571  				if n != tt.expected[j] {
  1572  					failed = true
  1573  					break
  1574  				}
  1575  			}
  1576  		}
  1577  		if failed {
  1578  			res := strings.ReplaceAll(fmt.Sprint(data), " ", ",")
  1579  			exp := strings.ReplaceAll(fmt.Sprint(tt.expected), " ", ",")
  1580  			t.Logf("got: %v\n", res)
  1581  			t.Logf("exp: %v\n", exp)
  1582  			t.Errorf("test %d: wrong values", i)
  1583  		}
  1584  	}
  1585  }
  1586  
  1587  // Tests that peers below a pre-configured checkpoint block are prevented from
  1588  // being fast-synced from, avoiding potential cheap eclipse attacks.
  1589  func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FullSync) }
  1590  func TestCheckpointEnforcement66Fast(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FastSync) }
  1591  func TestCheckpointEnforcement66Light(t *testing.T) {
  1592  	testCheckpointEnforcement(t, eth.ETH66, LightSync)
  1593  }
  1594  
  1595  func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) {
  1596  	t.Parallel()
  1597  
  1598  	// Create a new tester with a particular hard-coded checkpoint block
  1599  	tester := newTester()
  1600  	defer tester.terminate()
  1601  
  1602  	tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
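        	// Serve a chain that deliberately stops short of the checkpoint: fast and
        	// light sync must reject such a peer as unsynced, while full sync proceeds.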
  1603  	chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)
  1604  
  1605  	// Attempt to sync with the peer and validate the result
  1606  	tester.newPeer("peer", protocol, chain)
  1607  
  1608  	var expect error
  1609  	if mode == FastSync || mode == LightSync {
  1610  		expect = errUnsyncedPeer
  1611  	}
  1612  	if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) {
  1613  		t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
  1614  	}
  1615  	if mode == FastSync || mode == LightSync {
  1616  		assertOwnChain(t, tester, 1)
  1617  	} else {
  1618  		assertOwnChain(t, tester, chain.len())
  1619  	}
  1620  }