github.com/arjunbeliever/ignite@v0.0.0-20220406110515-46bbbbec2587/eth/downloader/downloader_test.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"math/big"
    23  	"strings"
    24  	"sync"
    25  	"sync/atomic"
    26  	"testing"
    27  	"time"
    28  
    29  	ethereum "github.com/arjunbeliever/ignite"
    30  	"github.com/arjunbeliever/ignite/common"
    31  	"github.com/arjunbeliever/ignite/core/rawdb"
    32  	"github.com/arjunbeliever/ignite/core/state/snapshot"
    33  	"github.com/arjunbeliever/ignite/core/types"
    34  	"github.com/arjunbeliever/ignite/eth/protocols/eth"
    35  	"github.com/arjunbeliever/ignite/ethdb"
    36  	"github.com/arjunbeliever/ignite/event"
    37  	"github.com/arjunbeliever/ignite/trie"
    38  )
    39  
    40  // Reduce some of the parameters to make the tester faster.
    41  func init() {
    42  	fullMaxForkAncestry = 10000
    43  	lightMaxForkAncestry = 10000
    44  	blockCacheMaxItems = 1024
    45  	fsHeaderContCheck = 500 * time.Millisecond
    46  }
    47  
     48  // downloadTester is a test simulator for mocking out the local block chain.
    49  type downloadTester struct {
    50  	downloader *Downloader
    51  
     52  	genesis *types.Block   // Genesis block used by the tester and peers
    53  	stateDb ethdb.Database // Database used by the tester for syncing from peers
    54  	peerDb  ethdb.Database // Database of the peers containing all data
    55  	peers   map[string]*downloadTesterPeer
    56  
    57  	ownHashes   []common.Hash                  // Hash chain belonging to the tester
    58  	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
    59  	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
    60  	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
    61  	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain
    62  
    63  	ancientHeaders  map[common.Hash]*types.Header  // Ancient headers belonging to the tester
    64  	ancientBlocks   map[common.Hash]*types.Block   // Ancient blocks belonging to the tester
    65  	ancientReceipts map[common.Hash]types.Receipts // Ancient receipts belonging to the tester
    66  	ancientChainTd  map[common.Hash]*big.Int       // Ancient total difficulties of the blocks in the local chain
    67  
    68  	lock sync.RWMutex
    69  }
    70  
    71  // newTester creates a new downloader test mocker.
    72  func newTester() *downloadTester {
    73  	tester := &downloadTester{
    74  		genesis:     testGenesis,
    75  		peerDb:      testDB,
    76  		peers:       make(map[string]*downloadTesterPeer),
    77  		ownHashes:   []common.Hash{testGenesis.Hash()},
    78  		ownHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
    79  		ownBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
    80  		ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
    81  		ownChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
    82  
    83  		// Initialize ancient store with test genesis block
    84  		ancientHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
    85  		ancientBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
    86  		ancientReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
    87  		ancientChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
    88  	}
    89  	tester.stateDb = rawdb.NewMemoryDatabase()
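         	// Prime the state database with an entry under the genesis state root so state-availability checks succeed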
    90  	tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})
    91  
    92  	tester.downloader = New(0, tester.stateDb, trie.NewSyncBloom(1, tester.stateDb), new(event.TypeMux), tester, nil, tester.dropPeer)
    93  	return tester
    94  }
    95  
    96  // terminate aborts any operations on the embedded downloader and releases all
    97  // held resources.
    98  func (dl *downloadTester) terminate() {
    99  	dl.downloader.Terminate()
   100  }
   101  
   102  // sync starts synchronizing with a remote peer, blocking until it completes.
   103  func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
   104  	dl.lock.RLock()
   105  	hash := dl.peers[id].chain.headBlock().Hash()
   106  	// If no particular TD was requested, load from the peer's blockchain
   107  	if td == nil {
   108  		td = dl.peers[id].chain.td(hash)
   109  	}
   110  	dl.lock.RUnlock()
   111  
   112  	// Synchronise with the chosen peer and ensure proper cleanup afterwards
   113  	err := dl.downloader.synchronise(id, hash, td, mode)
   114  	select {
   115  	case <-dl.downloader.cancelCh:
   116  		// Ok, downloader fully cancelled after sync cycle
   117  	default:
   118  		// Downloader is still accepting packets, can block a peer up
   119  		panic("downloader active post sync cycle") // panic will be caught by tester
   120  	}
   121  	return err
   122  }
   123  
    124  // HasHeader checks if a header is present in the tester's canonical chain.
   125  func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
   126  	return dl.GetHeaderByHash(hash) != nil
   127  }
   128  
    129  // HasBlock checks if a block is present in the tester's canonical chain.
   130  func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
   131  	return dl.GetBlockByHash(hash) != nil
   132  }
   133  
    134  // HasFastBlock checks if a block, along with its receipts, is present in the tester's canonical chain.
   135  func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
   136  	dl.lock.RLock()
   137  	defer dl.lock.RUnlock()
   138  
   139  	if _, ok := dl.ancientReceipts[hash]; ok {
   140  		return true
   141  	}
   142  	_, ok := dl.ownReceipts[hash]
   143  	return ok
   144  }
   145  
    146  // GetHeaderByHash retrieves a header from the tester's canonical chain.
   147  func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
   148  	dl.lock.RLock()
   149  	defer dl.lock.RUnlock()
   150  	return dl.getHeaderByHash(hash)
   151  }
   152  
    153  // getHeaderByHash returns the header if found either within ancients or own blocks.
    154  // This method assumes that the caller holds at least the read-lock (dl.lock).
   155  func (dl *downloadTester) getHeaderByHash(hash common.Hash) *types.Header {
   156  	header := dl.ancientHeaders[hash]
   157  	if header != nil {
   158  		return header
   159  	}
   160  	return dl.ownHeaders[hash]
   161  }
   162  
    163  // GetBlockByHash retrieves a block from the tester's canonical chain.
   164  func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
   165  	dl.lock.RLock()
   166  	defer dl.lock.RUnlock()
   167  
   168  	block := dl.ancientBlocks[hash]
   169  	if block != nil {
   170  		return block
   171  	}
   172  	return dl.ownBlocks[hash]
   173  }
   174  
   175  // CurrentHeader retrieves the current head header from the canonical chain.
   176  func (dl *downloadTester) CurrentHeader() *types.Header {
   177  	dl.lock.RLock()
   178  	defer dl.lock.RUnlock()
   179  
   180  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   181  		if header := dl.ancientHeaders[dl.ownHashes[i]]; header != nil {
   182  			return header
   183  		}
   184  		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
   185  			return header
   186  		}
   187  	}
   188  	return dl.genesis.Header()
   189  }
   190  
   191  // CurrentBlock retrieves the current head block from the canonical chain.
   192  func (dl *downloadTester) CurrentBlock() *types.Block {
   193  	dl.lock.RLock()
   194  	defer dl.lock.RUnlock()
   195  
   196  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   197  		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
   198  			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
   199  				return block
   200  			}
   201  			return block
   202  		}
   203  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   204  			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
   205  				return block
   206  			}
   207  		}
   208  	}
   209  	return dl.genesis
   210  }
   211  
   212  // CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
   213  func (dl *downloadTester) CurrentFastBlock() *types.Block {
   214  	dl.lock.RLock()
   215  	defer dl.lock.RUnlock()
   216  
   217  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   218  		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
   219  			return block
   220  		}
   221  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   222  			return block
   223  		}
   224  	}
   225  	return dl.genesis
   226  }
   227  
   228  // FastSyncCommitHead manually sets the head block to a given hash.
   229  func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
   230  	// For now only check that the state trie is correct
   231  	if block := dl.GetBlockByHash(hash); block != nil {
   232  		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb))
   233  		return err
   234  	}
   235  	return fmt.Errorf("non existent block: %x", hash[:4])
   236  }
   237  
   238  // GetTd retrieves the block's total difficulty from the canonical chain.
   239  func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
   240  	dl.lock.RLock()
   241  	defer dl.lock.RUnlock()
   242  
   243  	return dl.getTd(hash)
   244  }
   245  
   246  // getTd retrieves the block's total difficulty if found either within
    247  // ancients or own blocks.
    248  // This method assumes that the caller holds at least the read-lock (dl.lock).
   249  func (dl *downloadTester) getTd(hash common.Hash) *big.Int {
   250  	if td := dl.ancientChainTd[hash]; td != nil {
   251  		return td
   252  	}
   253  	return dl.ownChainTd[hash]
   254  }
   255  
   256  // InsertHeaderChain injects a new batch of headers into the simulated chain.
   257  func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) {
   258  	dl.lock.Lock()
   259  	defer dl.lock.Unlock()
   260  	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
   261  	if dl.getHeaderByHash(headers[0].ParentHash) == nil {
   262  		return 0, fmt.Errorf("InsertHeaderChain: unknown parent at first position, parent of number %d", headers[0].Number)
   263  	}
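         	// Pre-compute the hashes and verify that the supplied headers form a contiguous batch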
   264  	var hashes []common.Hash
   265  	for i := 1; i < len(headers); i++ {
   266  		hash := headers[i-1].Hash()
   267  		if headers[i].ParentHash != headers[i-1].Hash() {
   268  			return i, fmt.Errorf("non-contiguous import at position %d", i)
   269  		}
   270  		hashes = append(hashes, hash)
   271  	}
   272  	hashes = append(hashes, headers[len(headers)-1].Hash())
   273  	// Do a full insert if pre-checks passed
   274  	for i, header := range headers {
   275  		hash := hashes[i]
   276  		if dl.getHeaderByHash(hash) != nil {
   277  			continue
   278  		}
   279  		if dl.getHeaderByHash(header.ParentHash) == nil {
   280  			// This _should_ be impossible, due to precheck and induction
   281  			return i, fmt.Errorf("InsertHeaderChain: unknown parent at position %d", i)
   282  		}
   283  		dl.ownHashes = append(dl.ownHashes, hash)
   284  		dl.ownHeaders[hash] = header
   285  
   286  		td := dl.getTd(header.ParentHash)
   287  		dl.ownChainTd[hash] = new(big.Int).Add(td, header.Difficulty)
   288  	}
   289  	return len(headers), nil
   290  }
   291  
   292  // InsertChain injects a new batch of blocks into the simulated chain.
   293  func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
   294  	dl.lock.Lock()
   295  	defer dl.lock.Unlock()
   296  	for i, block := range blocks {
   297  		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
   298  			return i, fmt.Errorf("InsertChain: unknown parent at position %d / %d", i, len(blocks))
   299  		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
   300  			return i, fmt.Errorf("InsertChain: unknown parent state %x: %v", parent.Root(), err)
   301  		}
   302  		if hdr := dl.getHeaderByHash(block.Hash()); hdr == nil {
   303  			dl.ownHashes = append(dl.ownHashes, block.Hash())
   304  			dl.ownHeaders[block.Hash()] = block.Header()
   305  		}
   306  		dl.ownBlocks[block.Hash()] = block
   307  		dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
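         		// Mark the block's state root as present so later parent-state lookups succeed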
   308  		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
   309  		td := dl.getTd(block.ParentHash())
   310  		dl.ownChainTd[block.Hash()] = new(big.Int).Add(td, block.Difficulty())
   311  	}
   312  	return len(blocks), nil
   313  }
   314  
   315  // InsertReceiptChain injects a new batch of receipts into the simulated chain.
   316  func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) (i int, err error) {
   317  	dl.lock.Lock()
   318  	defer dl.lock.Unlock()
   319  
   320  	for i := 0; i < len(blocks) && i < len(receipts); i++ {
   321  		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
   322  			return i, errors.New("unknown owner")
   323  		}
   324  		if _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok {
   325  			if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
   326  				return i, errors.New("InsertReceiptChain: unknown parent")
   327  			}
   328  		}
   329  		if blocks[i].NumberU64() <= ancientLimit {
   330  			dl.ancientBlocks[blocks[i].Hash()] = blocks[i]
   331  			dl.ancientReceipts[blocks[i].Hash()] = receipts[i]
   332  
   333  			// Migrate from active db to ancient db
   334  			dl.ancientHeaders[blocks[i].Hash()] = blocks[i].Header()
   335  			dl.ancientChainTd[blocks[i].Hash()] = new(big.Int).Add(dl.ancientChainTd[blocks[i].ParentHash()], blocks[i].Difficulty())
   336  			delete(dl.ownHeaders, blocks[i].Hash())
   337  			delete(dl.ownChainTd, blocks[i].Hash())
   338  		} else {
   339  			dl.ownBlocks[blocks[i].Hash()] = blocks[i]
   340  			dl.ownReceipts[blocks[i].Hash()] = receipts[i]
   341  		}
   342  	}
   343  	return len(blocks), nil
   344  }
   345  
   346  // SetHead rewinds the local chain to a new head.
   347  func (dl *downloadTester) SetHead(head uint64) error {
   348  	dl.lock.Lock()
   349  	defer dl.lock.Unlock()
   350  
   351  	// Find the hash of the head to reset to
   352  	var hash common.Hash
   353  	for h, header := range dl.ownHeaders {
   354  		if header.Number.Uint64() == head {
   355  			hash = h
   356  		}
   357  	}
   358  	for h, header := range dl.ancientHeaders {
   359  		if header.Number.Uint64() == head {
   360  			hash = h
   361  		}
   362  	}
   363  	if hash == (common.Hash{}) {
   364  		return fmt.Errorf("unknown head to set: %d", head)
   365  	}
   366  	// Find the offset in the header chain
   367  	var offset int
   368  	for o, h := range dl.ownHashes {
   369  		if h == hash {
   370  			offset = o
   371  			break
   372  		}
   373  	}
   374  	// Remove all the hashes and associated data afterwards
   375  	for i := offset + 1; i < len(dl.ownHashes); i++ {
   376  		delete(dl.ownChainTd, dl.ownHashes[i])
   377  		delete(dl.ownHeaders, dl.ownHashes[i])
   378  		delete(dl.ownReceipts, dl.ownHashes[i])
   379  		delete(dl.ownBlocks, dl.ownHashes[i])
   380  
   381  		delete(dl.ancientChainTd, dl.ownHashes[i])
   382  		delete(dl.ancientHeaders, dl.ownHashes[i])
   383  		delete(dl.ancientReceipts, dl.ownHashes[i])
   384  		delete(dl.ancientBlocks, dl.ownHashes[i])
   385  	}
   386  	dl.ownHashes = dl.ownHashes[:offset+1]
   387  	return nil
   388  }
   389  
    390  // Rollback removes some recently added elements from the chain. It is a no-op for the tester.
   391  func (dl *downloadTester) Rollback(hashes []common.Hash) {
   392  }
   393  
   394  // newPeer registers a new block download source into the downloader.
   395  func (dl *downloadTester) newPeer(id string, version uint, chain *testChain) error {
   396  	dl.lock.Lock()
   397  	defer dl.lock.Unlock()
   398  
   399  	peer := &downloadTesterPeer{dl: dl, id: id, chain: chain}
   400  	dl.peers[id] = peer
   401  	return dl.downloader.RegisterPeer(id, version, peer)
   402  }
   403  
   404  // dropPeer simulates a hard peer removal from the connection pool.
   405  func (dl *downloadTester) dropPeer(id string) {
   406  	dl.lock.Lock()
   407  	defer dl.lock.Unlock()
   408  
   409  	delete(dl.peers, id)
   410  	dl.downloader.UnregisterPeer(id)
   411  }
   412  
   413  // Snapshots implements the BlockChain interface for the downloader, but is a noop.
   414  func (dl *downloadTester) Snapshots() *snapshot.Tree {
   415  	return nil
   416  }
   417  
   418  type downloadTesterPeer struct {
   419  	dl            *downloadTester
   420  	id            string
   421  	chain         *testChain
   422  	missingStates map[common.Hash]bool // State entries that fast sync should not return
   423  }
   424  
    425  // Head retrieves the peer's current head hash and total
    426  // difficulty.
   427  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   428  	b := dlp.chain.headBlock()
   429  	return b.Hash(), dlp.chain.td(b.Hash())
   430  }
   431  
    432  // RequestHeadersByHash retrieves a batch of headers, starting at a hash-based
    433  // origin, from the simulated chain of this test peer and delivers them
    434  // asynchronously to the downloader.
   435  func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
   436  	result := dlp.chain.headersByHash(origin, amount, skip, reverse)
   437  	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   438  	return nil
   439  }
   440  
    441  // RequestHeadersByNumber retrieves a batch of headers, starting at a number-based
    442  // origin, from the simulated chain of this test peer and delivers them
    443  // asynchronously to the downloader.
   444  func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
   445  	result := dlp.chain.headersByNumber(origin, amount, skip, reverse)
   446  	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   447  	return nil
   448  }
   449  
    450  // RequestBodies retrieves the block bodies for the requested hashes from the
    451  // simulated chain of this test peer and delivers them asynchronously to the
    452  // downloader.
   453  func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
   454  	txs, uncles := dlp.chain.bodies(hashes)
   455  	go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles)
   456  	return nil
   457  }
   458  
    459  // RequestReceipts retrieves the block receipts for the requested hashes from the
    460  // simulated chain of this test peer and delivers them asynchronously to the
    461  // downloader.
   462  func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
   463  	receipts := dlp.chain.receipts(hashes)
   464  	go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts)
   465  	return nil
   466  }
   467  
    468  // RequestNodeData retrieves the state trie nodes for the requested hashes from
    469  // the shared peer database, skipping any entries marked as missing, and delivers
    470  // them asynchronously to the downloader.
   471  func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
   472  	dlp.dl.lock.RLock()
   473  	defer dlp.dl.lock.RUnlock()
   474  
   475  	results := make([][]byte, 0, len(hashes))
   476  	for _, hash := range hashes {
   477  		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
   478  			if !dlp.missingStates[hash] {
   479  				results = append(results, data)
   480  			}
   481  		}
   482  	}
   483  	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
   484  	return nil
   485  }
   486  
   487  // assertOwnChain checks if the local chain contains the correct number of items
   488  // of the various chain components.
   489  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   490  	// Mark this method as a helper to report errors at callsite, not in here
   491  	t.Helper()
   492  
   493  	assertOwnForkedChain(t, tester, 1, []int{length})
   494  }
   495  
   496  // assertOwnForkedChain checks if the local forked chain contains the correct
   497  // number of items of the various chain components.
   498  func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
   499  	// Mark this method as a helper to report errors at callsite, not in here
   500  	t.Helper()
   501  
   502  	// Initialize the counters for the first fork
   503  	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]
   504  
   505  	// Update the counters for each subsequent fork
   506  	for _, length := range lengths[1:] {
   507  		headers += length - common
   508  		blocks += length - common
   509  		receipts += length - common
   510  	}
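         	// In light sync only headers are fetched, so only the genesis block and receipt entries are present locally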
   511  	if tester.downloader.getMode() == LightSync {
   512  		blocks, receipts = 1, 1
   513  	}
   514  	if hs := len(tester.ownHeaders) + len(tester.ancientHeaders) - 1; hs != headers {
   515  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   516  	}
   517  	if bs := len(tester.ownBlocks) + len(tester.ancientBlocks) - 1; bs != blocks {
   518  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   519  	}
   520  	if rs := len(tester.ownReceipts) + len(tester.ancientReceipts) - 1; rs != receipts {
   521  		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
   522  	}
   523  }
   524  
   525  func TestCanonicalSynchronisation66Full(t *testing.T)  { testCanonSync(t, eth.ETH66, FullSync) }
   526  func TestCanonicalSynchronisation66Fast(t *testing.T)  { testCanonSync(t, eth.ETH66, FastSync) }
   527  func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) }
   528  
   529  func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
   530  	t.Parallel()
   531  
   532  	tester := newTester()
   533  	defer tester.terminate()
   534  
   535  	// Create a small enough block chain to download
   536  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   537  	tester.newPeer("peer", protocol, chain)
   538  
   539  	// Synchronise with the peer and make sure all relevant data was retrieved
   540  	if err := tester.sync("peer", nil, mode); err != nil {
   541  		t.Fatalf("failed to synchronise blocks: %v", err)
   542  	}
   543  	assertOwnChain(t, tester, chain.len())
   544  }
   545  
    546  // Tests that if a large batch of blocks is being downloaded, it gets throttled
    547  // until the cached blocks are retrieved.
   548  func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) }
   549  func TestThrottling66Fast(t *testing.T) { testThrottling(t, eth.ETH66, FastSync) }
   550  
   551  func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
   552  	t.Parallel()
   553  	tester := newTester()
   554  
   555  	// Create a long block chain to download and the tester
   556  	targetBlocks := testChainBase.len() - 1
   557  	tester.newPeer("peer", protocol, testChainBase)
   558  
   559  	// Wrap the importer to allow stepping
   560  	blocked, proceed := uint32(0), make(chan struct{})
   561  	tester.downloader.chainInsertHook = func(results []*fetchResult) {
   562  		atomic.StoreUint32(&blocked, uint32(len(results)))
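         		// Stall the import until the test explicitly signals it to proceed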
   563  		<-proceed
   564  	}
   565  	// Start a synchronisation concurrently
   566  	errc := make(chan error, 1)
   567  	go func() {
   568  		errc <- tester.sync("peer", nil, mode)
   569  	}()
   570  	// Iteratively take some blocks, always checking the retrieval count
   571  	for {
    572  		// Check the retrieval count synchronously (the reason for this ugly block)
   573  		tester.lock.RLock()
   574  		retrieved := len(tester.ownBlocks)
   575  		tester.lock.RUnlock()
   576  		if retrieved >= targetBlocks+1 {
   577  			break
   578  		}
   579  		// Wait a bit for sync to throttle itself
   580  		var cached, frozen int
   581  		for start := time.Now(); time.Since(start) < 3*time.Second; {
   582  			time.Sleep(25 * time.Millisecond)
   583  
   584  			tester.lock.Lock()
   585  			tester.downloader.queue.lock.Lock()
   586  			tester.downloader.queue.resultCache.lock.Lock()
   587  			{
   588  				cached = tester.downloader.queue.resultCache.countCompleted()
   589  				frozen = int(atomic.LoadUint32(&blocked))
   590  				retrieved = len(tester.ownBlocks)
   591  			}
   592  			tester.downloader.queue.resultCache.lock.Unlock()
   593  			tester.downloader.queue.lock.Unlock()
   594  			tester.lock.Unlock()
   595  
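         			// Stop waiting once the cache has filled up (allowing for reorg protection slack) or all blocks are accounted for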
   596  			if cached == blockCacheMaxItems ||
   597  				cached == blockCacheMaxItems-reorgProtHeaderDelay ||
   598  				retrieved+cached+frozen == targetBlocks+1 ||
   599  				retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
   600  				break
   601  			}
   602  		}
   603  		// Make sure we filled up the cache, then exhaust it
   604  		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   605  		tester.lock.RLock()
   606  		retrieved = len(tester.ownBlocks)
   607  		tester.lock.RUnlock()
   608  		if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
   609  			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
   610  		}
   611  
   612  		// Permit the blocked blocks to import
   613  		if atomic.LoadUint32(&blocked) > 0 {
   614  			atomic.StoreUint32(&blocked, uint32(0))
   615  			proceed <- struct{}{}
   616  		}
   617  	}
   618  	// Check that we haven't pulled more blocks than available
   619  	assertOwnChain(t, tester, targetBlocks+1)
   620  	if err := <-errc; err != nil {
   621  		t.Fatalf("block synchronization failed: %v", err)
   622  	}
   623  	tester.terminate()
   624  
   625  }
   626  
   627  // Tests that simple synchronization against a forked chain works correctly. In
    628  // this test, common ancestor lookup should *not* be short-circuited, and a full
   629  // binary search should be executed.
   630  func TestForkedSync66Full(t *testing.T)  { testForkedSync(t, eth.ETH66, FullSync) }
   631  func TestForkedSync66Fast(t *testing.T)  { testForkedSync(t, eth.ETH66, FastSync) }
   632  func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) }
   633  
   634  func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   635  	t.Parallel()
   636  
   637  	tester := newTester()
   638  	defer tester.terminate()
   639  
   640  	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
   641  	chainB := testChainForkLightB.shorten(testChainBase.len() + 80)
   642  	tester.newPeer("fork A", protocol, chainA)
   643  	tester.newPeer("fork B", protocol, chainB)
   644  	// Synchronise with the peer and make sure all blocks were retrieved
   645  	if err := tester.sync("fork A", nil, mode); err != nil {
   646  		t.Fatalf("failed to synchronise blocks: %v", err)
   647  	}
   648  	assertOwnChain(t, tester, chainA.len())
   649  
   650  	// Synchronise with the second peer and make sure that fork is pulled too
   651  	if err := tester.sync("fork B", nil, mode); err != nil {
   652  		t.Fatalf("failed to synchronise blocks: %v", err)
   653  	}
   654  	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
   655  }
   656  
    657  // Tests that synchronising against a much shorter but much heavier fork works
    658  // correctly and is not dropped.
   659  func TestHeavyForkedSync66Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FullSync) }
   660  func TestHeavyForkedSync66Fast(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FastSync) }
   661  func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }
   662  
   663  func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   664  	t.Parallel()
   665  
   666  	tester := newTester()
   667  	defer tester.terminate()
   668  
   669  	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
   670  	chainB := testChainForkHeavy.shorten(testChainBase.len() + 80)
   671  	tester.newPeer("light", protocol, chainA)
   672  	tester.newPeer("heavy", protocol, chainB)
   673  
   674  	// Synchronise with the peer and make sure all blocks were retrieved
   675  	if err := tester.sync("light", nil, mode); err != nil {
   676  		t.Fatalf("failed to synchronise blocks: %v", err)
   677  	}
   678  	assertOwnChain(t, tester, chainA.len())
   679  
   680  	// Synchronise with the second peer and make sure that fork is pulled too
   681  	if err := tester.sync("heavy", nil, mode); err != nil {
   682  		t.Fatalf("failed to synchronise blocks: %v", err)
   683  	}
   684  	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
   685  }
   686  
   687  // Tests that chain forks are contained within a certain interval of the current
   688  // chain head, ensuring that malicious peers cannot waste resources by feeding
   689  // long dead chains.
   690  func TestBoundedForkedSync66Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FullSync) }
   691  func TestBoundedForkedSync66Fast(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FastSync) }
   692  func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) }
   693  
   694  func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   695  	t.Parallel()
   696  
   697  	tester := newTester()
   698  	defer tester.terminate()
   699  
   700  	chainA := testChainForkLightA
   701  	chainB := testChainForkLightB
   702  	tester.newPeer("original", protocol, chainA)
   703  	tester.newPeer("rewriter", protocol, chainB)
   704  
   705  	// Synchronise with the peer and make sure all blocks were retrieved
   706  	if err := tester.sync("original", nil, mode); err != nil {
   707  		t.Fatalf("failed to synchronise blocks: %v", err)
   708  	}
   709  	assertOwnChain(t, tester, chainA.len())
   710  
    711  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   712  	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
   713  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   714  	}
   715  }
   716  
   717  // Tests that chain forks are contained within a certain interval of the current
   718  // chain head for short but heavy forks too. These are a bit special because they
   719  // take different ancestor lookup paths.
   720  func TestBoundedHeavyForkedSync66Full(t *testing.T) {
   721  	testBoundedHeavyForkedSync(t, eth.ETH66, FullSync)
   722  }
   723  func TestBoundedHeavyForkedSync66Fast(t *testing.T) {
   724  	testBoundedHeavyForkedSync(t, eth.ETH66, FastSync)
   725  }
   726  func TestBoundedHeavyForkedSync66Light(t *testing.T) {
   727  	testBoundedHeavyForkedSync(t, eth.ETH66, LightSync)
   728  }
   729  
   730  func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   731  	t.Parallel()
   732  	tester := newTester()
   733  
   734  	// Create a long enough forked chain
   735  	chainA := testChainForkLightA
   736  	chainB := testChainForkHeavy
   737  	tester.newPeer("original", protocol, chainA)
   738  
   739  	// Synchronise with the peer and make sure all blocks were retrieved
   740  	if err := tester.sync("original", nil, mode); err != nil {
   741  		t.Fatalf("failed to synchronise blocks: %v", err)
   742  	}
   743  	assertOwnChain(t, tester, chainA.len())
   744  
   745  	tester.newPeer("heavy-rewriter", protocol, chainB)
    746  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   747  	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
   748  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   749  	}
   750  	tester.terminate()
   751  }
   752  
   753  // Tests that an inactive downloader will not accept incoming block headers,
   754  // bodies and receipts.
   755  func TestInactiveDownloader63(t *testing.T) {
   756  	t.Parallel()
   757  
   758  	tester := newTester()
   759  	defer tester.terminate()
   760  
   761  	// Check that neither block headers nor bodies are accepted
   762  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   763  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   764  	}
   765  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   766  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   767  	}
   768  	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
   769  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   770  	}
   771  }
   772  
   773  // Tests that a canceled download wipes all previously accumulated state.
   774  func TestCancel66Full(t *testing.T)  { testCancel(t, eth.ETH66, FullSync) }
   775  func TestCancel66Fast(t *testing.T)  { testCancel(t, eth.ETH66, FastSync) }
   776  func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) }
   777  
   778  func testCancel(t *testing.T, protocol uint, mode SyncMode) {
   779  	t.Parallel()
   780  
   781  	tester := newTester()
   782  	defer tester.terminate()
   783  
   784  	chain := testChainBase.shorten(MaxHeaderFetch)
   785  	tester.newPeer("peer", protocol, chain)
   786  
   787  	// Make sure canceling works with a pristine downloader
   788  	tester.downloader.Cancel()
   789  	if !tester.downloader.queue.Idle() {
   790  		t.Errorf("download queue not idle")
   791  	}
   792  	// Synchronise with the peer, but cancel afterwards
   793  	if err := tester.sync("peer", nil, mode); err != nil {
   794  		t.Fatalf("failed to synchronise blocks: %v", err)
   795  	}
   796  	tester.downloader.Cancel()
   797  	if !tester.downloader.queue.Idle() {
   798  		t.Errorf("download queue not idle")
   799  	}
   800  }
   801  
   802  // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
   803  func TestMultiSynchronisation66Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, FullSync) }
   804  func TestMultiSynchronisation66Fast(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, FastSync) }
   805  func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) }
   806  
   807  func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
   808  	t.Parallel()
   809  
   810  	tester := newTester()
   811  	defer tester.terminate()
   812  
   813  	// Create various peers with various parts of the chain
   814  	targetPeers := 8
   815  	chain := testChainBase.shorten(targetPeers * 100)
   816  
   817  	for i := 0; i < targetPeers; i++ {
   818  		id := fmt.Sprintf("peer #%d", i)
   819  		tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1)))
   820  	}
   821  	if err := tester.sync("peer #0", nil, mode); err != nil {
   822  		t.Fatalf("failed to synchronise blocks: %v", err)
   823  	}
   824  	assertOwnChain(t, tester, chain.len())
   825  }
   826  
    827  // Tests that synchronisations behave well in multi-version protocol environments
    828  // and do not wreak havoc on other nodes in the network.
   829  func TestMultiProtoSynchronisation66Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FullSync) }
   830  func TestMultiProtoSynchronisation66Fast(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FastSync) }
   831  func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) }
   832  
   833  func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
   834  	t.Parallel()
   835  
   836  	tester := newTester()
   837  	defer tester.terminate()
   838  
   839  	// Create a small enough block chain to download
   840  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   841  
   842  	// Create peers of every type
   843  	tester.newPeer("peer 66", eth.ETH66, chain)
   844  	//tester.newPeer("peer 65", eth.ETH67, chain)
   845  
   846  	// Synchronise with the requested peer and make sure all blocks were retrieved
   847  	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
   848  		t.Fatalf("failed to synchronise blocks: %v", err)
   849  	}
   850  	assertOwnChain(t, tester, chain.len())
   851  
   852  	// Check that no peers have been dropped off
   853  	for _, version := range []int{66} {
   854  		peer := fmt.Sprintf("peer %d", version)
   855  		if _, ok := tester.peers[peer]; !ok {
   856  			t.Errorf("%s dropped", peer)
   857  		}
   858  	}
   859  }
   860  
    861  // Tests that if a block is empty (e.g. header only), no body request should be
    862  // made, and instead the header should be assembled into a whole block by itself.
   863  func TestEmptyShortCircuit66Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FullSync) }
   864  func TestEmptyShortCircuit66Fast(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FastSync) }
   865  func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) }
   866  
   867  func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
   868  	t.Parallel()
   869  
   870  	tester := newTester()
   871  	defer tester.terminate()
   872  
   873  	// Create a block chain to download
   874  	chain := testChainBase
   875  	tester.newPeer("peer", protocol, chain)
   876  
   877  	// Instrument the downloader to signal body requests
   878  	bodiesHave, receiptsHave := int32(0), int32(0)
   879  	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
   880  		atomic.AddInt32(&bodiesHave, int32(len(headers)))
   881  	}
   882  	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
   883  		atomic.AddInt32(&receiptsHave, int32(len(headers)))
   884  	}
   885  	// Synchronise with the peer and make sure all blocks were retrieved
   886  	if err := tester.sync("peer", nil, mode); err != nil {
   887  		t.Fatalf("failed to synchronise blocks: %v", err)
   888  	}
   889  	assertOwnChain(t, tester, chain.len())
   890  
   891  	// Validate the number of block bodies that should have been requested
   892  	bodiesNeeded, receiptsNeeded := 0, 0
   893  	for _, block := range chain.blockm {
   894  		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
   895  			bodiesNeeded++
   896  		}
   897  	}
   898  	for _, receipt := range chain.receiptm {
   899  		if mode == FastSync && len(receipt) > 0 {
   900  			receiptsNeeded++
   901  		}
   902  	}
   903  	if int(bodiesHave) != bodiesNeeded {
   904  		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
   905  	}
   906  	if int(receiptsHave) != receiptsNeeded {
   907  		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
   908  	}
   909  }
   910  
   911  // Tests that headers are enqueued continuously, preventing malicious nodes from
   912  // stalling the downloader by feeding gapped header chains.
   913  func TestMissingHeaderAttack66Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FullSync) }
   914  func TestMissingHeaderAttack66Fast(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FastSync) }
   915  func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) }
   916  
   917  func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
   918  	t.Parallel()
   919  
   920  	tester := newTester()
   921  	defer tester.terminate()
   922  
   923  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   924  	brokenChain := chain.shorten(chain.len())
   925  	delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2])
   926  	tester.newPeer("attack", protocol, brokenChain)
   927  
   928  	if err := tester.sync("attack", nil, mode); err == nil {
   929  		t.Fatalf("succeeded attacker synchronisation")
   930  	}
   931  	// Synchronise with the valid peer and make sure sync succeeds
   932  	tester.newPeer("valid", protocol, chain)
   933  	if err := tester.sync("valid", nil, mode); err != nil {
   934  		t.Fatalf("failed to synchronise blocks: %v", err)
   935  	}
   936  	assertOwnChain(t, tester, chain.len())
   937  }
   938  
   939  // Tests that if requested headers are shifted (i.e. first is missing), the queue
   940  // detects the invalid numbering.
   941  func TestShiftedHeaderAttack66Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FullSync) }
   942  func TestShiftedHeaderAttack66Fast(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FastSync) }
   943  func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) }
   944  
   945  func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
   946  	t.Parallel()
   947  
   948  	tester := newTester()
   949  	defer tester.terminate()
   950  
   951  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   952  
   953  	// Attempt a full sync with an attacker feeding shifted headers
   954  	brokenChain := chain.shorten(chain.len())
   955  	delete(brokenChain.headerm, brokenChain.chain[1])
   956  	delete(brokenChain.blockm, brokenChain.chain[1])
   957  	delete(brokenChain.receiptm, brokenChain.chain[1])
   958  	tester.newPeer("attack", protocol, brokenChain)
   959  	if err := tester.sync("attack", nil, mode); err == nil {
   960  		t.Fatalf("succeeded attacker synchronisation")
   961  	}
   962  
   963  	// Synchronise with the valid peer and make sure sync succeeds
   964  	tester.newPeer("valid", protocol, chain)
   965  	if err := tester.sync("valid", nil, mode); err != nil {
   966  		t.Fatalf("failed to synchronise blocks: %v", err)
   967  	}
   968  	assertOwnChain(t, tester, chain.len())
   969  }
   970  
   971  // Tests that upon detecting an invalid header, the recent ones are rolled back
   972  // for various failure scenarios. Afterwards a full sync is attempted to make
   973  // sure no state was corrupted.
   974  func TestInvalidHeaderRollback66Fast(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, FastSync) }
   975  
   976  func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
   977  	t.Parallel()
   978  
   979  	tester := newTester()
   980  
   981  	// Create a small enough block chain to download
   982  	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
   983  	chain := testChainBase.shorten(targetBlocks)
   984  
   985  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
   986  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
   987  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
   988  	fastAttackChain := chain.shorten(chain.len())
   989  	delete(fastAttackChain.headerm, fastAttackChain.chain[missing])
   990  	tester.newPeer("fast-attack", protocol, fastAttackChain)
   991  
   992  	if err := tester.sync("fast-attack", nil, mode); err == nil {
   993  		t.Fatalf("succeeded fast attacker synchronisation")
   994  	}
   995  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
   996  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
   997  	}
   998  
   999  	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1000  	// This should result in both the last fsHeaderSafetyNet number of headers being
  1001  	// rolled back, and also the pivot point being reverted to a non-block status.
  1002  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1003  	blockAttackChain := chain.shorten(chain.len())
  1004  	delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in
  1005  	delete(blockAttackChain.headerm, blockAttackChain.chain[missing])
  1006  	tester.newPeer("block-attack", protocol, blockAttackChain)
  1007  
  1008  	if err := tester.sync("block-attack", nil, mode); err == nil {
  1009  		t.Fatalf("succeeded block attacker synchronisation")
  1010  	}
  1011  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1012  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1013  	}
  1014  	if mode == FastSync {
  1015  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1016  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1017  		}
  1018  	}
  1019  
  1020  	// Attempt to sync with an attacker that withholds promised blocks after the
   1021  	// fast sync pivot point. This could be an attempt to leave the node with a bad
  1022  	// but already imported pivot block.
  1023  	withholdAttackChain := chain.shorten(chain.len())
  1024  	tester.newPeer("withhold-attack", protocol, withholdAttackChain)
  1025  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1026  		for i := missing; i < withholdAttackChain.len(); i++ {
  1027  			delete(withholdAttackChain.headerm, withholdAttackChain.chain[i])
  1028  		}
  1029  		tester.downloader.syncInitHook = nil
  1030  	}
  1031  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1032  		t.Fatalf("succeeded withholding attacker synchronisation")
  1033  	}
  1034  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1035  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1036  	}
  1037  	if mode == FastSync {
  1038  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1039  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1040  		}
  1041  	}
  1042  
   1043  	// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
  1044  	// should also disable fast syncing for this process, verify that we did a fresh full
  1045  	// sync. Note, we can't assert anything about the receipts since we won't purge the
  1046  	// database of them, hence we can't use assertOwnChain.
  1047  	tester.newPeer("valid", protocol, chain)
  1048  	if err := tester.sync("valid", nil, mode); err != nil {
  1049  		t.Fatalf("failed to synchronise blocks: %v", err)
  1050  	}
  1051  	if hs := len(tester.ownHeaders); hs != chain.len() {
  1052  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len())
  1053  	}
  1054  	if mode != LightSync {
  1055  		if bs := len(tester.ownBlocks); bs != chain.len() {
  1056  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len())
  1057  		}
  1058  	}
  1059  	tester.terminate()
  1060  }
  1061  
  1062  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1063  // afterwards by not sending any useful hashes.
  1064  func TestHighTDStarvationAttack66Full(t *testing.T) {
  1065  	testHighTDStarvationAttack(t, eth.ETH66, FullSync)
  1066  }
  1067  func TestHighTDStarvationAttack66Fast(t *testing.T) {
  1068  	testHighTDStarvationAttack(t, eth.ETH66, FastSync)
  1069  }
  1070  func TestHighTDStarvationAttack66Light(t *testing.T) {
  1071  	testHighTDStarvationAttack(t, eth.ETH66, LightSync)
  1072  }
  1073  
  1074  func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
  1075  	t.Parallel()
  1076  
  1077  	tester := newTester()
  1078  
  1079  	chain := testChainBase.shorten(1)
  1080  	tester.newPeer("attack", protocol, chain)
  1081  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1082  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1083  	}
  1084  	tester.terminate()
  1085  }
  1086  
  1087  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1088  func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) }
  1089  
  1090  func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
  1091  	t.Parallel()
  1092  
  1093  	// Define the disconnection requirement for individual hash fetch errors
  1094  	tests := []struct {
  1095  		result error
  1096  		drop   bool
  1097  	}{
  1098  		{nil, false},                        // Sync succeeded, all is well
  1099  		{errBusy, false},                    // Sync is already in progress, no problem
  1100  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1101  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1102  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1103  		{errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
  1104  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1105  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1106  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1107  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1108  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1109  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1110  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1111  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1112  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1113  	}
  1114  	// Run the tests and check disconnection status
  1115  	tester := newTester()
  1116  	defer tester.terminate()
  1117  	chain := testChainBase.shorten(1)
  1118  
  1119  	for i, tt := range tests {
  1120  		// Register a new peer and ensure its presence
  1121  		id := fmt.Sprintf("test %d", i)
  1122  		if err := tester.newPeer(id, protocol, chain); err != nil {
  1123  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1124  		}
  1125  		if _, ok := tester.peers[id]; !ok {
  1126  			t.Fatalf("test %d: registered peer not found", i)
  1127  		}
  1128  		// Simulate a synchronisation and check the required result
  1129  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1130  
  1131  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  1132  		if _, ok := tester.peers[id]; !ok != tt.drop {
  1133  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1134  		}
  1135  	}
  1136  }
  1137  
  1138  // Tests that synchronisation progress (origin block number, current block number
  1139  // and highest block number) is tracked and updated correctly.
  1140  func TestSyncProgress66Full(t *testing.T)  { testSyncProgress(t, eth.ETH66, FullSync) }
  1141  func TestSyncProgress66Fast(t *testing.T)  { testSyncProgress(t, eth.ETH66, FastSync) }
  1142  func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) }
  1143  
  1144  func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
  1145  	t.Parallel()
  1146  
  1147  	tester := newTester()
  1148  	defer tester.terminate()
  1149  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
  1150  
  1151  	// Set a sync init hook to catch progress changes
  1152  	starting := make(chan struct{})
  1153  	progress := make(chan struct{})
  1154  
  1155  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1156  		starting <- struct{}{}
  1157  		<-progress
  1158  	}
  1159  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1160  
  1161  	// Synchronise half the blocks and check initial progress
  1162  	tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2))
  1163  	pending := new(sync.WaitGroup)
  1164  	pending.Add(1)
  1165  
  1166  	go func() {
  1167  		defer pending.Done()
  1168  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1169  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1170  		}
  1171  	}()
  1172  	<-starting
  1173  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1174  		HighestBlock: uint64(chain.len()/2 - 1),
  1175  	})
  1176  	progress <- struct{}{}
  1177  	pending.Wait()
  1178  
  1179  	// Synchronise all the blocks and check continuation progress
  1180  	tester.newPeer("peer-full", protocol, chain)
  1181  	pending.Add(1)
  1182  	go func() {
  1183  		defer pending.Done()
  1184  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1185  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1186  		}
  1187  	}()
  1188  	<-starting
  1189  	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
  1190  		StartingBlock: uint64(chain.len()/2 - 1),
  1191  		CurrentBlock:  uint64(chain.len()/2 - 1),
  1192  		HighestBlock:  uint64(chain.len() - 1),
  1193  	})
  1194  
  1195  	// Check final progress after successful sync
  1196  	progress <- struct{}{}
  1197  	pending.Wait()
  1198  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1199  		StartingBlock: uint64(chain.len()/2 - 1),
  1200  		CurrentBlock:  uint64(chain.len() - 1),
  1201  		HighestBlock:  uint64(chain.len() - 1),
  1202  	})
  1203  }
  1204  
  1205  func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
  1206  	// Mark this method as a helper to report errors at callsite, not in here
  1207  	t.Helper()
  1208  
  1209  	p := d.Progress()
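         	// Zero out the state sync counters so that only the block progress fields are compared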
  1210  	p.KnownStates, p.PulledStates = 0, 0
  1211  	want.KnownStates, want.PulledStates = 0, 0
  1212  	if p != want {
  1213  		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
  1214  	}
  1215  }
  1216  
  1217  // Tests that synchronisation progress (origin block number and highest block
  1218  // number) is tracked and updated correctly in case of a fork (or manual head
   1219  // reversion).
  1220  func TestForkedSyncProgress66Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FullSync) }
  1221  func TestForkedSyncProgress66Fast(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FastSync) }
  1222  func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) }
  1223  
  1224  func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
  1225  	t.Parallel()
  1226  
  1227  	tester := newTester()
  1228  	defer tester.terminate()
  1229  	chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHeaderFetch)
  1230  	chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHeaderFetch)
  1231  
  1232  	// Set a sync init hook to catch progress changes
  1233  	starting := make(chan struct{})
  1234  	progress := make(chan struct{})
  1235  
  1236  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1237  		starting <- struct{}{}
  1238  		<-progress
  1239  	}
  1240  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1241  
  1242  	// Synchronise with one of the forks and check progress
  1243  	tester.newPeer("fork A", protocol, chainA)
  1244  	pending := new(sync.WaitGroup)
  1245  	pending.Add(1)
  1246  	go func() {
  1247  		defer pending.Done()
  1248  		if err := tester.sync("fork A", nil, mode); err != nil {
  1249  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1250  		}
  1251  	}()
  1252  	<-starting
  1253  
  1254  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1255  		HighestBlock: uint64(chainA.len() - 1),
  1256  	})
  1257  	progress <- struct{}{}
  1258  	pending.Wait()
  1259  
  1260  	// Simulate a successful sync above the fork
  1261  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1262  
  1263  	// Synchronise with the second fork and check progress resets
  1264  	tester.newPeer("fork B", protocol, chainB)
  1265  	pending.Add(1)
  1266  	go func() {
  1267  		defer pending.Done()
  1268  		if err := tester.sync("fork B", nil, mode); err != nil {
  1269  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1270  		}
  1271  	}()
  1272  	<-starting
  1273  	checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
  1274  		StartingBlock: uint64(testChainBase.len()) - 1,
  1275  		CurrentBlock:  uint64(chainA.len() - 1),
  1276  		HighestBlock:  uint64(chainB.len() - 1),
  1277  	})
  1278  
  1279  	// Check final progress after successful sync
  1280  	progress <- struct{}{}
  1281  	pending.Wait()
  1282  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1283  		StartingBlock: uint64(testChainBase.len()) - 1,
  1284  		CurrentBlock:  uint64(chainB.len() - 1),
  1285  		HighestBlock:  uint64(chainB.len() - 1),
  1286  	})
  1287  }
  1288  
  1289  // Tests that if synchronisation is aborted due to some failure, then the progress
  1290  // origin is not updated in the next sync cycle, as it should be considered the
  1291  // continuation of the previous sync and not a new instance.
  1292  func TestFailedSyncProgress66Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FullSync) }
  1293  func TestFailedSyncProgress66Fast(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FastSync) }
  1294  func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) }
  1295  
  1296  func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
  1297  	t.Parallel()
  1298  
  1299  	tester := newTester()
  1300  	defer tester.terminate()
  1301  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
  1302  
  1303  	// Set a sync init hook to catch progress changes
  1304  	starting := make(chan struct{})
  1305  	progress := make(chan struct{})
  1306  
  1307  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1308  		starting <- struct{}{}
  1309  		<-progress
  1310  	}
  1311  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1312  
  1313  	// Attempt a full sync with a faulty peer
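        	// Remove a block from the middle of the peer's chain so the download cannot complete.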
  1314  	brokenChain := chain.shorten(chain.len())
  1315  	missing := brokenChain.len() / 2
  1316  	delete(brokenChain.headerm, brokenChain.chain[missing])
  1317  	delete(brokenChain.blockm, brokenChain.chain[missing])
  1318  	delete(brokenChain.receiptm, brokenChain.chain[missing])
  1319  	tester.newPeer("faulty", protocol, brokenChain)
  1320  
  1321  	pending := new(sync.WaitGroup)
  1322  	pending.Add(1)
  1323  	go func() {
  1324  		defer pending.Done()
  1325  		if err := tester.sync("faulty", nil, mode); err == nil {
  1326  			panic("succeeded faulty synchronisation")
  1327  		}
  1328  	}()
  1329  	<-starting
  1330  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1331  		HighestBlock: uint64(brokenChain.len() - 1),
  1332  	})
  1333  	progress <- struct{}{}
  1334  	pending.Wait()
  1335  	afterFailedSync := tester.downloader.Progress()
  1336  
  1337  	// Synchronise with a good peer and check that the progress origin remains the
  1338  	// same after a failure
  1339  	tester.newPeer("valid", protocol, chain)
  1340  	pending.Add(1)
  1341  	go func() {
  1342  		defer pending.Done()
  1343  		if err := tester.sync("valid", nil, mode); err != nil {
  1344  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1345  		}
  1346  	}()
  1347  	<-starting
  1348  	checkProgress(t, tester.downloader, "completing", afterFailedSync)
  1349  
  1350  	// Check final progress after successful sync
  1351  	progress <- struct{}{}
  1352  	pending.Wait()
  1353  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1354  		CurrentBlock: uint64(chain.len() - 1),
  1355  		HighestBlock: uint64(chain.len() - 1),
  1356  	})
  1357  }
  1358  
  1359  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1360  // the progress height is successfully reduced at the next sync invocation.
  1361  func TestFakedSyncProgress66Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, FullSync) }
  1362  func TestFakedSyncProgress66Fast(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, FastSync) }
  1363  func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) }
  1364  
  1365  func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
  1366  	t.Parallel()
  1367  
  1368  	tester := newTester()
  1369  	defer tester.terminate()
  1370  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
  1371  
  1372  	// Set a sync init hook to catch progress changes
  1373  	starting := make(chan struct{})
  1374  	progress := make(chan struct{})
  1375  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1376  		starting <- struct{}{}
  1377  		<-progress
  1378  	}
  1379  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1380  
  1381  	// Create and sync with an attacker that promises a higher chain than available.
  1382  	brokenChain := chain.shorten(chain.len())
  1383  	numMissing := 5
  1384  	for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- {
  1385  		delete(brokenChain.headerm, brokenChain.chain[i])
  1386  	}
  1387  	tester.newPeer("attack", protocol, brokenChain)
  1388  
  1389  	pending := new(sync.WaitGroup)
  1390  	pending.Add(1)
  1391  	go func() {
  1392  		defer pending.Done()
  1393  		if err := tester.sync("attack", nil, mode); err == nil {
  1394  			panic("succeeded attacker synchronisation")
  1395  		}
  1396  	}()
  1397  	<-starting
  1398  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1399  		HighestBlock: uint64(brokenChain.len() - 1),
  1400  	})
  1401  	progress <- struct{}{}
  1402  	pending.Wait()
  1403  	afterFailedSync := tester.downloader.Progress()
  1404  
  1405  	// Synchronise with a good peer and check that the progress height has been reduced to
  1406  	// the true value.
  1407  	validChain := chain.shorten(chain.len() - numMissing)
  1408  	tester.newPeer("valid", protocol, validChain)
  1409  	pending.Add(1)
  1410  
  1411  	go func() {
  1412  		defer pending.Done()
  1413  		if err := tester.sync("valid", nil, mode); err != nil {
  1414  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1415  		}
  1416  	}()
  1417  	<-starting
  1418  	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
  1419  		CurrentBlock: afterFailedSync.CurrentBlock,
  1420  		HighestBlock: uint64(validChain.len() - 1),
  1421  	})
  1422  
  1423  	// Check final progress after successful sync.
  1424  	progress <- struct{}{}
  1425  	pending.Wait()
  1426  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1427  		CurrentBlock: uint64(validChain.len() - 1),
  1428  		HighestBlock: uint64(validChain.len() - 1),
  1429  	})
  1430  }
  1431  
  1432  // This test reproduces an issue where unexpected deliveries would
  1433  // block indefinitely if they arrived at the right time.
  1434  func TestDeliverHeadersHang66Full(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH66, FullSync) }
  1435  func TestDeliverHeadersHang66Fast(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH66, FastSync) }
  1436  func TestDeliverHeadersHang66Light(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, LightSync) }
  1437  
  1438  func testDeliverHeadersHang(t *testing.T, protocol uint, mode SyncMode) {
  1439  	t.Parallel()
  1440  
  1441  	master := newTester()
  1442  	defer master.terminate()
  1443  	chain := testChainBase.shorten(15)
  1444  
  1445  	for i := 0; i < 200; i++ {
  1446  		tester := newTester()
  1447  		tester.peerDb = master.peerDb
  1448  		tester.newPeer("peer", protocol, chain)
  1449  
  1450  		// Whenever the downloader requests headers, flood it with
  1451  		// a lot of unrequested header deliveries.
  1452  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1453  			peer:   tester.downloader.peers.peers["peer"].peer,
  1454  			tester: tester,
  1455  		}
  1456  		if err := tester.sync("peer", nil, mode); err != nil {
  1457  			t.Errorf("test %d: sync failed: %v", i, err)
  1458  		}
  1459  		tester.terminate()
  1460  	}
  1461  }
  1462  
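        // floodingTestPeer wraps a regular test peer, overriding RequestHeadersByNumber to
        // flood the downloader with unrequested header deliveries before serving the real
        // request.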
  1463  type floodingTestPeer struct {
  1464  	peer   Peer
  1465  	tester *downloadTester
  1466  }
  1467  
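        // The calls below are forwarded verbatim to the wrapped peer.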
  1468  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1469  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1470  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1471  }
  1472  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1473  	return ftp.peer.RequestBodies(hashes)
  1474  }
  1475  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1476  	return ftp.peer.RequestReceipts(hashes)
  1477  }
  1478  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1479  	return ftp.peer.RequestNodeData(hashes)
  1480  }
  1481  
  1482  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
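        	// Flood the downloader with a burst of unsolicited header deliveries.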
  1483  	deliveriesDone := make(chan struct{}, 500)
  1484  	for i := 0; i < cap(deliveriesDone)-1; i++ {
  1485  		peer := fmt.Sprintf("fake-peer%d", i)
  1486  		go func() {
  1487  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1488  			deliveriesDone <- struct{}{}
  1489  		}()
  1490  	}
  1491  
  1492  	// None of the extra deliveries should block.
  1493  	timeout := time.After(60 * time.Second)
  1494  	launched := false
  1495  	for i := 0; i < cap(deliveriesDone); i++ {
  1496  		select {
  1497  		case <-deliveriesDone:
  1498  			if !launched {
  1499  				// Start delivering the requested headers
  1500  				// after one of the flooding responses has arrived.
  1501  				go func() {
  1502  					ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1503  					deliveriesDone <- struct{}{}
  1504  				}()
  1505  				launched = true
  1506  			}
  1507  		case <-timeout:
  1508  			panic("blocked")
  1509  		}
  1510  	}
  1511  	return nil
  1512  }
  1513  
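        // TestRemoteHeaderRequestSpan checks that calculateRequestSpan produces the expected
        // header request numbers for various combinations of local and remote chain heights.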
  1514  func TestRemoteHeaderRequestSpan(t *testing.T) {
  1515  	testCases := []struct {
  1516  		remoteHeight uint64
  1517  		localHeight  uint64
  1518  		expected     []int
  1519  	}{
  1520  		// Remote is way higher. We should ask for the remote head and go backwards
  1521  		{1500, 1000,
  1522  			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
  1523  		},
  1524  		{15000, 13006,
  1525  			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
  1526  		},
  1527  		// Remote is pretty close to us. We don't have to fetch as many
  1528  		{1200, 1150,
  1529  			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
  1530  		},
  1531  		// Remote is equal to us (so on a fork with higher td)
  1532  		// We should get the closest couple of ancestors
  1533  		{1500, 1500,
  1534  			[]int{1497, 1499},
  1535  		},
  1536  		// We're higher than the remote! Odd
  1537  		{1000, 1500,
  1538  			[]int{997, 999},
  1539  		},
  1540  		// Check some weird edge cases to make sure it behaves somewhat rationally
  1541  		{0, 1500,
  1542  			[]int{0, 2},
  1543  		},
  1544  		{6000000, 0,
  1545  			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
  1546  		},
  1547  		{0, 0,
  1548  			[]int{0, 2},
  1549  		},
  1550  	}
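        	// reqs expands a (from, count, span) triple into the concrete list of block
        	// numbers that a request with those parameters would cover.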
  1551  	reqs := func(from, count, span int) []int {
  1552  		var r []int
  1553  		num := from
  1554  		for len(r) < count {
  1555  			r = append(r, num)
  1556  			num += span + 1
  1557  		}
  1558  		return r
  1559  	}
  1560  	for i, tt := range testCases {
  1561  		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
  1562  		data := reqs(int(from), count, span)
  1563  
  1564  		if max != uint64(data[len(data)-1]) {
  1565  			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
  1566  		}
  1567  		failed := false
  1568  		if len(data) != len(tt.expected) {
  1569  			failed = true
  1570  			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
  1571  		} else {
  1572  			for j, n := range data {
  1573  				if n != tt.expected[j] {
  1574  					failed = true
  1575  					break
  1576  				}
  1577  			}
  1578  		}
  1579  		if failed {
  1580  			res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
  1581  			exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
  1582  			t.Logf("got: %v\n", res)
  1583  			t.Logf("exp: %v\n", exp)
  1584  			t.Errorf("test %d: wrong values", i)
  1585  		}
  1586  	}
  1587  }
  1588  
  1589  // Tests that peers below a pre-configured checkpoint block are prevented from
  1590  // being fast-synced from, avoiding potential cheap eclipse attacks.
  1591  func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FullSync) }
  1592  func TestCheckpointEnforcement66Fast(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FastSync) }
  1593  func TestCheckpointEnforcement66Light(t *testing.T) {
  1594  	testCheckpointEnforcement(t, eth.ETH66, LightSync)
  1595  }
  1596  
  1597  func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) {
  1598  	t.Parallel()
  1599  
  1600  	// Create a new tester with a particular hard-coded checkpoint block
  1601  	tester := newTester()
  1602  	defer tester.terminate()
  1603  
  1604  	tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
  1605  	chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)
  1606  
  1607  	// Attempt to sync with the peer and validate the result
  1608  	tester.newPeer("peer", protocol, chain)
  1609  
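        	// Fast and light sync must refuse peers whose head is below the checkpoint;
        	// full sync is unaffected.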
  1610  	var expect error
  1611  	if mode == FastSync || mode == LightSync {
  1612  		expect = errUnsyncedPeer
  1613  	}
  1614  	if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) {
  1615  		t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
  1616  	}
  1617  	if mode == FastSync || mode == LightSync {
  1618  		assertOwnChain(t, tester, 1)
  1619  	} else {
  1620  		assertOwnChain(t, tester, chain.len())
  1621  	}
  1622  }