github.com/Blockdaemon/celo-blockchain@v0.0.0-20200129231733-e667f6b08419/eth/downloader/downloader_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/trie"
)

// Reduce some of the parameters to make the tester faster.
func init() {
	MaxForkAncestry = uint64(10000)
	blockCacheItems = 1024
	fsHeaderContCheck = 500 * time.Millisecond

	// Uncomment the lines below to enable more verbose logging:
	//glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(true)))
	//glogger.Verbosity(log.LvlInfo)
	//log.Root().SetHandler(glogger)
}

// downloadTester is a test simulator for mocking out the local blockchain.
type downloadTester struct {
	downloader *Downloader

	genesis *types.Block   // Genesis block used by the tester and peers
	stateDb ethdb.Database // Database used by the tester for syncing from peers
	peerDb  ethdb.Database // Database of the peers containing all data
	peers   map[string]*downloadTesterPeer

	ownHashes   []common.Hash                  // Hash chain belonging to the tester
	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain

	lock sync.RWMutex
}

// Config retrieves the chain configuration; the tester has none, so it
// returns nil.
func (dl *downloadTester) Config() *params.ChainConfig {
	return nil
}

// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	tester := &downloadTester{
		genesis:     testGenesis,
		peerDb:      testDB,
		peers:       make(map[string]*downloadTesterPeer),
		ownHashes:   []common.Hash{testGenesis.Hash()},
		ownHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
		ownBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
		ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
		ownChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
	}
	tester.stateDb = ethdb.NewMemDatabase()
	tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})
	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)
	return tester
}

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
}
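
// Illustrative sketch (an addition for exposition, not part of the original
// suite; the lowercase name keeps go test from running it): the typical tester
// lifecycle is construct, register a peer, synchronise, assert, with terminate
// deferred for cleanup. Assumes the testChainBase fixture defined alongside
// these tests.
func exampleTesterLifecycle(t *testing.T) {
	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(100)
	tester.newPeer("peer", 64, chain)
	if err := tester.sync("peer", nil, FullSync); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}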

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	dl.lock.RLock()
	hash := dl.peers[id].chain.headBlock().Hash()
	// If no particular TD was requested, load from the peer's blockchain
	if td == nil {
		td = dl.peers[id].chain.td(hash)
	}
	dl.lock.RUnlock()

	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, hash, td, mode)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}
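
// The non-blocking check above, in isolation (illustrative helper, not used by
// the original code): receiving from a closed channel succeeds immediately,
// while an open channel with no pending sends falls through to the default
// case, so the select distinguishes a fully cancelled downloader from one
// still accepting deliveries.
func isClosed(ch <-chan struct{}) bool {
	select {
	case <-ch:
		return true // channel closed (or a value was pending)
	default:
		return false // channel still open with nothing to receive
	}
}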

// HasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
	return dl.GetHeaderByHash(hash) != nil
}

// HasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
	return dl.GetBlockByHash(hash) != nil
}

// HasFastBlock checks if a block's receipts are present in the tester's
// canonical chain, i.e. whether it was imported during fast sync.
func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	_, ok := dl.ownReceipts[hash]
	return ok
}

// GetHeaderByHash retrieves a header from the tester's canonical chain.
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownHeaders[hash]
}

// GetBlockByHash retrieves a block from the tester's canonical chain.
func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownBlocks[hash]
}

// CurrentHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) CurrentHeader() *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
	}
	return dl.genesis.Header()
}

// CurrentBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) CurrentBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
		}
	}
	return dl.genesis
}

// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) CurrentFastBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
	}
	return dl.genesis
}

// FastSyncCommitHead manually sets the head block to a given hash.
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
	// For now only check that the state trie is correct
	if block := dl.GetBlockByHash(hash); block != nil {
		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb), 0)
		return err
	}
	return fmt.Errorf("non-existent block: %x", hash[:4])
}

// GetTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownChainTd[hash]
}

// InsertHeaderChain injects a new batch of headers into the simulated chain.
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int, contiguousHeaders bool) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	// Do a quick check, as blockchain.InsertHeaderChain doesn't insert anything in case of errors
	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
		return 0, errors.New("unknown parent")
	}
	for i := 1; i < len(headers); i++ {
		if headers[i].ParentHash != headers[i-1].Hash() {
			return i, errors.New("unknown parent")
		}
	}
	// Do a full insert if pre-checks passed
	for i, header := range headers {
		if _, ok := dl.ownHeaders[header.Hash()]; ok {
			continue
		}
		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownHashes = append(dl.ownHashes, header.Hash())
		dl.ownHeaders[header.Hash()] = header
		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
	}
	return len(headers), nil
}

// InsertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i, block := range blocks {
		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
		}
		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
			dl.ownHashes = append(dl.ownHashes, block.Hash())
			dl.ownHeaders[block.Hash()] = block.Header()
		}
		dl.ownBlocks[block.Hash()] = block
		dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
	}
	return len(blocks), nil
}

// InsertReceiptChain injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := 0; i < len(blocks) && i < len(receipts); i++ {
		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
			return i, errors.New("unknown owner")
		}
		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
	}
	return len(blocks), nil
}

// Rollback removes some recently added elements from the chain.
func (dl *downloadTester) Rollback(hashes []common.Hash, fullHeaderChainAvailable bool) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := len(hashes) - 1; i >= 0; i-- {
		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
		}
		delete(dl.ownChainTd, hashes[i])
		delete(dl.ownHeaders, hashes[i])
		delete(dl.ownReceipts, hashes[i])
		delete(dl.ownBlocks, hashes[i])
	}
}
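
// Worked example of the rollback semantics above (illustrative): if ownHashes
// ends in [..., A, B, C] and Rollback is called with [B, C], the loop walks the
// argument in reverse, popping C and then B off the tail, leaving [..., A]. A
// hash that is not the current tail is still deleted from the TD, header,
// receipt and block maps, but leaves ownHashes untouched.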

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version int, chain *testChain) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	peer := &downloadTesterPeer{dl: dl, id: id, chain: chain}
	dl.peers[id] = peer
	return dl.downloader.RegisterPeer(id, version, peer)
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peers, id)
	dl.downloader.UnregisterPeer(id)
}

// downloadTesterPeer simulates a remote peer, serving header, body, receipt
// and state data out of its backing test chain.
type downloadTesterPeer struct {
	dl            *downloadTester
	id            string
	lock          sync.RWMutex
	chain         *testChain
	missingStates map[common.Hash]bool // State entries that fast sync should not return
}
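
// Illustrative sketch (a hypothetical constructor mirroring what newPeer does,
// not an API from the original file): building a peer that withholds a single
// state entry, so fast sync can be exercised against missing state data.
func newWithholdingPeer(dl *downloadTester, id string, chain *testChain, missing common.Hash) *downloadTesterPeer {
	return &downloadTesterPeer{
		dl:            dl,
		id:            id,
		chain:         chain,
		missingStates: map[common.Hash]bool{missing: true}, // skipped by RequestNodeData
	}
}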

// Head retrieves the peer's current head block hash and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	b := dlp.chain.headBlock()
	return b.Hash(), dlp.chain.td(b.Hash())
}

// RequestHeadersByHash serves a GetBlockHeaders request starting at a hashed
// origin on behalf of a particular peer in the download tester, delivering
// the resulting batch of headers asynchronously.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
	if reverse {
		panic("reverse header requests not supported")
	}

	result := dlp.chain.headersByHash(origin, amount, skip)
	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	return nil
}

// RequestHeadersByNumber serves a GetBlockHeaders request starting at a
// numbered origin on behalf of a particular peer in the download tester,
// delivering the resulting batch of headers asynchronously.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
	if reverse {
		panic("reverse header requests not supported")
	}

	result := dlp.chain.headersByNumber(origin, amount, skip)
	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	return nil
}

// RequestBodies serves a getBlockBodies request on behalf of a particular peer
// in the download tester, delivering the retrieved batch of block bodies
// asynchronously.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
	txs, uncles, randomness, epochSnarkData := dlp.chain.bodies(hashes)
	go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles, randomness, epochSnarkData)
	return nil
}

// RequestReceipts serves a getReceipts request on behalf of a particular peer
// in the download tester, delivering the retrieved batch of block receipts
// asynchronously.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
	receipts := dlp.chain.receipts(hashes)
	go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts)
	return nil
}

// RequestNodeData serves a getNodeData request on behalf of a particular peer
// in the download tester, delivering the retrieved batch of node state data
// asynchronously and withholding any entries marked in missingStates.
func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	results := make([][]byte, 0, len(hashes))
	for _, hash := range hashes {
		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
			if !dlp.missingStates[hash] {
				results = append(results, data)
			}
		}
	}
	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
	return nil
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	assertOwnForkedChain(t, tester, 1, []int{length})
}

// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	// Initialize the counters for the first fork
	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]

	// Update the counters for each subsequent fork
	for _, length := range lengths[1:] {
		headers += length - common
		blocks += length - common
		receipts += length - common
	}
	if tester.downloader.Mode == LightSync {
		blocks, receipts = 1, 1
	}
	if hs := len(tester.ownHeaders); hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := len(tester.ownBlocks); bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := len(tester.ownReceipts); rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
}
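
// Worked example of the counter arithmetic above (illustrative helper, not
// used by the original suite): for two forks of lengths 10 and 12 sharing 8
// common blocks, the expected count is 10 + (12 - 8) = 14 headers, blocks and
// receipts alike (collapsed to a single block and receipt under LightSync).
func expectedForkedItems(common int, lengths []int) int {
	total := lengths[0] // the first fork is counted in full
	for _, length := range lengths[1:] {
		total += length - common // later forks only add their non-shared suffix
	}
	return total
}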

// Tests that simple synchronization against a canonical chain works correctly.
// In this test common ancestor lookup should be short circuited and not require
// binary searching.
func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }

func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheItems - 15)
	tester.newPeer("peer", protocol, chain)

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that if a large batch of blocks is being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }

func testThrottling(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()

	// Create a long block chain to download
	targetBlocks := testChainBase.len() - 1
	tester.newPeer("peer", protocol, testChainBase)

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (the reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			cached = len(tester.downloader.queue.blockDonePool)
			if mode == FastSync {
				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
					cached = receipts
				}
			}
			frozen = int(atomic.LoadUint32(&blocked))
			retrieved = len(tester.ownBlocks)
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			if cached == blockCacheItems || cached == blockCacheItems-reorgProtHeaderDelay || retrieved+cached+frozen == targetBlocks+1 || retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up

		tester.lock.RLock()
		retrieved = len(tester.ownBlocks)
		tester.lock.RUnlock()
		if cached != blockCacheItems && cached != blockCacheItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}

// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }

func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
	chainB := testChainForkLightB.shorten(testChainBase.len() + 80)
	tester.newPeer("fork A", protocol, chainA)
	tester.newPeer("fork B", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("fork B", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
	chainB := testChainForkHeavy.shorten(testChainBase.len() + 80)
	tester.newPeer("light", protocol, chainA)
	tester.newPeer("heavy", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("light", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("heavy", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA
	chainB := testChainForkLightB
	tester.newPeer("original", protocol, chainA)
	tester.newPeer("rewriter", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }

func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	chainA := testChainForkLightA
	chainB := testChainForkHeavy
	tester.newPeer("original", protocol, chainA)
	tester.newPeer("heavy-rewriter", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader62(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers nor bodies are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}, []*types.Randomness{}, []*types.EpochSnarkData{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that an inactive downloader will not accept incoming block headers,
// bodies and receipts.
func TestInactiveDownloader63(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that block headers, bodies and receipts are all rejected
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}, []*types.Randomness{}, []*types.EpochSnarkData{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }

func testCancel(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(MaxHeaderFetch)
	tester.newPeer("peer", protocol, chain)

	// Make sure canceling works with a pristine downloader
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create various peers with various parts of the chain
	targetPeers := 8
	chain := testChainBase.shorten(targetPeers * 100)

	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1)))
	}
	if err := tester.sync("peer #0", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }

func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheItems - 15)

	// Create peers of every type
	tester.newPeer("peer 62", 62, chain)
	tester.newPeer("peer 63", 63, chain)
	tester.newPeer("peer 64", 64, chain)

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())

	// Check that no peers have been dropped off
	for _, version := range []int{62, 63, 64} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peers[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheItems - 15)
	brokenChain := chain.shorten(chain.len())
	delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2])
	tester.newPeer("attack", protocol, brokenChain)

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }

func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheItems - 15)

	// Attempt a full sync with an attacker feeding shifted headers
	brokenChain := chain.shorten(chain.len())
	delete(brokenChain.headerm, brokenChain.chain[1])
	delete(brokenChain.blockm, brokenChain.chain[1])
	delete(brokenChain.receiptm, brokenChain.chain[1])
	tester.newPeer("attack", protocol, brokenChain)
	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}

	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }

func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
	chain := testChainBase.shorten(targetBlocks)

	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
	// This should result in the last fsHeaderSafetyNet headers being rolled back.
	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
	fastAttackChain := chain.shorten(chain.len())
	delete(fastAttackChain.headerm, fastAttackChain.chain[missing])
	tester.newPeer("fast-attack", protocol, fastAttackChain)

	if err := tester.sync("fast-attack", nil, mode); err == nil {
		t.Fatalf("succeeded fast attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
	}

	// Attempt to sync with an attacker that feeds junk during the block import phase.
	// This should result in both the last fsHeaderSafetyNet number of headers being
	// rolled back, and also the pivot point being reverted to a non-block status.
	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
	blockAttackChain := chain.shorten(chain.len())
	delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in
	delete(blockAttackChain.headerm, blockAttackChain.chain[missing])
	tester.newPeer("block-attack", protocol, blockAttackChain)

	if err := tester.sync("block-attack", nil, mode); err == nil {
		t.Fatalf("succeeded block attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}

	// Attempt to sync with an attacker that withholds promised blocks after the
	// fast sync pivot point. This could be a trial to leave the node with a bad
	// but already imported pivot block.
	withholdAttackChain := chain.shorten(chain.len())
	tester.newPeer("withhold-attack", protocol, withholdAttackChain)
	tester.downloader.syncInitHook = func(uint64, uint64) {
		for i := missing; i < withholdAttackChain.len(); i++ {
			delete(withholdAttackChain.headerm, withholdAttackChain.chain[i])
		}
		tester.downloader.syncInitHook = nil
	}
	if err := tester.sync("withhold-attack", nil, mode); err == nil {
		t.Fatalf("succeeded withholding attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}

	// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
	// should also disable fast syncing for this process, verify that we did a fresh full
	// sync. Note, we can't assert anything about the receipts since we won't purge the
	// database of them, hence we can't use assertOwnChain.
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if hs := len(tester.ownHeaders); hs != chain.len() {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len())
	}
	if mode != LightSync {
		if bs := len(tester.ownBlocks); bs != chain.len() {
			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len())
		}
	}
}

// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }

func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(1)
	tester.newPeer("attack", protocol, chain)
	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
	}
}

// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }

func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
	t.Parallel()

	// Define the disconnection requirement for individual hash fetch errors
	tests := []struct {
		result error
		drop   bool
	}{
		{nil, false},                        // Sync succeeded, all is well
		{errBusy, false},                    // Sync is already in progress, no problem
		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
		{errNoPeers, false},                 // No peers to download from, soft race, no issue
		{errTimeout, true},                  // No hashes received in due time, drop the peer
		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
	}
	// Run the tests and check disconnection status
	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(1)

	for i, tt := range tests {
		// Register a new peer and ensure its presence
		id := fmt.Sprintf("test %d", i)
		if err := tester.newPeer(id, protocol, chain); err != nil {
			t.Fatalf("test %d: failed to register new peer: %v", i, err)
		}
		if _, ok := tester.peers[id]; !ok {
			t.Fatalf("test %d: registered peer not found", i)
		}
		// Simulate a synchronisation and check the required result
		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }

		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
		if _, ok := tester.peers[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
		}
	}
}

// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }

func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise half the blocks and check initial progress
	tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2))
	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("peer-half", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(chain.len()/2 - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Synchronise all the blocks and check continuation progress
	tester.newPeer("peer-full", protocol, chain)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("peer-full", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		StartingBlock: uint64(chain.len()/2 - 1),
		CurrentBlock:  uint64(chain.len()/2 - 1),
		HighestBlock:  uint64(chain.len() - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(chain.len()/2 - 1),
		CurrentBlock:  uint64(chain.len() - 1),
		HighestBlock:  uint64(chain.len() - 1),
	})
}

func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	p := d.Progress()
	p.KnownStates, p.PulledStates = 0, 0
	want.KnownStates, want.PulledStates = 0, 0
	if p != want {
		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
	}
}
  1130  
  1131  // Tests that synchronisation progress (origin block number and highest block
  1132  // number) is tracked and updated correctly in case of a fork (or manual head
  1133  // revertal).
  1134  func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1135  func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1136  func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1137  func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1138  func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
  1139  func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
  1140  
  1141  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1142  	t.Parallel()
  1143  
  1144  	tester := newTester()
  1145  	defer tester.terminate()
  1146  	chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHashFetch)
  1147  	chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHashFetch)
  1148  
  1149  	// Set a sync init hook to catch progress changes
  1150  	starting := make(chan struct{})
  1151  	progress := make(chan struct{})
  1152  
  1153  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1154  		starting <- struct{}{}
  1155  		<-progress
  1156  	}
  1157  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1158  
  1159  	// Synchronise with one of the forks and check progress
  1160  	tester.newPeer("fork A", protocol, chainA)
  1161  	pending := new(sync.WaitGroup)
  1162  	pending.Add(1)
  1163  	go func() {
  1164  		defer pending.Done()
  1165  		if err := tester.sync("fork A", nil, mode); err != nil {
  1166  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1167  		}
  1168  	}()
  1169  	<-starting
  1170  
  1171  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1172  		HighestBlock: uint64(chainA.len() - 1),
  1173  	})
  1174  	progress <- struct{}{}
  1175  	pending.Wait()
  1176  
  1177  	// Simulate a successful sync above the fork
  1178  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1179  
  1180  	// Synchronise with the second fork and check progress resets
  1181  	tester.newPeer("fork B", protocol, chainB)
  1182  	pending.Add(1)
  1183  	go func() {
  1184  		defer pending.Done()
  1185  		if err := tester.sync("fork B", nil, mode); err != nil {
  1186  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1187  		}
  1188  	}()
  1189  	<-starting
  1190  	checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
  1191  		StartingBlock: uint64(testChainBase.len() - 1),
  1192  		CurrentBlock:  uint64(chainA.len() - 1),
  1193  		HighestBlock:  uint64(chainB.len() - 1),
  1194  	})
  1195  
  1196  	// Check final progress after successful sync
  1197  	progress <- struct{}{}
  1198  	pending.Wait()
  1199  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1200  		StartingBlock: uint64(testChainBase.len() - 1),
  1201  		CurrentBlock:  uint64(chainB.len() - 1),
  1202  		HighestBlock:  uint64(chainB.len() - 1),
  1203  	})
  1204  }
  1205  
  1206  // Tests that if synchronisation is aborted due to some failure, then the progress
  1207  // origin is not updated in the next sync cycle, as it should be considered the
  1208  // continuation of the previous sync and not a new instance.
  1209  func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1210  func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1211  func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1212  func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1213  func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1214  func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1215  
  1216  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1217  	t.Parallel()
  1218  
  1219  	tester := newTester()
  1220  	defer tester.terminate()
  1221  	chain := testChainBase.shorten(blockCacheItems - 15)
  1222  
  1223  	// Set a sync init hook to catch progress changes
  1224  	starting := make(chan struct{})
  1225  	progress := make(chan struct{})
  1226  
  1227  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1228  		starting <- struct{}{}
  1229  		<-progress
  1230  	}
  1231  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1232  
  1233  	// Attempt a full sync with a faulty peer
  1234  	brokenChain := chain.shorten(chain.len())
  1235  	missing := brokenChain.len() / 2
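        	// Remove every trace of the middle block so the sync against the faulty peer aborts partway through.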
  1236  	delete(brokenChain.headerm, brokenChain.chain[missing])
  1237  	delete(brokenChain.blockm, brokenChain.chain[missing])
  1238  	delete(brokenChain.receiptm, brokenChain.chain[missing])
  1239  	tester.newPeer("faulty", protocol, brokenChain)
  1240  
  1241  	pending := new(sync.WaitGroup)
  1242  	pending.Add(1)
  1243  	go func() {
  1244  		defer pending.Done()
  1245  		if err := tester.sync("faulty", nil, mode); err == nil {
  1246  			panic("succeeded faulty synchronisation")
  1247  		}
  1248  	}()
  1249  	<-starting
  1250  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1251  		HighestBlock: uint64(brokenChain.len() - 1),
  1252  	})
  1253  	progress <- struct{}{}
  1254  	pending.Wait()
  1255  	afterFailedSync := tester.downloader.Progress()
  1256  
  1257  	// Synchronise with a good peer and check that the progress origin remains the same
  1258  	// after a failure
  1259  	tester.newPeer("valid", protocol, chain)
  1260  	pending.Add(1)
  1261  	go func() {
  1262  		defer pending.Done()
  1263  		if err := tester.sync("valid", nil, mode); err != nil {
  1264  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1265  		}
  1266  	}()
  1267  	<-starting
  1268  	checkProgress(t, tester.downloader, "completing", afterFailedSync)
  1269  
  1270  	// Check final progress after successful sync
  1271  	progress <- struct{}{}
  1272  	pending.Wait()
  1273  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1274  		CurrentBlock: uint64(chain.len() - 1),
  1275  		HighestBlock: uint64(chain.len() - 1),
  1276  	})
  1277  }
  1278  
  1279  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1280  // the progress height is successfully reduced at the next sync invocation.
  1281  func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1282  func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1283  func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1284  func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1285  func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1286  func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1287  
  1288  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1289  	t.Parallel()
  1290  
  1291  	tester := newTester()
  1292  	defer tester.terminate()
  1293  	chain := testChainBase.shorten(blockCacheItems - 15)
  1294  
  1295  	// Set a sync init hook to catch progress changes
  1296  	starting := make(chan struct{})
  1297  	progress := make(chan struct{})
  1298  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1299  		starting <- struct{}{}
  1300  		<-progress
  1301  	}
  1302  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1303  
  1304  	// Create and sync with an attacker that promises a higher chain than available.
  1305  	brokenChain := chain.shorten(chain.len())
  1306  	numMissing := 5
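        	// Withhold a few headers just below the announced head, so the attacker advertises blocks it cannot deliver.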
  1307  	for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- {
  1308  		delete(brokenChain.headerm, brokenChain.chain[i])
  1309  	}
  1310  	tester.newPeer("attack", protocol, brokenChain)
  1311  
  1312  	pending := new(sync.WaitGroup)
  1313  	pending.Add(1)
  1314  	go func() {
  1315  		defer pending.Done()
  1316  		if err := tester.sync("attack", nil, mode); err == nil {
  1317  			panic("succeeded attacker synchronisation")
  1318  		}
  1319  	}()
  1320  	<-starting
  1321  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1322  		HighestBlock: uint64(brokenChain.len() - 1),
  1323  	})
  1324  	progress <- struct{}{}
  1325  	pending.Wait()
  1326  	afterFailedSync := tester.downloader.Progress()
  1327  
  1328  	// Synchronise with a good peer and check that the progress height has been reduced to
  1329  	// the true value.
  1330  	validChain := chain.shorten(chain.len() - numMissing)
  1331  	tester.newPeer("valid", protocol, validChain)
  1332  	pending.Add(1)
  1333  
  1334  	go func() {
  1335  		defer pending.Done()
  1336  		if err := tester.sync("valid", nil, mode); err != nil {
  1337  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1338  		}
  1339  	}()
  1340  	<-starting
  1341  	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
  1342  		CurrentBlock: afterFailedSync.CurrentBlock,
  1343  		HighestBlock: uint64(validChain.len() - 1),
  1344  	})
  1345  
  1346  	// Check final progress after successful sync.
  1347  	progress <- struct{}{}
  1348  	pending.Wait()
  1349  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1350  		CurrentBlock: uint64(validChain.len() - 1),
  1351  		HighestBlock: uint64(validChain.len() - 1),
  1352  	})
  1353  }
  1354  
  1355  // This test reproduces an issue where unexpected deliveries would
  1356  // block indefinitely if they arrived at the right time.
  1357  func TestDeliverHeadersHang(t *testing.T) {
  1358  	t.Parallel()
  1359  
  1360  	testCases := []struct {
  1361  		protocol int
  1362  		syncMode SyncMode
  1363  	}{
  1364  		{62, FullSync},
  1365  		{63, FullSync},
  1366  		{63, FastSync},
  1367  		{64, FullSync},
  1368  		{64, FastSync},
  1369  		{64, LightSync},
  1370  	}
  1371  	for _, tc := range testCases {
  1372  		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
  1373  			t.Parallel()
  1374  			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
  1375  		})
  1376  	}
  1377  }
  1378  
  1379  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1380  	master := newTester()
  1381  	defer master.terminate()
  1382  	chain := testChainBase.shorten(15)
  1383  
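        	// Run the sync repeatedly; the hang this test guards against was timing-dependent.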
  1384  	for i := 0; i < 200; i++ {
  1385  		tester := newTester()
  1386  		tester.peerDb = master.peerDb
  1387  		tester.newPeer("peer", protocol, chain)
  1388  
  1389  		// Whenever the downloader requests headers, flood it with
  1390  		// a lot of unrequested header deliveries.
  1391  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1392  			peer:   tester.downloader.peers.peers["peer"].peer,
  1393  			tester: tester,
  1394  		}
  1395  		if err := tester.sync("peer", nil, mode); err != nil {
  1396  			t.Errorf("test %d: sync failed: %v", i, err)
  1397  		}
  1398  		tester.terminate()
  1399  	}
  1400  }
  1401  
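        // floodingTestPeer wraps a regular test peer and floods the downloader with
        // unsolicited header deliveries whenever headers are requested by number.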
  1402  type floodingTestPeer struct {
  1403  	peer   Peer
  1404  	tester *downloadTester
  1405  }
  1406  
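        // Everything except RequestHeadersByNumber is a straight pass-through to the wrapped peer.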
  1407  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1408  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1409  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1410  }
  1411  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1412  	return ftp.peer.RequestBodies(hashes)
  1413  }
  1414  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1415  	return ftp.peer.RequestReceipts(hashes)
  1416  }
  1417  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1418  	return ftp.peer.RequestNodeData(hashes)
  1419  }
  1420  
  1421  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
  1422  	deliveriesDone := make(chan struct{}, 500)
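        	// Fire off one unsolicited delivery per fake peer, reserving the channel's last slot for the real request below.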
  1423  	for i := 0; i < cap(deliveriesDone)-1; i++ {
  1424  		peer := fmt.Sprintf("fake-peer%d", i)
  1425  		go func() {
  1426  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1427  			deliveriesDone <- struct{}{}
  1428  		}()
  1429  	}
  1430  
  1431  	// None of the extra deliveries should block.
  1432  	timeout := time.After(60 * time.Second)
  1433  	launched := false
  1434  	for i := 0; i < cap(deliveriesDone); i++ {
  1435  		select {
  1436  		case <-deliveriesDone:
  1437  			if !launched {
  1438  				// Start delivering the requested headers
  1439  				// after one of the flooding responses has arrived.
  1440  				go func() {
  1441  					ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1442  					deliveriesDone <- struct{}{}
  1443  				}()
  1444  				launched = true
  1445  			}
  1446  		case <-timeout:
  1447  			panic("blocked")
  1448  		}
  1449  	}
  1450  	return nil
  1451  }
  1452  
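        // Tests that calculateRequestSpan picks sensible header request batches for
        // various combinations of local and remote chain heights.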
  1453  func TestRemoteHeaderRequestSpan(t *testing.T) {
  1454  	testCases := []struct {
  1455  		remoteHeight uint64
  1456  		localHeight  uint64
  1457  		expected     []int
  1458  	}{
  1459  		// Remote is way higher. We should ask for the remote head and go backwards
  1460  		{1500, 1000,
  1461  			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
  1462  		},
  1463  		{15000, 13006,
  1464  			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
  1465  		},
  1466  		// Remote is pretty close to us, so we don't have to fetch as many headers
  1467  		{1200, 1150,
  1468  			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
  1469  		},
  1470  		// Remote is equal to us (so on a fork with higher TD)
  1471  		// We should get the closest couple of ancestors
  1472  		{1500, 1500,
  1473  			[]int{1497, 1499},
  1474  		},
  1475  		// We're higher than the remote! Odd
  1476  		{1000, 1500,
  1477  			[]int{997, 999},
  1478  		},
  1479  		// Check some weird edge cases to make sure it behaves somewhat rationally
  1480  		{0, 1500,
  1481  			[]int{0, 2},
  1482  		},
  1483  		{6000000, 0,
  1484  			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
  1485  		},
  1486  		{0, 0,
  1487  			[]int{0, 2},
  1488  		},
  1489  	}
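        	// reqs expands a (from, count, span) triple into the list of block numbers such a request would cover.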
  1490  	reqs := func(from, count, span int) []int {
  1491  		var r []int
  1492  		num := from
  1493  		for len(r) < count {
  1494  			r = append(r, num)
  1495  			num += span + 1
  1496  		}
  1497  		return r
  1498  	}
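        	// For example, per the first table entry above, calculateRequestSpan(1500, 1000)
        	// is expected to return from=1323, count=12, span=15, so the requested numbers
        	// climb from 1323 to 1499 in steps of span+1 = 16.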
  1499  	for i, tt := range testCases {
  1500  		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
  1501  		data := reqs(int(from), count, span)
  1502  
  1503  		if max != uint64(data[len(data)-1]) {
  1504  			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
  1505  		}
  1506  		failed := false
  1507  		if len(data) != len(tt.expected) {
  1508  			failed = true
  1509  			t.Errorf("test %d: length wrong, expected %d, got %d", i, len(tt.expected), len(data))
  1510  		} else {
  1511  			for j, n := range data {
  1512  				if n != tt.expected[j] {
  1513  					failed = true
  1514  					break
  1515  				}
  1516  			}
  1517  		}
  1518  		if failed {
  1519  			res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
  1520  			exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
  1521  			t.Logf("got: %v", res)
  1522  			t.Logf("exp: %v", exp)
  1523  			t.Errorf("test %d: wrong values", i)
  1524  		}
  1525  	}
  1526  }