github.com/beyonderyue/gochain@v2.2.26+incompatible/eth/downloader/downloader_test.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"context"
    21  	"errors"
    22  	"fmt"
    23  	"math/big"
    24  	"sync"
    25  	"sync/atomic"
    26  	"testing"
    27  	"time"
    28  
    29  	"github.com/gochain-io/gochain/common"
    30  	"github.com/gochain-io/gochain/consensus/clique"
    31  	"github.com/gochain-io/gochain/core"
    32  	"github.com/gochain-io/gochain/core/types"
    33  	"github.com/gochain-io/gochain/crypto"
    34  	"github.com/gochain-io/gochain/ethdb"
    35  	"github.com/gochain-io/gochain/event"
    36  	"github.com/gochain-io/gochain/params"
    37  	"github.com/gochain-io/gochain/trie"
    38  )
    39  
    40  var (
    41  	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
    42  	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
    43  )
    44  
    45  // Reduce some of the parameters to make the tester faster.
    46  func init() {
    47  	MaxForkAncestry = uint64(10000)
    48  	blockCacheItems = 1024
    49  	fsHeaderContCheck = 500 * time.Millisecond
    50  }
    51  
     52  // downloadTester is a test simulator for mocking out the local block chain.
    53  type downloadTester struct {
    54  	downloader *Downloader
    55  
     56  	genesis *types.Block    // Genesis block used by the tester and peers
    57  	stateDb common.Database // Database used by the tester for syncing from peers
    58  	peerDb  common.Database // Database of the peers containing all data
    59  
    60  	ownHashes   []common.Hash                  // Hash chain belonging to the tester
    61  	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
    62  	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
    63  	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
    64  	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain
    65  
    66  	peerHashes   map[string][]common.Hash                  // Hash chain belonging to different test peers
    67  	peerHeaders  map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
    68  	peerBlocks   map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
    69  	peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
    70  	peerChainTds map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains
    71  
    72  	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return
    73  
    74  	lock sync.RWMutex
    75  }
    76  
    77  // newTester creates a new downloader test mocker.
    78  func newTester() *downloadTester {
    79  	testdb := ethdb.NewMemDatabase()
    80  	genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
    81  
    82  	tester := &downloadTester{
    83  		genesis:           genesis,
    84  		peerDb:            testdb,
    85  		ownHashes:         []common.Hash{genesis.Hash()},
    86  		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
    87  		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
    88  		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
    89  		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
    90  		peerHashes:        make(map[string][]common.Hash),
    91  		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
    92  		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
    93  		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
    94  		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
    95  		peerMissingStates: make(map[string]map[common.Hash]bool),
    96  	}
    97  	tester.stateDb = ethdb.NewMemDatabase()
    98  	tester.stateDb.GlobalTable().Put(genesis.Root().Bytes(), []byte{0x00})
    99  
   100  	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)
   101  
   102  	return tester
   103  }
   104  
    105  // makeChain creates a chain of n blocks starting at and including parent.
    106  // The returned hash chain is ordered head->parent. In addition, every 3rd
    107  // block contains a bonus transaction to the miner to allow testing correct
    108  // block reassembly.
   109  func (dl *downloadTester) makeChain(ctx context.Context, n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
   110  	// Generate the block chain
   111  	blocks, receipts := core.GenerateChain(ctx, params.TestChainConfig, parent, clique.NewFaker(), dl.peerDb, n, func(ctx context.Context, i int, block *core.BlockGen) {
   112  		block.SetCoinbase(common.Address{seed})
   113  
    114  		// If a heavy chain is requested, override the block difficulty directly
   115  		if heavy {
   116  			block.SetDifficulty(1)
   117  		}
    118  		// If the block number is a multiple of 3, send a bonus transaction to the miner
   119  		if parent == dl.genesis && i%3 == 0 {
   120  			signer := types.MakeSigner(params.TestChainConfig, block.Number())
   121  			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
   122  			if err != nil {
   123  				panic(err)
   124  			}
   125  			block.AddTx(ctx, tx)
   126  		}
   127  	})
   128  	// Convert the block-chain into a hash-chain and header/block maps
   129  	hashes := make([]common.Hash, n+1)
   130  	hashes[len(hashes)-1] = parent.Hash()
   131  
   132  	headerm := make(map[common.Hash]*types.Header, n+1)
   133  	headerm[parent.Hash()] = parent.Header()
   134  
   135  	blockm := make(map[common.Hash]*types.Block, n+1)
   136  	blockm[parent.Hash()] = parent
   137  
   138  	receiptm := make(map[common.Hash]types.Receipts, n+1)
   139  	receiptm[parent.Hash()] = parentReceipts
   140  
   141  	for i, b := range blocks {
   142  		hashes[len(hashes)-i-2] = b.Hash()
   143  		headerm[b.Hash()] = b.Header()
   144  		blockm[b.Hash()] = b
   145  		receiptm[b.Hash()] = receipts[i]
   146  	}
   147  	return hashes, headerm, blockm, receiptm
   148  }
   149  
   150  // makeChainFork creates two chains of length n, such that h1[:f] and
   151  // h2[:f] are different but have a common suffix of length n-f.
   152  func (dl *downloadTester) makeChainFork(ctx context.Context, n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
   153  	// Create the common suffix
   154  	hashes, headers, blocks, receipts := dl.makeChain(ctx, n-f, 0, parent, parentReceipts, false)
   155  
    156  	// Create the forks, making the second heavier if non-balanced forks were requested
   157  	hashes1, headers1, blocks1, receipts1 := dl.makeChain(ctx, f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
   158  	hashes1 = append(hashes1, hashes[1:]...)
   159  
   160  	heavy := false
   161  	if !balanced {
   162  		heavy = true
   163  	}
   164  	hashes2, headers2, blocks2, receipts2 := dl.makeChain(ctx, f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
   165  	hashes2 = append(hashes2, hashes[1:]...)
   166  
   167  	for hash, header := range headers {
   168  		headers1[hash] = header
   169  		headers2[hash] = header
   170  	}
   171  	for hash, block := range blocks {
   172  		blocks1[hash] = block
   173  		blocks2[hash] = block
   174  	}
   175  	for hash, receipt := range receipts {
   176  		receipts1[hash] = receipt
   177  		receipts2[hash] = receipt
   178  	}
   179  	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
   180  }
   181  
   182  // terminate aborts any operations on the embedded downloader and releases all
   183  // held resources.
   184  func (dl *downloadTester) terminate() {
   185  	dl.downloader.Terminate()
   186  }
   187  
   188  // sync starts synchronizing with a remote peer, blocking until it completes.
   189  func (dl *downloadTester) sync(ctx context.Context, id string, td *big.Int, mode SyncMode) error {
   190  	dl.lock.RLock()
   191  	hash := dl.peerHashes[id][0]
   192  	// If no particular TD was requested, load from the peer's blockchain
   193  	if td == nil {
   194  		td = big.NewInt(1)
   195  		if diff, ok := dl.peerChainTds[id][hash]; ok {
   196  			td = diff
   197  		}
   198  	}
   199  	dl.lock.RUnlock()
   200  
   201  	// Synchronise with the chosen peer and ensure proper cleanup afterwards
   202  	err := dl.downloader.synchronise(ctx, id, hash, td, mode)
   203  	select {
   204  	case <-dl.downloader.cancelCh:
   205  		// Ok, downloader fully cancelled after sync cycle
   206  	default:
   207  		// Downloader is still accepting packets, can block a peer up
   208  		panic("downloader active post sync cycle") // panic will be caught by tester
   209  	}
   210  	return err
   211  }
   212  
    213  // HasHeader checks if a header is present in the tester's canonical chain.
   214  func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
   215  	return dl.GetHeaderByHash(hash) != nil
   216  }
   217  
    218  // HasBlock checks if a block is present in the tester's canonical chain.
   219  func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
   220  	return dl.GetBlockByHash(hash) != nil
   221  }
   222  
    223  // GetHeaderByHash retrieves a header from the tester's canonical chain.
   224  func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
   225  	dl.lock.RLock()
   226  	defer dl.lock.RUnlock()
   227  
   228  	return dl.ownHeaders[hash]
   229  }
   230  
    231  // GetBlockByHash retrieves a block from the tester's canonical chain.
   232  func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
   233  	dl.lock.RLock()
   234  	defer dl.lock.RUnlock()
   235  
   236  	return dl.ownBlocks[hash]
   237  }
   238  
   239  // CurrentHeader retrieves the current head header from the canonical chain.
   240  func (dl *downloadTester) CurrentHeader() *types.Header {
   241  	dl.lock.RLock()
   242  	defer dl.lock.RUnlock()
   243  
   244  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   245  		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
   246  			return header
   247  		}
   248  	}
   249  	return dl.genesis.Header()
   250  }
   251  
   252  // CurrentBlock retrieves the current head block from the canonical chain.
   253  func (dl *downloadTester) CurrentBlock() *types.Block {
   254  	dl.lock.RLock()
   255  	defer dl.lock.RUnlock()
   256  
   257  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   258  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
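         			// Only treat a block as the head if its state root is actually present in the
         			// local state database; fast-synced blocks whose state has not arrived yet are skipped.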
   259  			if _, err := dl.stateDb.GlobalTable().Get(block.Root().Bytes()); err == nil {
   260  				return block
   261  			}
   262  		}
   263  	}
   264  	return dl.genesis
   265  }
   266  
   267  // CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
   268  func (dl *downloadTester) CurrentFastBlock() *types.Block {
   269  	dl.lock.RLock()
   270  	defer dl.lock.RUnlock()
   271  
   272  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   273  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   274  			return block
   275  		}
   276  	}
   277  	return dl.genesis
   278  }
   279  
   280  // FastSyncCommitHead manually sets the head block to a given hash.
   281  func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
   282  	// For now only check that the state trie is correct
   283  	if block := dl.GetBlockByHash(hash); block != nil {
   284  		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb.GlobalTable()), 0)
   285  		return err
   286  	}
   287  	return fmt.Errorf("non existent block: %x", hash[:4])
   288  }
   289  
   290  // GetTd retrieves the block's total difficulty from the canonical chain.
   291  func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
   292  	dl.lock.RLock()
   293  	defer dl.lock.RUnlock()
   294  
   295  	return dl.ownChainTd[hash]
   296  }
   297  
   298  // InsertHeaderChain injects a new batch of headers into the simulated chain.
   299  func (dl *downloadTester) InsertHeaderChain(ctx context.Context, headers []*types.Header, checkFreq int) (int, error) {
   300  	dl.lock.Lock()
   301  	defer dl.lock.Unlock()
   302  
   303  	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
   304  	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
   305  		return 0, errors.New("unknown parent")
   306  	}
   307  	for i := 1; i < len(headers); i++ {
   308  		if headers[i].ParentHash != headers[i-1].Hash() {
   309  			return i, errors.New("unknown parent")
   310  		}
   311  	}
   312  	// Do a full insert if pre-checks passed
   313  	for i, header := range headers {
   314  		if _, ok := dl.ownHeaders[header.Hash()]; ok {
   315  			continue
   316  		}
   317  		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
   318  			return i, errors.New("unknown parent")
   319  		}
   320  		dl.ownHashes = append(dl.ownHashes, header.Hash())
   321  		dl.ownHeaders[header.Hash()] = header
   322  		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
   323  	}
   324  	return len(headers), nil
   325  }
   326  
   327  // InsertChain injects a new batch of blocks into the simulated chain.
   328  func (dl *downloadTester) InsertChain(ctx context.Context, blocks types.Blocks) (int, error) {
   329  	dl.lock.Lock()
   330  	defer dl.lock.Unlock()
   331  
   332  	for i, block := range blocks {
   333  		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
   334  			return i, errors.New("unknown parent")
   335  		} else if _, err := dl.stateDb.GlobalTable().Get(parent.Root().Bytes()); err != nil {
   336  			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
   337  		}
   338  		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
   339  			dl.ownHashes = append(dl.ownHashes, block.Hash())
   340  			dl.ownHeaders[block.Hash()] = block.Header()
   341  		}
   342  		dl.ownBlocks[block.Hash()] = block
   343  		dl.stateDb.GlobalTable().Put(block.Root().Bytes(), []byte{0x00})
   344  		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
   345  	}
   346  	return len(blocks), nil
   347  }
   348  
   349  // InsertReceiptChain injects a new batch of receipts into the simulated chain.
   350  func (dl *downloadTester) InsertReceiptChain(ctx context.Context, blocks types.Blocks, receipts []types.Receipts) (int, error) {
   351  	dl.lock.Lock()
   352  	defer dl.lock.Unlock()
   353  
   354  	for i := 0; i < len(blocks) && i < len(receipts); i++ {
   355  		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
   356  			return i, errors.New("unknown owner")
   357  		}
   358  		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
   359  			return i, errors.New("unknown parent")
   360  		}
   361  		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
   362  		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
   363  	}
   364  	return len(blocks), nil
   365  }
   366  
   367  // Rollback removes some recently added elements from the chain.
   368  func (dl *downloadTester) Rollback(hashes []common.Hash) {
   369  	dl.lock.Lock()
   370  	defer dl.lock.Unlock()
   371  
   372  	for i := len(hashes) - 1; i >= 0; i-- {
   373  		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
   374  			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
   375  		}
   376  		delete(dl.ownChainTd, hashes[i])
   377  		delete(dl.ownHeaders, hashes[i])
   378  		delete(dl.ownReceipts, hashes[i])
   379  		delete(dl.ownBlocks, hashes[i])
   380  	}
   381  }
   382  
   383  // newPeer registers a new block download source into the downloader.
   384  func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
   385  	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0)
   386  }
   387  
   388  // newSlowPeer registers a new block download source into the downloader, with a
   389  // specific delay time on processing the network packets sent to it, simulating
   390  // potentially slow network IO.
   391  func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error {
   392  	dl.lock.Lock()
   393  	defer dl.lock.Unlock()
   394  
   395  	var err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl: dl, id: id, delay: delay})
   396  	if err == nil {
   397  		// Assign the owned hashes, headers and blocks to the peer (deep copy)
   398  		dl.peerHashes[id] = make([]common.Hash, len(hashes))
   399  		copy(dl.peerHashes[id], hashes)
   400  
   401  		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
   402  		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
   403  		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
   404  		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
   405  		dl.peerMissingStates[id] = make(map[common.Hash]bool)
   406  
   407  		genesis := hashes[len(hashes)-1]
   408  		if header := headers[genesis]; header != nil {
   409  			dl.peerHeaders[id][genesis] = header
   410  			dl.peerChainTds[id][genesis] = header.Difficulty
   411  		}
   412  		if block := blocks[genesis]; block != nil {
   413  			dl.peerBlocks[id][genesis] = block
   414  			dl.peerChainTds[id][genesis] = block.Difficulty()
   415  		}
   416  
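         		// Walk from the block after genesis towards the head (hashes are ordered
         		// head->genesis), accumulating total difficulty wherever the parent is known.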
   417  		for i := len(hashes) - 2; i >= 0; i-- {
   418  			hash := hashes[i]
   419  
   420  			if header, ok := headers[hash]; ok {
   421  				dl.peerHeaders[id][hash] = header
   422  				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
   423  					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
   424  				}
   425  			}
   426  			if block, ok := blocks[hash]; ok {
   427  				dl.peerBlocks[id][hash] = block
   428  				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
   429  					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
   430  				}
   431  			}
   432  			if receipt, ok := receipts[hash]; ok {
   433  				dl.peerReceipts[id][hash] = receipt
   434  			}
   435  		}
   436  	}
   437  	return err
   438  }
   439  
   440  // dropPeer simulates a hard peer removal from the connection pool.
   441  func (dl *downloadTester) dropPeer(id string) {
   442  	dl.lock.Lock()
   443  	defer dl.lock.Unlock()
   444  
   445  	delete(dl.peerHashes, id)
   446  	delete(dl.peerHeaders, id)
   447  	delete(dl.peerBlocks, id)
   448  	delete(dl.peerChainTds, id)
   449  
   450  	dl.downloader.UnregisterPeer(id)
   451  }
   452  
   453  type downloadTesterPeer struct {
   454  	dl    *downloadTester
   455  	id    string
   456  	delay time.Duration
   457  	lock  sync.RWMutex
   458  }
   459  
    460  // setDelay is a thread-safe setter for the network delay value.
   461  func (dlp *downloadTesterPeer) setDelay(delay time.Duration) {
   462  	dlp.lock.Lock()
   463  	defer dlp.lock.Unlock()
   464  
   465  	dlp.delay = delay
   466  }
   467  
    468  // waitDelay is a thread-safe way to sleep for the configured time.
   469  func (dlp *downloadTesterPeer) waitDelay() {
   470  	dlp.lock.RLock()
   471  	delay := dlp.delay
   472  	dlp.lock.RUnlock()
   473  
   474  	time.Sleep(delay)
   475  }
   476  
    477  // Head retrieves the test peer's current head hash and total
    478  // difficulty.
   479  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   480  	dlp.dl.lock.RLock()
   481  	defer dlp.dl.lock.RUnlock()
   482  
   483  	return dlp.dl.peerHashes[dlp.id][0], nil
   484  }
   485  
    486  // RequestHeadersByHash serves a GetBlockHeaders request based on a hashed
    487  // origin, resolving the hash to its canonical number on the associated test
    488  // peer and delegating to the numbered header fetcher.
   489  func (dlp *downloadTesterPeer) RequestHeadersByHash(ctx context.Context, origin common.Hash, amount int, skip int, reverse bool) error {
   490  	// Find the canonical number of the hash
   491  	dlp.dl.lock.RLock()
   492  	number := uint64(0)
   493  	for num, hash := range dlp.dl.peerHashes[dlp.id] {
   494  		if hash == origin {
   495  			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
   496  			break
   497  		}
   498  	}
   499  	dlp.dl.lock.RUnlock()
   500  
   501  	// Use the absolute header fetcher to satisfy the query
   502  	return dlp.RequestHeadersByNumber(ctx, number, amount, skip, reverse)
   503  }
   504  
    505  // RequestHeadersByNumber serves a GetBlockHeaders request based on a numbered
    506  // origin, delivering batches of headers from the associated test peer's
    507  // canonical chain back through the downloader.
   508  func (dlp *downloadTesterPeer) RequestHeadersByNumber(ctx context.Context, origin uint64, amount int, skip int, reverse bool) error {
   509  	dlp.waitDelay()
   510  
   511  	dlp.dl.lock.RLock()
   512  	defer dlp.dl.lock.RUnlock()
   513  
   514  	// Gather the next batch of headers
   515  	hashes := dlp.dl.peerHashes[dlp.id]
   516  	headers := dlp.dl.peerHeaders[dlp.id]
   517  	result := make([]*types.Header, 0, amount)
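         	// The hash chain is ordered head->genesis, so block number origin lives at
         	// index len(hashes)-1-origin; stepping that index down by skip+1 walks forward
         	// towards the head. Note that the reverse flag is ignored by this mock.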
   518  	for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
   519  		if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
   520  			result = append(result, header)
   521  		}
   522  	}
   523  	// Delay delivery a bit to allow attacks to unfold
   524  	go func() {
   525  		time.Sleep(time.Millisecond)
   526  		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   527  	}()
   528  	return nil
   529  }
   530  
    531  // RequestBodies serves a GetBlockBodies request for the associated test peer,
    532  // gathering the transactions of the requested blocks and delivering them back
    533  // through the downloader.
   534  func (dlp *downloadTesterPeer) RequestBodies(ctx context.Context, hashes []common.Hash) error {
   535  	dlp.waitDelay()
   536  
   537  	dlp.dl.lock.RLock()
   538  	defer dlp.dl.lock.RUnlock()
   539  
   540  	blocks := dlp.dl.peerBlocks[dlp.id]
   541  
   542  	transactions := make([][]*types.Transaction, 0, len(hashes))
   543  
   544  	for _, hash := range hashes {
   545  		if block, ok := blocks[hash]; ok {
   546  			transactions = append(transactions, block.Transactions())
   547  		}
   548  	}
   549  	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions)
   550  
   551  	return nil
   552  }
   553  
    554  // RequestReceipts serves a GetReceipts request for the associated test peer,
    555  // gathering the receipts of the requested blocks and delivering them back
    556  // through the downloader.
   557  func (dlp *downloadTesterPeer) RequestReceipts(ctx context.Context, hashes []common.Hash) error {
   558  	dlp.waitDelay()
   559  
   560  	dlp.dl.lock.RLock()
   561  	defer dlp.dl.lock.RUnlock()
   562  
   563  	receipts := dlp.dl.peerReceipts[dlp.id]
   564  
   565  	results := make([][]*types.Receipt, 0, len(hashes))
   566  	for _, hash := range hashes {
   567  		if receipt, ok := receipts[hash]; ok {
   568  			results = append(results, receipt)
   569  		}
   570  	}
   571  	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)
   572  
   573  	return nil
   574  }
   575  
    576  // RequestNodeData serves a GetNodeData request for the associated test peer,
    577  // gathering the requested state entries from the peer database and delivering
    578  // them back through the downloader.
   579  func (dlp *downloadTesterPeer) RequestNodeData(ctx context.Context, hashes []common.Hash) error {
   580  	dlp.waitDelay()
   581  
   582  	dlp.dl.lock.RLock()
   583  	defer dlp.dl.lock.RUnlock()
   584  
   585  	results := make([][]byte, 0, len(hashes))
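         	// Serve trie nodes straight from the peer database, but skip any entries the
         	// test has marked as missing to simulate incomplete remote state.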
   586  	for _, hash := range hashes {
   587  		if data, err := dlp.dl.peerDb.GlobalTable().Get(hash.Bytes()); err == nil {
   588  			if !dlp.dl.peerMissingStates[dlp.id][hash] {
   589  				results = append(results, data)
   590  			}
   591  		}
   592  	}
   593  	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
   594  
   595  	return nil
   596  }
   597  
   598  // assertOwnChain checks if the local chain contains the correct number of items
   599  // of the various chain components.
   600  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   601  	assertOwnForkedChain(t, tester, 1, []int{length})
   602  }
   603  
   604  // assertOwnForkedChain checks if the local forked chain contains the correct
   605  // number of items of the various chain components.
   606  func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
   607  	// Initialize the counters for the first fork
   608  	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]-fsMinFullBlocks
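         	// Fast sync imports the final fsMinFullBlocks blocks fully, so receipts are
         	// only expected for the portion of the chain below that threshold.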
   609  
   610  	if receipts < 0 {
   611  		receipts = 1
   612  	}
   613  	// Update the counters for each subsequent fork
   614  	for _, length := range lengths[1:] {
   615  		headers += length - common
   616  		blocks += length - common
   617  		receipts += length - common - fsMinFullBlocks
   618  	}
   619  	switch tester.downloader.mode {
   620  	case FullSync:
   621  		receipts = 1
   622  	case LightSync:
   623  		blocks, receipts = 1, 1
   624  	}
   625  	if hs := len(tester.ownHeaders); hs != headers {
   626  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   627  	}
   628  	if bs := len(tester.ownBlocks); bs != blocks {
   629  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   630  	}
   631  	if rs := len(tester.ownReceipts); rs != receipts {
   632  		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
   633  	}
   634  	// Verify the state trie too for fast syncs
   635  	/*if tester.downloader.mode == FastSync {
   636  		pivot := uint64(0)
   637  		var index int
   638  		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
   639  			index = pivot
   640  		} else {
   641  			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
   642  		}
   643  		if index > 0 {
   644  			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(trie.NewDatabase(tester.stateDb))); statedb == nil || err != nil {
   645  				t.Fatalf("state reconstruction failed: %v", err)
   646  			}
   647  		}
   648  	}*/
   649  }
   650  
   651  // Tests that simple synchronization against a canonical chain works correctly.
    652  // In this test common ancestor lookup should be short-circuited and not require
   653  // binary searching.
   654  func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
   655  func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
   656  func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
   657  func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
   658  func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
   659  func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }
   660  
   661  func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   662  	ctx := context.Background()
   663  	t.Parallel()
   664  
   665  	tester := newTester()
   666  	defer tester.terminate()
   667  
   668  	// Create a small enough block chain to download
   669  	targetBlocks := blockCacheItems - 15
   670  	hashes, headers, blocks, receipts := tester.makeChain(ctx, targetBlocks, 0, tester.genesis, nil, false)
   671  
   672  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   673  
   674  	// Synchronise with the peer and make sure all relevant data was retrieved
   675  	if err := tester.sync(ctx, "peer", nil, mode); err != nil {
   676  		t.Fatalf("failed to synchronise blocks: %v", err)
   677  	}
   678  	assertOwnChain(t, tester, targetBlocks+1)
   679  }
   680  
    681  // Tests that if a large batch of blocks is being downloaded, it is throttled
    682  // until the cached blocks are retrieved.
   683  func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
   684  func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
   685  func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
   686  func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
   687  func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
   688  
   689  func testThrottling(t *testing.T, protocol int, mode SyncMode) {
   690  	ctx := context.Background()
   691  	t.Parallel()
   692  	tester := newTester()
   693  	defer tester.terminate()
   694  
    695  	// Create a long block chain to download
   696  	targetBlocks := 8 * blockCacheItems
   697  	hashes, headers, blocks, receipts := tester.makeChain(ctx, targetBlocks, 0, tester.genesis, nil, false)
   698  
   699  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   700  
   701  	// Wrap the importer to allow stepping
   702  	blocked, proceed := uint32(0), make(chan struct{})
   703  	tester.downloader.chainInsertHook = func(results []*fetchResult) {
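         		// Record how many results are pending and stall the importer until the
         		// test explicitly allows it to proceed.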
   704  		atomic.StoreUint32(&blocked, uint32(len(results)))
   705  		<-proceed
   706  	}
   707  	// Start a synchronisation concurrently
   708  	errc := make(chan error)
   709  	go func() {
   710  		errc <- tester.sync(ctx, "peer", nil, mode)
   711  	}()
   712  	// Iteratively take some blocks, always checking the retrieval count
   713  	for {
   714  		// Check the retrieval count synchronously (! reason for this ugly block)
   715  		tester.lock.RLock()
   716  		retrieved := len(tester.ownBlocks)
   717  		tester.lock.RUnlock()
   718  		if retrieved >= targetBlocks+1 {
   719  			break
   720  		}
   721  		// Wait a bit for sync to throttle itself
   722  		var cached, frozen int
   723  		for start := time.Now(); time.Since(start) < 3*time.Second; {
   724  			time.Sleep(25 * time.Millisecond)
   725  
   726  			tester.lock.Lock()
   727  			tester.downloader.queue.lock.Lock()
   728  			cached = len(tester.downloader.queue.blockDonePool)
   729  			if mode == FastSync {
   730  				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
   731  					//if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
   732  					cached = receipts
   733  					//}
   734  				}
   735  			}
   736  			frozen = int(atomic.LoadUint32(&blocked))
   737  			retrieved = len(tester.ownBlocks)
   738  			tester.downloader.queue.lock.Unlock()
   739  			tester.lock.Unlock()
   740  
   741  			if cached == blockCacheItems || cached == blockCacheItems-reorgProtHeaderDelay || retrieved+cached+frozen == targetBlocks+1 || retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
   742  				break
   743  			}
   744  		}
   745  		// Make sure we filled up the cache, then exhaust it
   746  		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   747  
   748  		tester.lock.RLock()
   749  		retrieved = len(tester.ownBlocks)
   750  		tester.lock.RUnlock()
   751  		if cached != blockCacheItems && cached != blockCacheItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
   752  			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
   753  		}
   754  		// Permit the blocked blocks to import
   755  		if atomic.LoadUint32(&blocked) > 0 {
   756  			atomic.StoreUint32(&blocked, uint32(0))
   757  			proceed <- struct{}{}
   758  		}
   759  	}
   760  	// Check that we haven't pulled more blocks than available
   761  	assertOwnChain(t, tester, targetBlocks+1)
   762  	if err := <-errc; err != nil {
   763  		t.Fatalf("block synchronization failed: %v", err)
   764  	}
   765  }
   766  
   767  // Tests that simple synchronization against a forked chain works correctly. In
    768  // this test common ancestor lookup should *not* be short-circuited, and a full
   769  // binary search should be executed.
   770  func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
   771  func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
   772  func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
   773  func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
   774  func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
   775  func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }
   776  
   777  func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
   778  	ctx := context.Background()
   779  	t.Parallel()
   780  
   781  	tester := newTester()
   782  	defer tester.terminate()
   783  
   784  	// Create a long enough forked chain
   785  	common, fork := MaxHashFetch, 2*MaxHashFetch
   786  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(ctx, common+fork, fork, tester.genesis, nil, true)
   787  
   788  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
   789  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
   790  
   791  	// Synchronise with the peer and make sure all blocks were retrieved
   792  	if err := tester.sync(ctx, "fork A", nil, mode); err != nil {
   793  		t.Fatalf("failed to synchronise blocks: %v", err)
   794  	}
   795  	assertOwnChain(t, tester, common+fork+1)
   796  
   797  	// Synchronise with the second peer and make sure that fork is pulled too
   798  	if err := tester.sync(ctx, "fork B", nil, mode); err != nil {
   799  		t.Fatalf("failed to synchronise blocks: %v", err)
   800  	}
   801  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
   802  }
   803  
    804  // Tests that synchronising against a much shorter but much heavier fork works
    805  // correctly and is not dropped.
   806  func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
   807  func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
   808  func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
   809  func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
   810  func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
   811  func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }
   812  
   813  func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   814  	ctx := context.Background()
   815  	t.Parallel()
   816  
   817  	tester := newTester()
   818  	defer tester.terminate()
   819  
   820  	// Create a long enough forked chain
   821  	common, fork := MaxHashFetch, 4*MaxHashFetch
   822  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(ctx, common+fork, fork, tester.genesis, nil, false)
   823  
   824  	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
   825  	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)
   826  
   827  	// Synchronise with the peer and make sure all blocks were retrieved
   828  	if err := tester.sync(ctx, "light", nil, mode); err != nil {
   829  		t.Fatalf("failed to synchronise blocks: %v", err)
   830  	}
   831  	assertOwnChain(t, tester, common+fork+1)
   832  
   833  	// Synchronise with the second peer and make sure that fork is pulled too
   834  	if err := tester.sync(ctx, "heavy", nil, mode); err != nil {
   835  		t.Fatalf("failed to synchronise blocks: %v", err)
   836  	}
   837  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
   838  }
   839  
   840  // Tests that chain forks are contained within a certain interval of the current
   841  // chain head, ensuring that malicious peers cannot waste resources by feeding
   842  // long dead chains.
   843  func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
   844  func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
   845  func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
   846  func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
   847  func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
   848  func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }
   849  
   850  func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
   851  	ctx := context.Background()
   852  	t.Parallel()
   853  
   854  	tester := newTester()
   855  	defer tester.terminate()
   856  
   857  	// Create a long enough forked chain
   858  	common, fork := 13, int(MaxForkAncestry+17)
   859  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(ctx, common+fork, fork, tester.genesis, nil, true)
   860  
   861  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   862  	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)
   863  
   864  	// Synchronise with the peer and make sure all blocks were retrieved
   865  	if err := tester.sync(ctx, "original", nil, mode); err != nil {
   866  		t.Fatalf("failed to synchronise blocks: %v", err)
   867  	}
   868  	assertOwnChain(t, tester, common+fork+1)
   869  
    870  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   871  	if err := tester.sync(ctx, "rewriter", nil, mode); err != errInvalidAncestor {
   872  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   873  	}
   874  }
   875  
   876  // Tests that chain forks are contained within a certain interval of the current
   877  // chain head for short but heavy forks too. These are a bit special because they
   878  // take different ancestor lookup paths.
   879  func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
   880  func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
   881  func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
   882  func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
   883  func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
   884  func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }
   885  
   886  func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   887  	ctx := context.Background()
   888  	t.Parallel()
   889  
   890  	tester := newTester()
   891  	defer tester.terminate()
   892  
   893  	// Create a long enough forked chain
   894  	common, fork := 13, int(MaxForkAncestry+17)
   895  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(ctx, common+fork, fork, tester.genesis, nil, false)
   896  
   897  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   898  	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit
   899  
   900  	// Synchronise with the peer and make sure all blocks were retrieved
   901  	if err := tester.sync(ctx, "original", nil, mode); err != nil {
   902  		t.Fatalf("failed to synchronise blocks: %v", err)
   903  	}
   904  	assertOwnChain(t, tester, common+fork+1)
   905  
    906  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   907  	if err := tester.sync(ctx, "heavy-rewriter", nil, mode); err != errInvalidAncestor {
   908  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   909  	}
   910  }
   911  
   912  // Tests that an inactive downloader will not accept incoming block headers and
   913  // bodies.
   914  func TestInactiveDownloader62(t *testing.T) {
   915  	t.Parallel()
   916  
   917  	tester := newTester()
   918  	defer tester.terminate()
   919  
   920  	// Check that neither block headers nor bodies are accepted
   921  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   922  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   923  	}
   924  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}); err != errNoSyncActive {
   925  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   926  	}
   927  }
   928  
   929  // Tests that an inactive downloader will not accept incoming block headers,
   930  // bodies and receipts.
   931  func TestInactiveDownloader63(t *testing.T) {
   932  	t.Parallel()
   933  
   934  	tester := newTester()
   935  	defer tester.terminate()
   936  
    937  	// Check that neither block headers, bodies nor receipts are accepted
   938  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   939  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   940  	}
   941  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}); err != errNoSyncActive {
   942  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   943  	}
   944  	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
   945  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   946  	}
   947  }
   948  
   949  // Tests that a canceled download wipes all previously accumulated state.
   950  func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
   951  func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
   952  func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
   953  func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
   954  func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
   955  func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }
   956  
   957  func testCancel(t *testing.T, protocol int, mode SyncMode) {
   958  	ctx := context.Background()
   959  	t.Parallel()
   960  
   961  	tester := newTester()
   962  	defer tester.terminate()
   963  
    964  	// Create a small enough block chain to download
   965  	targetBlocks := blockCacheItems - 15
   966  	if targetBlocks >= MaxHashFetch {
   967  		targetBlocks = MaxHashFetch - 15
   968  	}
   969  	if targetBlocks >= MaxHeaderFetch {
   970  		targetBlocks = MaxHeaderFetch - 15
   971  	}
   972  	hashes, headers, blocks, receipts := tester.makeChain(ctx, targetBlocks, 0, tester.genesis, nil, false)
   973  
   974  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   975  
   976  	// Make sure canceling works with a pristine downloader
   977  	tester.downloader.Cancel()
   978  	if !tester.downloader.queue.Idle() {
   979  		t.Errorf("download queue not idle")
   980  	}
   981  	// Synchronise with the peer, but cancel afterwards
   982  	if err := tester.sync(ctx, "peer", nil, mode); err != nil {
   983  		t.Fatalf("failed to synchronise blocks: %v", err)
   984  	}
   985  	tester.downloader.Cancel()
   986  	if !tester.downloader.queue.Idle() {
   987  		t.Errorf("download queue not idle")
   988  	}
   989  }
   990  
   991  // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
   992  func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
   993  func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
   994  func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
   995  func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
   996  func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
   997  func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }
   998  
   999  func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
  1000  	ctx := context.Background()
  1001  	t.Parallel()
  1002  
  1003  	tester := newTester()
  1004  	defer tester.terminate()
  1005  
  1006  	// Create various peers with various parts of the chain
  1007  	targetPeers := 8
  1008  	targetBlocks := targetPeers*blockCacheItems - 15
  1009  	hashes, headers, blocks, receipts := tester.makeChain(ctx, targetBlocks, 0, tester.genesis, nil, false)
  1010  
  1011  	for i := 0; i < targetPeers; i++ {
  1012  		id := fmt.Sprintf("peer #%d", i)
  1013  		tester.newPeer(id, protocol, hashes[i*blockCacheItems:], headers, blocks, receipts)
  1014  	}
  1015  	if err := tester.sync(ctx, "peer #0", nil, mode); err != nil {
  1016  		t.Fatalf("failed to synchronise blocks: %v", err)
  1017  	}
  1018  	assertOwnChain(t, tester, targetBlocks+1)
  1019  }
  1020  
   1021  // Tests that synchronisations behave well in multi-version protocol environments
   1022  // and do not wreak havoc on other nodes in the network.
  1023  func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
  1024  func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
  1025  func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
  1026  func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
  1027  func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
  1028  func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }
  1029  
  1030  func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
  1031  	ctx := context.Background()
  1032  	t.Parallel()
  1033  
  1034  	tester := newTester()
  1035  	defer tester.terminate()
  1036  
  1037  	// Create a small enough block chain to download
  1038  	targetBlocks := blockCacheItems - 15
  1039  	hashes, headers, blocks, receipts := tester.makeChain(ctx, targetBlocks, 0, tester.genesis, nil, false)
  1040  
  1041  	// Create peers of every type
  1042  	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
  1043  	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
  1044  	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)
  1045  
  1046  	// Synchronise with the requested peer and make sure all blocks were retrieved
  1047  	if err := tester.sync(ctx, fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
  1048  		t.Fatalf("failed to synchronise blocks: %v", err)
  1049  	}
  1050  	assertOwnChain(t, tester, targetBlocks+1)
  1051  
  1052  	// Check that no peers have been dropped off
  1053  	for _, version := range []int{62, 63, 64} {
  1054  		peer := fmt.Sprintf("peer %d", version)
  1055  		if _, ok := tester.peerHashes[peer]; !ok {
  1056  			t.Errorf("%s dropped", peer)
  1057  		}
  1058  	}
  1059  }
  1060  
  1061  // Tests that if a block is empty (e.g. header only), no body request should be
  1062  // made, and instead the header should be assembled into a whole block in itself.
  1063  func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
  1064  func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
  1065  func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
  1066  func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
  1067  func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
  1068  func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }
  1069  
  1070  func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
  1071  	ctx := context.Background()
  1072  	t.Parallel()
  1073  
  1074  	tester := newTester()
  1075  	defer tester.terminate()
  1076  
  1077  	// Create a block chain to download
  1078  	targetBlocks := 2*blockCacheItems - 15
  1079  	hashes, headers, blocks, receipts := tester.makeChain(ctx, targetBlocks, 0, tester.genesis, nil, false)
  1080  
  1081  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1082  
  1083  	// Instrument the downloader to signal body requests
  1084  	bodiesHave, receiptsHave := int32(0), int32(0)
  1085  	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
  1086  		atomic.AddInt32(&bodiesHave, int32(len(headers)))
  1087  	}
  1088  	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
  1089  		atomic.AddInt32(&receiptsHave, int32(len(headers)))
  1090  	}
  1091  	// Synchronise with the peer and make sure all blocks were retrieved
  1092  	if err := tester.sync(ctx, "peer", nil, mode); err != nil {
  1093  		t.Fatalf("failed to synchronise blocks: %v", err)
  1094  	}
  1095  	assertOwnChain(t, tester, targetBlocks+1)
  1096  
  1097  	// Validate the number of block bodies that should have been requested
  1098  	bodiesNeeded, receiptsNeeded := 0, 0
  1099  	for _, block := range blocks {
  1100  		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
  1101  			bodiesNeeded++
  1102  		}
  1103  	}
  1104  	for _, receipt := range receipts {
  1105  		if mode == FastSync && len(receipt) > 0 {
  1106  			receiptsNeeded++
  1107  		}
  1108  	}
  1109  	if int(bodiesHave) != bodiesNeeded {
  1110  		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
  1111  	}
  1112  	if int(receiptsHave) != receiptsNeeded {
  1113  		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
  1114  	}
  1115  }
  1116  
  1117  // Tests that headers are enqueued continuously, preventing malicious nodes from
  1118  // stalling the downloader by feeding gapped header chains.
  1119  func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
  1120  func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
  1121  func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
  1122  func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
  1123  func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
  1124  func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }
  1125  
  1126  func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1127  	ctx := context.Background()
  1128  	t.Parallel()
  1129  
  1130  	tester := newTester()
  1131  	defer tester.terminate()
  1132  
  1133  	// Create a small enough block chain to download
  1134  	targetBlocks := blockCacheItems - 15
  1135  	hashes, headers, blocks, receipts := tester.makeChain(ctx, targetBlocks, 0, tester.genesis, nil, false)
  1136  
  1137  	// Attempt a full sync with an attacker feeding gapped headers
  1138  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1139  	missing := targetBlocks / 2
  1140  	delete(tester.peerHeaders["attack"], hashes[missing])
  1141  
  1142  	if err := tester.sync(ctx, "attack", nil, mode); err == nil {
  1143  		t.Fatalf("succeeded attacker synchronisation")
  1144  	}
  1145  	// Synchronise with the valid peer and make sure sync succeeds
  1146  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1147  	if err := tester.sync(ctx, "valid", nil, mode); err != nil {
  1148  		t.Fatalf("failed to synchronise blocks: %v", err)
  1149  	}
  1150  	assertOwnChain(t, tester, targetBlocks+1)
  1151  }
  1152  
  1153  // Tests that if requested headers are shifted (i.e. first is missing), the queue
  1154  // detects the invalid numbering.
  1155  func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
  1156  func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
  1157  func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
  1158  func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
  1159  func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
  1160  func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
  1161  
  1162  func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1163  	ctx := context.Background()
  1164  	t.Parallel()
  1165  
  1166  	tester := newTester()
  1167  	defer tester.terminate()
  1168  
  1169  	// Create a small enough block chain to download
  1170  	targetBlocks := blockCacheItems - 15
  1171  	hashes, headers, blocks, receipts := tester.makeChain(ctx, targetBlocks, 0, tester.genesis, nil, false)
  1172  
  1173  	// Attempt a full sync with an attacker feeding shifted headers
  1174  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1175  	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
  1176  	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
  1177  	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])
  1178  
  1179  	if err := tester.sync(ctx, "attack", nil, mode); err == nil {
  1180  		t.Fatalf("succeeded attacker synchronisation")
  1181  	}
  1182  	// Synchronise with the valid peer and make sure sync succeeds
  1183  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1184  	if err := tester.sync(ctx, "valid", nil, mode); err != nil {
  1185  		t.Fatalf("failed to synchronise blocks: %v", err)
  1186  	}
  1187  	assertOwnChain(t, tester, targetBlocks+1)
  1188  }
  1189  
  1190  // Tests that upon detecting an invalid header, the recent ones are rolled back
  1191  // for various failure scenarios. Afterwards a full sync is attempted to make
  1192  // sure no state was corrupted.
  1193  func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
  1194  func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
  1195  func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
  1196  
  1197  func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
  1198  	ctx := context.Background()
  1199  	t.Parallel()
  1200  
  1201  	tester := newTester()
  1202  	defer tester.terminate()
  1203  
  1204  	// Create a small enough block chain to download
  1205  	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
  1206  	hashes, headers, blocks, receipts := tester.makeChain(ctx, targetBlocks, 0, tester.genesis, nil, false)
  1207  
  1208  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1209  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1210  	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
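        	// The withheld header sits deep enough that, after the fsHeaderSafetyNet rollback,
        	// the local head may not climb past MaxHeaderFetch (asserted below).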
  1211  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
  1212  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])
  1213  
  1214  	if err := tester.sync(ctx, "fast-attack", nil, mode); err == nil {
  1215  		t.Fatalf("succeeded fast attacker synchronisation")
  1216  	}
  1217  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1218  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1219  	}
  1220  	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1221  	// This should result in both the last fsHeaderSafetyNet number of headers being
  1222  	// rolled back, and also the pivot point being reverted to a non-block status.
  1223  	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
  1224  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1225  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
  1226  	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
  1227  
  1228  	if err := tester.sync(ctx, "block-attack", nil, mode); err == nil {
  1229  		t.Fatalf("succeeded block attacker synchronisation")
  1230  	}
  1231  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1232  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1233  	}
  1234  	if mode == FastSync {
  1235  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1236  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1237  		}
  1238  	}
  1239  	// Attempt to sync with an attacker that withholds promised blocks after the
  1240  	// fast sync pivot point. This could be an attempt to leave the node with a bad
  1241  	// but already imported pivot block.
  1242  	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
  1243  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1244  
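        	// Withhold the upper part of the chain only once the sync has started, so the full
        	// height has already been advertised before the blocks past the pivot disappear.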
  1245  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1246  		for i := missing; i <= len(hashes); i++ {
  1247  			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
  1248  		}
  1249  		tester.downloader.syncInitHook = nil
  1250  	}
  1251  
  1252  	if err := tester.sync(ctx, "withhold-attack", nil, mode); err == nil {
  1253  		t.Fatalf("succeeded withholding attacker synchronisation")
  1254  	}
  1255  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1256  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1257  	}
  1258  	if mode == FastSync {
  1259  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1260  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1261  		}
  1262  	}
  1263  	// Synchronise with the valid peer and make sure sync succeeds. Since the last
  1264  	// rollback should also disable fast syncing for this process, verify that we
  1265  	// did a fresh full sync. Note, we can't assert anything about the receipts
  1266  	// since we won't purge the database of them, hence we can't use assertOwnChain.
  1267  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1268  	if err := tester.sync(ctx, "valid", nil, mode); err != nil {
  1269  		t.Fatalf("failed to synchronise blocks: %v", err)
  1270  	}
  1271  	if hs := len(tester.ownHeaders); hs != len(headers) {
  1272  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
  1273  	}
  1274  	if mode != LightSync {
  1275  		if bs := len(tester.ownBlocks); bs != len(blocks) {
  1276  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
  1277  		}
  1278  	}
  1279  }
  1280  
  1281  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1282  // afterwards by not sending any useful hashes.
  1283  func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
  1284  func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1285  func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
  1286  func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
  1287  func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
  1288  func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
  1289  
  1290  func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1291  	ctx := context.Background()
  1292  	t.Parallel()
  1293  
  1294  	tester := newTester()
  1295  	defer tester.terminate()
  1296  
  1297  	hashes, headers, blocks, receipts := tester.makeChain(ctx, 0, 0, tester.genesis, nil, false)
  1298  	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
  1299  
  1300  	if err := tester.sync(ctx, "attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1301  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1302  	}
  1303  }
  1304  
  1305  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1306  func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
  1307  func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1308  func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1309  
  1310  func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1311  	ctx := context.Background()
  1312  	t.Parallel()
  1313  
  1314  	// Define the disconnection requirement for individual hash fetch errors
  1315  	tests := []struct {
  1316  		result error
  1317  		drop   bool
  1318  	}{
  1319  		{nil, false},                        // Sync succeeded, all is well
  1320  		{errBusy, false},                    // Sync is already in progress, no problem
  1321  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1322  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1323  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1324  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1325  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1326  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1327  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1328  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1329  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1330  		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
  1331  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1332  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1333  		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
  1334  		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
  1335  		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
  1336  		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
  1337  		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
  1338  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1339  	}
  1340  	// Run the tests and check disconnection status
  1341  	tester := newTester()
  1342  	defer tester.terminate()
  1343  
  1344  	for i, tt := range tests {
  1345  		// Register a new peer and ensure its presence
  1346  		id := fmt.Sprintf("test %d", i)
  1347  		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
  1348  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1349  		}
  1350  		if _, ok := tester.peerHashes[id]; !ok {
  1351  			t.Fatalf("test %d: registered peer not found", i)
  1352  		}
  1353  		// Simulate a synchronisation and check the required result
  1354  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1355  
  1356  		tester.downloader.Synchronise(ctx, id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
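        		// The peer should be gone exactly when the simulated error marks it as droppable.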
  1357  		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
  1358  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1359  		}
  1360  	}
  1361  }
  1362  
  1363  // Tests that synchronisation progress (origin block number, current block number
  1364  // and highest block number) is tracked and updated correctly.
  1365  func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
  1366  func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1367  func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
  1368  func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
  1369  func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
  1370  func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
  1371  
  1372  func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1373  	ctx := context.Background()
  1374  	t.Parallel()
  1375  
  1376  	tester := newTester()
  1377  	defer tester.terminate()
  1378  
  1379  	// Create a small enough block chain to download
  1380  	targetBlocks := blockCacheItems - 15
  1381  	hashes, headers, blocks, receipts := tester.makeChain(ctx, targetBlocks, 0, tester.genesis, nil, false)
  1382  
  1383  	// Set a sync init hook to catch progress changes
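        	// The hook parks the downloader right after initialisation; the test samples
        	// Progress() while it is parked, then releases it via the progress channel.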
  1384  	starting := make(chan struct{})
  1385  	progress := make(chan struct{})
  1386  
  1387  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1388  		starting <- struct{}{}
  1389  		<-progress
  1390  	}
  1391  	// Retrieve the sync progress and ensure all values are zero (pristine sync)
  1392  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1393  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1394  	}
  1395  	// Synchronise half the blocks and check initial progress
  1396  	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts)
  1397  	pending := new(sync.WaitGroup)
  1398  	pending.Add(1)
  1399  
  1400  	go func() {
  1401  		defer pending.Done()
  1402  		if err := tester.sync(ctx, "peer-half", nil, mode); err != nil {
  1403  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1404  		}
  1405  	}()
  1406  	<-starting
  1407  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) {
  1408  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1)
  1409  	}
  1410  	progress <- struct{}{}
  1411  	pending.Wait()
  1412  
  1413  	// Synchronise all the blocks and check continuation progress
  1414  	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts)
  1415  	pending.Add(1)
  1416  
  1417  	go func() {
  1418  		defer pending.Done()
  1419  		if err := tester.sync(ctx, "peer-full", nil, mode); err != nil {
  1420  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1421  		}
  1422  	}()
  1423  	<-starting
  1424  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) {
  1425  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
  1426  	}
  1427  	progress <- struct{}{}
  1428  	pending.Wait()
  1429  
  1430  	// Check final progress after successful sync
  1431  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1432  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks)
  1433  	}
  1434  }
  1435  
  1436  // Tests that synchronisation progress (origin block number and highest block
  1437  // number) is tracked and updated correctly in case of a fork (or manual head
  1438  // reversal).
  1439  func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1440  func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1441  func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1442  func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1443  func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
  1444  func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
  1445  
  1446  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1447  	ctx := context.Background()
  1448  	t.Parallel()
  1449  
  1450  	tester := newTester()
  1451  	defer tester.terminate()
  1452  
  1453  	// Create a forked chain to simulate origin reversal
  1454  	common, fork := MaxHashFetch, 2*MaxHashFetch
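        	// Note: the local `common` (the length of the shared prefix) shadows the imported
        	// common package for the remainder of this function.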
  1455  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(ctx, common+fork, fork, tester.genesis, nil, true)
  1456  
  1457  	// Set a sync init hook to catch progress changes
  1458  	starting := make(chan struct{})
  1459  	progress := make(chan struct{})
  1460  
  1461  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1462  		starting <- struct{}{}
  1463  		<-progress
  1464  	}
  1465  	// Retrieve the sync progress and ensure all values are zero (pristine sync)
  1466  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1467  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1468  	}
  1469  	// Synchronise with one of the forks and check progress
  1470  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
  1471  	pending := new(sync.WaitGroup)
  1472  	pending.Add(1)
  1473  
  1474  	go func() {
  1475  		defer pending.Done()
  1476  		if err := tester.sync(ctx, "fork A", nil, mode); err != nil {
  1477  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1478  		}
  1479  	}()
  1480  	<-starting
  1481  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(len(hashesA)-1) {
  1482  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, len(hashesA)-1)
  1483  	}
  1484  	progress <- struct{}{}
  1485  	pending.Wait()
  1486  
  1487  	// Simulate a successful sync above the fork
  1488  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1489  
  1490  	// Synchronise with the second fork and check progress resets
  1491  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
  1492  	pending.Add(1)
  1493  
  1494  	go func() {
  1495  		defer pending.Done()
  1496  		if err := tester.sync(ctx, "fork B", nil, mode); err != nil {
  1497  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1498  		}
  1499  	}()
  1500  	<-starting
  1501  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesA)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1502  		t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesA)-1, len(hashesB)-1)
  1503  	}
  1504  	progress <- struct{}{}
  1505  	pending.Wait()
  1506  
  1507  	// Check final progress after successful sync
  1508  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesB)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1509  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesB)-1, len(hashesB)-1)
  1510  	}
  1511  }
  1512  
  1513  // Tests that if synchronisation is aborted due to some failure, then the progress
  1514  // origin is not updated in the next sync cycle, as it should be considered the
  1515  // continuation of the previous sync and not a new instance.
  1516  func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1517  func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1518  func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1519  func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1520  func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1521  func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1522  
  1523  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1524  	ctx := context.Background()
  1525  	t.Parallel()
  1526  
  1527  	tester := newTester()
  1528  	defer tester.terminate()
  1529  
  1530  	// Create a small enough block chain to download
  1531  	targetBlocks := blockCacheItems - 15
  1532  	hashes, headers, blocks, receipts := tester.makeChain(ctx, targetBlocks, 0, tester.genesis, nil, false)
  1533  
  1534  	// Set a sync init hook to catch progress changes
  1535  	starting := make(chan struct{})
  1536  	progress := make(chan struct{})
  1537  
  1538  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1539  		starting <- struct{}{}
  1540  		<-progress
  1541  	}
  1542  	// Retrieve the sync progress and ensure all values are zero (pristine sync)
  1543  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1544  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1545  	}
  1546  	// Attempt a full sync with a faulty peer
  1547  	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
  1548  	missing := targetBlocks / 2
  1549  	delete(tester.peerHeaders["faulty"], hashes[missing])
  1550  	delete(tester.peerBlocks["faulty"], hashes[missing])
  1551  	delete(tester.peerReceipts["faulty"], hashes[missing])
  1552  
  1553  	pending := new(sync.WaitGroup)
  1554  	pending.Add(1)
  1555  
  1556  	go func() {
  1557  		defer pending.Done()
  1558  		if err := tester.sync(ctx, "faulty", nil, mode); err == nil {
  1559  			panic("succeeded faulty synchronisation")
  1560  		}
  1561  	}()
  1562  	<-starting
  1563  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
  1564  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
  1565  	}
  1566  	progress <- struct{}{}
  1567  	pending.Wait()
  1568  
  1569  	// Synchronise with a good peer and check that the progress origin remains the same after a failure
  1570  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1571  	pending.Add(1)
  1572  
  1573  	go func() {
  1574  		defer pending.Done()
  1575  		if err := tester.sync(ctx, "valid", nil, mode); err != nil {
  1576  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1577  		}
  1578  	}()
  1579  	<-starting
  1580  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
  1581  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
  1582  	}
  1583  	progress <- struct{}{}
  1584  	pending.Wait()
  1585  
  1586  	// Check final progress after successful sync
  1587  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1588  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks)
  1589  	}
  1590  }
  1591  
  1592  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1593  // the progress height is successfully reduced at the next sync invocation.
  1594  func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1595  func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1596  func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1597  func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1598  func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1599  func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1600  
  1601  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1602  	ctx := context.Background()
  1603  	t.Parallel()
  1604  
  1605  	tester := newTester()
  1606  	defer tester.terminate()
  1607  
  1608  	// Create a small block chain
  1609  	targetBlocks := blockCacheItems - 15
  1610  	hashes, headers, blocks, receipts := tester.makeChain(ctx, targetBlocks+3, 0, tester.genesis, nil, false)
  1611  
  1612  	// Set a sync init hook to catch progress changes
  1613  	starting := make(chan struct{})
  1614  	progress := make(chan struct{})
  1615  
  1616  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1617  		starting <- struct{}{}
  1618  		<-progress
  1619  	}
  1620  	// Retrieve the sync progress and ensure all values are zero (pristine sync)
  1621  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1622  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1623  	}
  1624  	// Create and sync with an attacker that promises a higher chain than available
  1625  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
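        	// Remove the two blocks just below the advertised head: the attacker still announces
        	// the full targetBlocks+3 height but can never deliver it.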
  1626  	for i := 1; i < 3; i++ {
  1627  		delete(tester.peerHeaders["attack"], hashes[i])
  1628  		delete(tester.peerBlocks["attack"], hashes[i])
  1629  		delete(tester.peerReceipts["attack"], hashes[i])
  1630  	}
  1631  
  1632  	pending := new(sync.WaitGroup)
  1633  	pending.Add(1)
  1634  
  1635  	go func() {
  1636  		defer pending.Done()
  1637  		if err := tester.sync(ctx, "attack", nil, mode); err == nil {
  1638  			panic("succeeded attacker synchronisation")
  1639  		}
  1640  	}()
  1641  	<-starting
  1642  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) {
  1643  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3)
  1644  	}
  1645  	progress <- struct{}{}
  1646  	pending.Wait()
  1647  
  1648  	// Synchronise with a good peer and check that the progress height has been reduced to the true value
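        	// hashes[3:] caps the valid peer's head at the true height of targetBlocks, three
        	// blocks below what the attacker advertised.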
  1649  	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts)
  1650  	pending.Add(1)
  1651  
  1652  	go func() {
  1653  		defer pending.Done()
  1654  		if err := tester.sync(ctx, "valid", nil, mode); err != nil {
  1655  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1656  		}
  1657  	}()
  1658  	<-starting
  1659  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1660  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks)
  1661  	}
  1662  	progress <- struct{}{}
  1663  	pending.Wait()
  1664  
  1665  	// Check final progress after successful sync
  1666  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1667  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks)
  1668  	}
  1669  }
  1670  
  1671  // This test reproduces an issue where unexpected deliveries would
  1672  // block indefinitely if they arrived at the right time.
  1673  // We use data-driven subtests so the cases run in parallel with each other
  1674  // but not with the other tests, avoiding intermittent failures.
  1675  func TestDeliverHeadersHang(t *testing.T) {
  1676  	testCases := []struct {
  1677  		protocol int
  1678  		syncMode SyncMode
  1679  	}{
  1680  		{62, FullSync},
  1681  		{63, FullSync},
  1682  		{63, FastSync},
  1683  		{64, FullSync},
  1684  		{64, FastSync},
  1685  		{64, LightSync},
  1686  	}
  1687  	for _, tc := range testCases {
  1688  		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
  1689  			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
  1690  		})
  1691  	}
  1692  }
  1693  
  1694  type floodingTestPeer struct {
  1695  	peer   Peer
  1696  	tester *downloadTester
  1697  	pend   sync.WaitGroup
  1698  }
  1699  
  1700  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1701  func (ftp *floodingTestPeer) RequestHeadersByHash(ctx context.Context, hash common.Hash, count int, skip int, reverse bool) error {
  1702  	return ftp.peer.RequestHeadersByHash(ctx, hash, count, skip, reverse)
  1703  }
  1704  func (ftp *floodingTestPeer) RequestBodies(ctx context.Context, hashes []common.Hash) error {
  1705  	return ftp.peer.RequestBodies(ctx, hashes)
  1706  }
  1707  func (ftp *floodingTestPeer) RequestReceipts(ctx context.Context, hashes []common.Hash) error {
  1708  	return ftp.peer.RequestReceipts(ctx, hashes)
  1709  }
  1710  func (ftp *floodingTestPeer) RequestNodeData(ctx context.Context, hashes []common.Hash) error {
  1711  	return ftp.peer.RequestNodeData(ctx, hashes)
  1712  }
  1713  
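        // RequestHeadersByNumber floods the downloader with unsolicited header deliveries from
        // 500 fake peers before answering the real request, and panics if those deliveries are
        // still blocked after a minute.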
  1714  func (ftp *floodingTestPeer) RequestHeadersByNumber(ctx context.Context, from uint64, count, skip int, reverse bool) error {
  1715  	deliveriesDone := make(chan struct{}, 500)
  1716  	for i := 0; i < cap(deliveriesDone); i++ {
  1717  		peer := fmt.Sprintf("fake-peer%d", i)
  1718  		ftp.pend.Add(1)
  1719  
  1720  		go func() {
  1721  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1722  			deliveriesDone <- struct{}{}
  1723  			ftp.pend.Done()
  1724  		}()
  1725  	}
  1726  	// Deliver the actual requested headers.
  1727  	go ftp.peer.RequestHeadersByNumber(context.Background(), from, count, skip, reverse)
  1728  	// None of the extra deliveries should block.
  1729  	timeout := time.After(60 * time.Second)
  1730  	for i := 0; i < cap(deliveriesDone); i++ {
  1731  		select {
  1732  		case <-deliveriesDone:
  1733  		case <-timeout:
  1734  			panic("blocked")
  1735  		}
  1736  	}
  1737  	return nil
  1738  }
  1739  
  1740  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1741  	ctx := context.Background()
  1742  	t.Parallel()
  1743  
  1744  	master := newTester()
  1745  	defer master.terminate()
  1746  
  1747  	hashes, headers, blocks, receipts := master.makeChain(ctx, 5, 0, master.genesis, nil, false)
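        	// Repeat many times: the hang only showed up when the flood and the real delivery
        	// raced just right, so a single iteration is not enough to reproduce it.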
  1748  	for i := 0; i < 200; i++ {
  1749  		tester := newTester()
  1750  		tester.peerDb = master.peerDb
  1751  
  1752  		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1753  		// Whenever the downloader requests headers, flood it with
  1754  		// a lot of unrequested header deliveries.
  1755  		testPeer := &floodingTestPeer{
  1756  			peer:   tester.downloader.peers.peers["peer"].peer,
  1757  			tester: tester,
  1758  		}
  1759  		tester.downloader.peers.peers["peer"].peer = testPeer
  1760  		if err := tester.sync(ctx, "peer", nil, mode); err != nil {
  1761  			t.Errorf("test %d: sync failed: %v", i, err)
  1762  		}
  1763  		tester.terminate()
  1764  
  1765  		// Hack to mitigate racy use of WaitGroup
  1766  		time.Sleep(100 * time.Millisecond)
  1767  		// Flush all goroutines to prevent messing with subsequent tests
  1768  		testPeer.pend.Wait()
  1769  	}
  1770  }