github.com/ylsgit/go-ethereum@v1.6.5/eth/downloader/downloader_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/trie"
)

var (
	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
)

// Reduce some of the parameters to make the tester faster.
func init() {
	MaxForkAncestry = uint64(10000)
	blockCacheLimit = 1024
	fsCriticalTrials = 10
}

// downloadTester is a test simulator for mocking out a local block chain.
type downloadTester struct {
	downloader *Downloader

	genesis *types.Block   // Genesis block used by the tester and peers
	stateDb ethdb.Database // Database used by the tester for syncing from peers
	peerDb  ethdb.Database // Database of the peers containing all data

	ownHashes   []common.Hash                  // Hash chain belonging to the tester
	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain

	peerHashes   map[string][]common.Hash                  // Hash chain belonging to different test peers
	peerHeaders  map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
	peerBlocks   map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
	peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
	peerChainTds map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains

	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return

	lock sync.RWMutex
}

// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	testdb, _ := ethdb.NewMemDatabase()
	genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))

	tester := &downloadTester{
		genesis:           genesis,
		peerDb:            testdb,
		ownHashes:         []common.Hash{genesis.Hash()},
		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
		peerHashes:        make(map[string][]common.Hash),
		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
		peerMissingStates: make(map[string]map[common.Hash]bool),
	}
	tester.stateDb, _ = ethdb.NewMemDatabase()
	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})

	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester.hasHeader, tester.hasBlock, tester.getHeader,
		tester.getBlock, tester.headHeader, tester.headBlock, tester.headFastBlock, tester.commitHeadBlock, tester.getTd,
		tester.insertHeaders, tester.insertBlocks, tester.insertReceipts, tester.rollback, tester.dropPeer)

	return tester
}

// makeChain creates a chain of n blocks starting at and including parent.
// The returned hash chain is ordered head->parent. In addition, every 3rd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
	// Generate the block chain
	blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, dl.peerDb, n, func(i int, block *core.BlockGen) {
		block.SetCoinbase(common.Address{seed})

		// If a heavy chain is requested, delay blocks to raise difficulty
		if heavy {
			block.OffsetTime(-1)
		}
		// If the block number is a multiple of 3, send a bonus transaction to the miner
		if parent == dl.genesis && i%3 == 0 {
			signer := types.MakeSigner(params.TestChainConfig, block.Number())
			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), new(big.Int).SetUint64(params.TxGas), nil, nil), signer, testKey)
			if err != nil {
				panic(err)
			}
			block.AddTx(tx)
		}
		// If the block number is a multiple of 5, add a bonus uncle to the block
		if i > 0 && i%5 == 0 {
			block.AddUncle(&types.Header{
				ParentHash: block.PrevBlock(i - 1).Hash(),
				Number:     big.NewInt(block.Number().Int64() - 1),
			})
		}
	})
	// Convert the block-chain into a hash-chain and header/block maps
	hashes := make([]common.Hash, n+1)
	hashes[len(hashes)-1] = parent.Hash()

	headerm := make(map[common.Hash]*types.Header, n+1)
	headerm[parent.Hash()] = parent.Header()

	blockm := make(map[common.Hash]*types.Block, n+1)
	blockm[parent.Hash()] = parent

	receiptm := make(map[common.Hash]types.Receipts, n+1)
	receiptm[parent.Hash()] = parentReceipts

	for i, b := range blocks {
		hashes[len(hashes)-i-2] = b.Hash()
		headerm[b.Hash()] = b.Header()
		blockm[b.Hash()] = b
		receiptm[b.Hash()] = receipts[i]
	}
	return hashes, headerm, blockm, receiptm
}
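
// Illustrative sketch (an addition for clarity, not part of the original
// tester): because the hash slice is filled from the tail, the seeding parent
// always occupies the last slot and the freshest block the first:
//
//	hashes, _, blocks, _ := dl.makeChain(3, 0, dl.genesis, nil, false)
//	// hashes[3] == dl.genesis.Hash()  -> the seeding parent (oldest)
//	// blocks[hashes[0]]               -> the head of the new chain (newest)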

// makeChainFork creates two chains of length n, such that h1[:f] and
// h2[:f] are different but have a common suffix of length n-f.
func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
	// Create the common suffix
	hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)

	// Create the forks, making the second heavier if unbalanced forks were requested
	hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
	hashes1 = append(hashes1, hashes[1:]...)

	heavy := false
	if !balanced {
		heavy = true
	}
	hashes2, headers2, blocks2, receipts2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
	hashes2 = append(hashes2, hashes[1:]...)

	for hash, header := range headers {
		headers1[hash] = header
		headers2[hash] = header
	}
	for hash, block := range blocks {
		blocks1[hash] = block
		blocks2[hash] = block
	}
	for hash, receipt := range receipts {
		receipts1[hash] = receipt
		receipts2[hash] = receipt
	}
	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
}
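
// A small illustrative sketch (added, not in the original helper): with a
// total length of 8 and a fork length of 3, the two returned hash chains
// diverge only in their three newest entries:
//
//	hashesA, hashesB, _, _, _, _, _, _ := dl.makeChainFork(8, 3, dl.genesis, nil, true)
//	// hashesA[3:] equals hashesB[3:] (the shared trunk back to genesis)
//	// hashesA[:3] and hashesB[:3] diverge (fork seeds 1 and 2)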

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
}

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	dl.lock.RLock()
	hash := dl.peerHashes[id][0]
	// If no particular TD was requested, load from the peer's blockchain
	if td == nil {
		td = big.NewInt(1)
		if diff, ok := dl.peerChainTds[id][hash]; ok {
			td = diff
		}
	}
	dl.lock.RUnlock()

	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, hash, td, mode)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}
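
// A minimal usage sketch (illustrative, relying only on the helpers above):
// generate a chain, hand it to a registered peer and pull it in with a full
// sync:
//
//	tester := newTester()
//	defer tester.terminate()
//
//	hashes, headers, blocks, receipts := tester.makeChain(10, 0, tester.genesis, nil, false)
//	tester.newPeer("peer", 63, hashes, headers, blocks, receipts)
//	if err := tester.sync("peer", nil, FullSync); err != nil {
//		// the synchronisation failed
//	}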

// hasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) hasHeader(hash common.Hash) bool {
	return dl.getHeader(hash) != nil
}

// hasBlock checks if a block and associated state is present in the tester's canonical chain.
func (dl *downloadTester) hasBlock(hash common.Hash) bool {
	block := dl.getBlock(hash)
	if block == nil {
		return false
	}
	_, err := dl.stateDb.Get(block.Root().Bytes())
	return err == nil
}

// getHeader retrieves a header from the tester's canonical chain.
func (dl *downloadTester) getHeader(hash common.Hash) *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownHeaders[hash]
}

// getBlock retrieves a block from the tester's canonical chain.
func (dl *downloadTester) getBlock(hash common.Hash) *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownBlocks[hash]
}

// headHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) headHeader() *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
	}
	return dl.genesis.Header()
}

// headBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) headBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
		}
	}
	return dl.genesis
}

// headFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) headFastBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
	}
	return dl.genesis
}

// commitHeadBlock manually sets the head block to a given hash.
func (dl *downloadTester) commitHeadBlock(hash common.Hash) error {
	// For now only check that the state trie is correct
	if block := dl.getBlock(hash); block != nil {
		_, err := trie.NewSecure(block.Root(), dl.stateDb, 0)
		return err
	}
	return fmt.Errorf("non-existent block: %x", hash[:4])
}

// getTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) getTd(hash common.Hash) *big.Int {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownChainTd[hash]
}

// insertHeaders injects a new batch of headers into the simulated chain.
func (dl *downloadTester) insertHeaders(headers []*types.Header, checkFreq int) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
		return 0, errors.New("unknown parent")
	}
	for i := 1; i < len(headers); i++ {
		if headers[i].ParentHash != headers[i-1].Hash() {
			return i, errors.New("unknown parent")
		}
	}
	// Do a full insert if pre-checks passed
	for i, header := range headers {
		if _, ok := dl.ownHeaders[header.Hash()]; ok {
			continue
		}
		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownHashes = append(dl.ownHashes, header.Hash())
		dl.ownHeaders[header.Hash()] = header
		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
	}
	return len(headers), nil
}

// insertBlocks injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) insertBlocks(blocks types.Blocks) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i, block := range blocks {
		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
		}
		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
			dl.ownHashes = append(dl.ownHashes, block.Hash())
			dl.ownHeaders[block.Hash()] = block.Header()
		}
		dl.ownBlocks[block.Hash()] = block
		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
	}
	return len(blocks), nil
}

// insertReceipts injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) insertReceipts(blocks types.Blocks, receipts []types.Receipts) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := 0; i < len(blocks) && i < len(receipts); i++ {
		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
			return i, errors.New("unknown owner")
		}
		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
	}
	return len(blocks), nil
}

// rollback removes some recently added elements from the chain.
func (dl *downloadTester) rollback(hashes []common.Hash) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := len(hashes) - 1; i >= 0; i-- {
		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
		}
		delete(dl.ownChainTd, hashes[i])
		delete(dl.ownHeaders, hashes[i])
		delete(dl.ownReceipts, hashes[i])
		delete(dl.ownBlocks, hashes[i])
	}
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0)
}

// newSlowPeer registers a new block download source into the downloader, with a
// specific delay time on processing the network packets sent to it, simulating
// potentially slow network IO.
func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	var err error
	switch version {
	case 62:
		err = dl.downloader.RegisterPeer(id, version, dl.peerCurrentHeadFn(id), dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), nil, nil)
	case 63:
		err = dl.downloader.RegisterPeer(id, version, dl.peerCurrentHeadFn(id), dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
	case 64:
		err = dl.downloader.RegisterPeer(id, version, dl.peerCurrentHeadFn(id), dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
	}
	if err == nil {
		// Assign the owned hashes, headers and blocks to the peer (deep copy)
		dl.peerHashes[id] = make([]common.Hash, len(hashes))
		copy(dl.peerHashes[id], hashes)

		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
		dl.peerMissingStates[id] = make(map[common.Hash]bool)

		genesis := hashes[len(hashes)-1]
		if header := headers[genesis]; header != nil {
			dl.peerHeaders[id][genesis] = header
			dl.peerChainTds[id][genesis] = header.Difficulty
		}
		if block := blocks[genesis]; block != nil {
			dl.peerBlocks[id][genesis] = block
			dl.peerChainTds[id][genesis] = block.Difficulty()
		}

		for i := len(hashes) - 2; i >= 0; i-- {
			hash := hashes[i]

			if header, ok := headers[hash]; ok {
				dl.peerHeaders[id][hash] = header
				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
				}
			}
			if block, ok := blocks[hash]; ok {
				dl.peerBlocks[id][hash] = block
				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
				}
			}
			if receipt, ok := receipts[hash]; ok {
				dl.peerReceipts[id][hash] = receipt
			}
		}
	}
	return err
}
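
// Illustrative note (an addition, not in the original code): the loop above
// walks the hash chain from the genesis slot towards the head, so cumulative
// total difficulty is built up parent-first:
//
//	// td(genesis) = difficulty(genesis)
//	// td(block)   = difficulty(block) + td(parent)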

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peerHashes, id)
	delete(dl.peerHeaders, id)
	delete(dl.peerBlocks, id)
	delete(dl.peerChainTds, id)

	dl.downloader.UnregisterPeer(id)
}

// peerCurrentHeadFn constructs a function to retrieve a peer's current head hash
// and total difficulty.
func (dl *downloadTester) peerCurrentHeadFn(id string) func() (common.Hash, *big.Int) {
	return func() (common.Hash, *big.Int) {
		dl.lock.RLock()
		defer dl.lock.RUnlock()

		return dl.peerHashes[id][0], nil
	}
}

// peerGetRelHeadersFn constructs a GetBlockHeaders function based on a hashed
// origin, associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dl *downloadTester) peerGetRelHeadersFn(id string, delay time.Duration) func(common.Hash, int, int, bool) error {
	return func(origin common.Hash, amount int, skip int, reverse bool) error {
		// Find the canonical number of the hash
		dl.lock.RLock()
		number := uint64(0)
		for num, hash := range dl.peerHashes[id] {
			if hash == origin {
				number = uint64(len(dl.peerHashes[id]) - num - 1)
				break
			}
		}
		dl.lock.RUnlock()

		// Use the absolute header fetcher to satisfy the query
		return dl.peerGetAbsHeadersFn(id, delay)(number, amount, skip, reverse)
	}
}

// peerGetAbsHeadersFn constructs a GetBlockHeaders function based on a numbered
// origin, associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dl *downloadTester) peerGetAbsHeadersFn(id string, delay time.Duration) func(uint64, int, int, bool) error {
	return func(origin uint64, amount int, skip int, reverse bool) error {
		time.Sleep(delay)

		dl.lock.RLock()
		defer dl.lock.RUnlock()

		// Gather the next batch of headers
		hashes := dl.peerHashes[id]
		headers := dl.peerHeaders[id]
		result := make([]*types.Header, 0, amount)
		for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
			if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
				result = append(result, header)
			}
		}
		// Delay delivery a bit to allow attacks to unfold
		go func() {
			time.Sleep(time.Millisecond)
			dl.downloader.DeliverHeaders(id, result)
		}()
		return nil
	}
}
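
// An illustrative sketch of the index arithmetic above (added for clarity):
// the peer's hash chain is ordered head->genesis, so block number `origin`
// lives at slice index len(hashes)-1-origin, and every further header in the
// batch advances by skip+1 block numbers:
//
//	// index of the i-th header of the batch, counting from block number origin
//	idx := len(hashes) - 1 - int(origin) - i*(skip+1)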

// peerGetBodiesFn constructs a getBlockBodies method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block bodies from the requested peer.
func (dl *downloadTester) peerGetBodiesFn(id string, delay time.Duration) func([]common.Hash) error {
	return func(hashes []common.Hash) error {
		time.Sleep(delay)

		dl.lock.RLock()
		defer dl.lock.RUnlock()

		blocks := dl.peerBlocks[id]

		transactions := make([][]*types.Transaction, 0, len(hashes))
		uncles := make([][]*types.Header, 0, len(hashes))

		for _, hash := range hashes {
			if block, ok := blocks[hash]; ok {
				transactions = append(transactions, block.Transactions())
				uncles = append(uncles, block.Uncles())
			}
		}
		go dl.downloader.DeliverBodies(id, transactions, uncles)

		return nil
	}
}

// peerGetReceiptsFn constructs a getReceipts method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block receipts from the requested peer.
func (dl *downloadTester) peerGetReceiptsFn(id string, delay time.Duration) func([]common.Hash) error {
	return func(hashes []common.Hash) error {
		time.Sleep(delay)

		dl.lock.RLock()
		defer dl.lock.RUnlock()

		receipts := dl.peerReceipts[id]

		results := make([][]*types.Receipt, 0, len(hashes))
		for _, hash := range hashes {
			if receipt, ok := receipts[hash]; ok {
				results = append(results, receipt)
			}
		}
		go dl.downloader.DeliverReceipts(id, results)

		return nil
	}
}

// peerGetNodeDataFn constructs a getNodeData method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of node state data from the requested peer.
func (dl *downloadTester) peerGetNodeDataFn(id string, delay time.Duration) func([]common.Hash) error {
	return func(hashes []common.Hash) error {
		time.Sleep(delay)

		dl.lock.RLock()
		defer dl.lock.RUnlock()

		results := make([][]byte, 0, len(hashes))
		for _, hash := range hashes {
			if data, err := dl.peerDb.Get(hash.Bytes()); err == nil {
				if !dl.peerMissingStates[id][hash] {
					results = append(results, data)
				}
			}
		}
		go dl.downloader.DeliverNodeData(id, results)

		return nil
	}
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	assertOwnForkedChain(t, tester, 1, []int{length})
}

// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
	// Initialize the counters for the first fork
	headers, blocks := lengths[0], lengths[0]

	minReceipts, maxReceipts := lengths[0]-fsMinFullBlocks-fsPivotInterval, lengths[0]-fsMinFullBlocks
	if minReceipts < 0 {
		minReceipts = 1
	}
	if maxReceipts < 0 {
		maxReceipts = 1
	}
	// Update the counters for each subsequent fork
	for _, length := range lengths[1:] {
		headers += length - common
		blocks += length - common

		minReceipts += length - common - fsMinFullBlocks - fsPivotInterval
		maxReceipts += length - common - fsMinFullBlocks
	}
	switch tester.downloader.mode {
	case FullSync:
		minReceipts, maxReceipts = 1, 1
	case LightSync:
		blocks, minReceipts, maxReceipts = 1, 1, 1
	}
	if hs := len(tester.ownHeaders); hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := len(tester.ownBlocks); bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := len(tester.ownReceipts); rs < minReceipts || rs > maxReceipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want between [%v, %v]", rs, minReceipts, maxReceipts)
	}
	// Verify the state trie too for fast syncs
	if tester.downloader.mode == FastSync {
		var index int
		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
			index = pivot
		} else {
			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
		}
		if index > 0 {
			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, tester.stateDb); statedb == nil || err != nil {
				t.Fatalf("state reconstruction failed: %v", err)
			}
		}
	}
}
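
// An illustrative note on the receipt bounds above (added, not original): in
// fast sync only blocks at or below the pivot carry receipts, and the pivot
// floats within fsPivotInterval blocks of head-fsMinFullBlocks, giving:
//
//	// maxReceipts = length - fsMinFullBlocks                    (highest pivot)
//	// minReceipts = length - fsMinFullBlocks - fsPivotInterval  (lowest pivot)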

// Tests that simple synchronization against a canonical chain works correctly.
// In this test common ancestor lookup should be short circuited and not require
// binary searching.
func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }

func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that if a large batch of blocks is being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }

func testThrottling(t *testing.T, protocol int, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	// Create a long block chain to download and the tester
	targetBlocks := 8 * blockCacheLimit
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (! reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			cached = len(tester.downloader.queue.blockDonePool)
			if mode == FastSync {
				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
					if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
						cached = receipts
					}
				}
			}
			frozen = int(atomic.LoadUint32(&blocked))
			retrieved = len(tester.ownBlocks)
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			if cached == blockCacheLimit || retrieved+cached+frozen == targetBlocks+1 {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up

		tester.lock.RLock()
		retrieved = len(tester.ownBlocks)
		tester.lock.RUnlock()
		if cached != blockCacheLimit && retrieved+cached+frozen != targetBlocks+1 {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheLimit, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}

// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }

func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := MaxHashFetch, 2*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)

	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("fork B", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := MaxHashFetch, 4*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)

	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("light", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("heavy", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := 13, int(MaxForkAncestry+17)
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)

	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }

func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := 13, int(MaxForkAncestry+17)
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)

	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader62(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers nor bodies are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that an inactive downloader will not accept incoming block headers,
// bodies and receipts.
func TestInactiveDownloader63(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that block headers, bodies and receipts are all rejected
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }

func testCancel(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download and the tester
	targetBlocks := blockCacheLimit - 15
	if targetBlocks >= MaxHashFetch {
		targetBlocks = MaxHashFetch - 15
	}
	if targetBlocks >= MaxHeaderFetch {
		targetBlocks = MaxHeaderFetch - 15
	}
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Make sure canceling works with a pristine downloader
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create various peers with various parts of the chain
	targetPeers := 8
	targetBlocks := targetPeers*blockCacheLimit - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, protocol, hashes[i*blockCacheLimit:], headers, blocks, receipts)
	}
	if err := tester.sync("peer #0", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }

func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Create peers of every type
	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)

	// Check that no peers have been dropped off
	for _, version := range []int{62, 63, 64} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peerHashes[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block by itself.
func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a block chain to download
	targetBlocks := 2*blockCacheLimit - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Instrument the downloader to signal body requests
	bodiesHave, receiptsHave := int32(0), int32(0)
	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&bodiesHave, int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&receiptsHave, int32(len(headers)))
	}
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)

	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range blocks {
		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
			bodiesNeeded++
		}
	}
	for hash, receipt := range receipts {
		if mode == FastSync && len(receipt) > 0 && headers[hash].Number.Uint64() <= tester.downloader.queue.fastSyncPivot {
			receiptsNeeded++
		}
	}
	if int(bodiesHave) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
	}
	if int(receiptsHave) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
	}
}
  1121  
  1122  // Tests that headers are enqueued continuously, preventing malicious nodes from
  1123  // stalling the downloader by feeding gapped header chains.
  1124  func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
  1125  func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
  1126  func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
  1127  func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
  1128  func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
  1129  func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }
  1130  
  1131  func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1132  	t.Parallel()
  1133  
  1134  	tester := newTester()
  1135  	defer tester.terminate()
  1136  
  1137  	// Create a small enough block chain to download
  1138  	targetBlocks := blockCacheLimit - 15
  1139  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1140  
  1141  	// Attempt a full sync with an attacker feeding gapped headers
  1142  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1143  	missing := targetBlocks / 2
  1144  	delete(tester.peerHeaders["attack"], hashes[missing])
  1145  
  1146  	if err := tester.sync("attack", nil, mode); err == nil {
  1147  		t.Fatalf("succeeded attacker synchronisation")
  1148  	}
  1149  	// Synchronise with the valid peer and make sure sync succeeds
  1150  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1151  	if err := tester.sync("valid", nil, mode); err != nil {
  1152  		t.Fatalf("failed to synchronise blocks: %v", err)
  1153  	}
  1154  	assertOwnChain(t, tester, targetBlocks+1)
  1155  }
  1156  
  1157  // Tests that if requested headers are shifted (i.e. first is missing), the queue
  1158  // detects the invalid numbering.
  1159  func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
  1160  func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
  1161  func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
  1162  func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
  1163  func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
  1164  func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
  1165  
  1166  func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1167  	tester := newTester()
  1168  	defer tester.terminate()
  1169  
  1170  	// Create a small enough block chain to download
  1171  	targetBlocks := blockCacheLimit - 15
  1172  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1173  
  1174  	// Attempt a full sync with an attacker feeding shifted headers
  1175  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1176  	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
  1177  	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
  1178  	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])
  1179  
  1180  	if err := tester.sync("attack", nil, mode); err == nil {
  1181  		t.Fatalf("succeeded attacker synchronisation")
  1182  	}
  1183  	// Synchronise with the valid peer and make sure sync succeeds
  1184  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1185  	if err := tester.sync("valid", nil, mode); err != nil {
  1186  		t.Fatalf("failed to synchronise blocks: %v", err)
  1187  	}
  1188  	assertOwnChain(t, tester, targetBlocks+1)
  1189  }
  1190  
  1191  // Tests that upon detecting an invalid header, the recent ones are rolled back
  1192  // for various failure scenarios. Afterwards a full sync is attempted to make
  1193  // sure no state was corrupted.
  1194  func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
  1195  func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
  1196  func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
  1197  
  1198  func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
  1199  	tester := newTester()
  1200  	defer tester.terminate()
  1201  
  1202  	// Create a small enough block chain to download
  1203  	targetBlocks := 3*fsHeaderSafetyNet + fsPivotInterval + fsMinFullBlocks
  1204  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1205  
  1206  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1207  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1208  	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
  1209  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
  1210  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])
  1211  
  1212  	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1213  		t.Fatalf("succeeded fast attacker synchronisation")
  1214  	}
  1215  	if head := tester.headHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1216  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1217  	}
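        	// Editor's note (illustrative arithmetic; in these testers hashes[0] is the
        	// head and hashes[len-1] the genesis): the withheld header is block number
        	// fsHeaderSafetyNet+MaxHeaderFetch, so at most the headers below it can be
        	// accepted before the gap is hit; rolling back fsHeaderSafetyNet of them
        	// leaves the head at or below MaxHeaderFetch, the bound asserted above. The
        	// same reasoning gives the 2*fsHeaderSafetyNet+MaxHeaderFetch bound checked
        	// for the block-attack and withhold-attack stages below.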
  1218  	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1219  	// This should result in the last fsHeaderSafetyNet headers being rolled
  1220  	// back, and also in the pivot point being reverted to a non-block status.
  1221  	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
  1222  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1223  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
  1224  	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
  1225  
  1226  	if err := tester.sync("block-attack", nil, mode); err == nil {
  1227  		t.Fatalf("succeeded block attacker synchronisation")
  1228  	}
  1229  	if head := tester.headHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1230  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1231  	}
  1232  	if mode == FastSync {
  1233  		if head := tester.headBlock().NumberU64(); head != 0 {
  1234  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1235  		}
  1236  	}
  1237  	// Attempt to sync with an attacker that withholds promised blocks after the
  1238  	// fast sync pivot point. This could be an attempt to leave the node with a bad
  1239  	// but already imported pivot block.
  1240  	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
  1241  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1242  
  1243  	tester.downloader.fsPivotFails = 0
  1244  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1245  		for i := missing; i <= len(hashes); i++ {
  1246  			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
  1247  		}
  1248  		tester.downloader.syncInitHook = nil
  1249  	}
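        	// Editor's note: the hook above fires once at sync initialisation, strips
        	// every header from the gap block up to the head (hashes[0] is the head),
        	// and then disarms itself so the deletion only runs for this one attempt.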
  1250  
  1251  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1252  		t.Fatalf("succeeded withholding attacker synchronisation")
  1253  	}
  1254  	if head := tester.headHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1255  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1256  	}
  1257  	if mode == FastSync {
  1258  		if head := tester.headBlock().NumberU64(); head != 0 {
  1259  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1260  		}
  1261  	}
  1262  	tester.downloader.fsPivotFails = fsCriticalTrials
  1263  
  1264  	// Synchronise with the valid peer and make sure sync succeeds. Since the last
  1265  	// rollback should also disable fast syncing for this process, verify that we
  1266  	// did a fresh full sync. Note, we can't assert anything about the receipts
  1267  	// since we won't purge the database of them, hence we can't use assertOwnChain.
  1268  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1269  	if err := tester.sync("valid", nil, mode); err != nil {
  1270  		t.Fatalf("failed to synchronise blocks: %v", err)
  1271  	}
  1272  	if hs := len(tester.ownHeaders); hs != len(headers) {
  1273  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
  1274  	}
  1275  	if mode != LightSync {
  1276  		if bs := len(tester.ownBlocks); bs != len(blocks) {
  1277  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
  1278  		}
  1279  	}
  1280  }
  1281  
  1282  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1283  // afterwards by not sending any useful hashes.
  1284  func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
  1285  func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1286  func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
  1287  func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
  1288  func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
  1289  func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
  1290  
  1291  func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1292  	t.Parallel()
  1293  
  1294  	tester := newTester()
  1295  	defer tester.terminate()
  1296  
  1297  	hashes, headers, blocks, receipts := tester.makeChain(0, 0, tester.genesis, nil, false)
  1298  	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
  1299  
  1300  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1301  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1302  	}
  1303  }
  1304  
  1305  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1306  func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
  1307  func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1308  func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1309  
  1310  func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1311  	// Define the disconnection requirement for individual hash fetch errors
  1312  	tests := []struct {
  1313  		result error
  1314  		drop   bool
  1315  	}{
  1316  		{nil, false},                        // Sync succeeded, all is well
  1317  		{errBusy, false},                    // Sync is already in progress, no problem
  1318  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1319  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1320  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1321  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1322  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1323  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1324  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1325  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1326  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1327  		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
  1328  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1329  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1330  		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
  1331  		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
  1332  		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
  1333  		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
  1334  		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
  1335  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1336  	}
  1337  	// Run the tests and check disconnection status
  1338  	tester := newTester()
  1339  	defer tester.terminate()
  1340  
  1341  	for i, tt := range tests {
  1342  		// Register a new peer and ensure its presence
  1343  		id := fmt.Sprintf("test %d", i)
  1344  		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
  1345  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1346  		}
  1347  		if _, ok := tester.peerHashes[id]; !ok {
  1348  			t.Fatalf("test %d: registered peer not found", i)
  1349  		}
  1350  		// Simulate a synchronisation and check the required result
  1351  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1352  
  1353  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  1354  		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
  1355  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1356  		}
  1357  	}
  1358  }
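
        // Editor's sketch (not part of the original suite; "demo" is a hypothetical
        // peer id): the drop table above works because synchroniseMock short-circuits
        // the real sync loop, so Synchronise exercises only the error-to-drop mapping.
        // One case in isolation looks like this:
        func exampleSynchroniseMockUsage(t *testing.T) {
        	tester := newTester()
        	defer tester.terminate()
        
        	// Register a peer that only knows the genesis block.
        	tester.newPeer("demo", 64, []common.Hash{tester.genesis.Hash()}, nil, nil, nil)
        
        	// Force the next synchronisation to fail with a drop-worthy error.
        	tester.downloader.synchroniseMock = func(string, common.Hash) error { return errStallingPeer }
        	tester.downloader.Synchronise("demo", tester.genesis.Hash(), big.NewInt(1000), FullSync)
        
        	// Stalling is a drop offence, so the peer should have been unregistered.
        	if _, ok := tester.peerHashes["demo"]; ok {
        		t.Errorf("stalling peer was not dropped")
        	}
        }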
  1359  
  1360  // Tests that synchronisation progress (origin block number, current block number
  1361  // and highest block number) is tracked and updated correctly.
  1362  func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
  1363  func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1364  func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
  1365  func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
  1366  func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
  1367  func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
  1368  
  1369  func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1370  	t.Parallel()
  1371  
  1372  	tester := newTester()
  1373  	defer tester.terminate()
  1374  
  1375  	// Create a small enough block chain to download
  1376  	targetBlocks := blockCacheLimit - 15
  1377  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1378  
  1379  	// Set a sync init hook to catch progress changes
  1380  	starting := make(chan struct{})
  1381  	progress := make(chan struct{})
  1382  
  1383  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1384  		starting <- struct{}{}
  1385  		<-progress
  1386  	}
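        	// The hook stalls the downloader right after it has determined the origin
        	// and latest blocks: the send on starting signals that a sync has begun, and
        	// the sync resumes only when the test replies on progress. This lets the
        	// assertions below sample Progress() at a deterministic mid-sync point.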
  1387  	// Retrieve the sync progress and ensure all values are zero (pristine sync)
  1388  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1389  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1390  	}
  1391  	// Synchronise half the blocks and check initial progress
  1392  	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts)
  1393  	pending := new(sync.WaitGroup)
  1394  	pending.Add(1)
  1395  
  1396  	go func() {
  1397  		defer pending.Done()
  1398  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1399  			t.Fatalf("failed to synchronise blocks: %v", err)
  1400  		}
  1401  	}()
  1402  	<-starting
  1403  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) {
  1404  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1)
  1405  	}
  1406  	progress <- struct{}{}
  1407  	pending.Wait()
  1408  
  1409  	// Synchronise all the blocks and check continuation progress
  1410  	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts)
  1411  	pending.Add(1)
  1412  
  1413  	go func() {
  1414  		defer pending.Done()
  1415  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1416  			t.Fatalf("failed to synchronise blocks: %v", err)
  1417  		}
  1418  	}()
  1419  	<-starting
  1420  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) {
  1421  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
  1422  	}
  1423  	progress <- struct{}{}
  1424  	pending.Wait()
  1425  
  1426  	// Check final progress after successful sync
  1427  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1428  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks)
  1429  	}
  1430  }
  1431  
  1432  // Tests that synchronisation progress (origin block number and highest block
  1433  // number) is tracked and updated correctly in case of a fork (or manual head
  1434  // reversal).
  1435  func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1436  func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1437  func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1438  func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1439  func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
  1440  func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
  1441  
  1442  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1443  	t.Parallel()
  1444  
  1445  	tester := newTester()
  1446  	defer tester.terminate()
  1447  
  1448  	// Create a forked chain to simulate origin reversal
  1449  	commonBlocks, fork := MaxHashFetch, 2*MaxHashFetch // commonBlocks: don't shadow the common package
  1450  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(commonBlocks+fork, fork, tester.genesis, nil, true)
  1451  
  1452  	// Set a sync init hook to catch progress changes
  1453  	starting := make(chan struct{})
  1454  	progress := make(chan struct{})
  1455  
  1456  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1457  		starting <- struct{}{}
  1458  		<-progress
  1459  	}
  1460  	// Retrieve the sync progress and ensure all values are zero (pristine sync)
  1461  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1462  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1463  	}
  1464  	// Synchronise with one of the forks and check progress
  1465  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
  1466  	pending := new(sync.WaitGroup)
  1467  	pending.Add(1)
  1468  
  1469  	go func() {
  1470  		defer pending.Done()
  1471  		if err := tester.sync("fork A", nil, mode); err != nil {
  1472  			t.Fatalf("failed to synchronise blocks: %v", err)
  1473  		}
  1474  	}()
  1475  	<-starting
  1476  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(len(hashesA)-1) {
  1477  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, len(hashesA)-1)
  1478  	}
  1479  	progress <- struct{}{}
  1480  	pending.Wait()
  1481  
  1482  	// Simulate a successful sync above the fork
  1483  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1484  
  1485  	// Synchronise with the second fork and check progress resets
  1486  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
  1487  	pending.Add(1)
  1488  
  1489  	go func() {
  1490  		defer pending.Done()
  1491  		if err := tester.sync("fork B", nil, mode); err != nil {
  1492  			t.Fatalf("failed to synchronise blocks: %v", err)
  1493  		}
  1494  	}()
  1495  	<-starting
  1496  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(commonBlocks) || progress.CurrentBlock != uint64(len(hashesA)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1497  		t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, commonBlocks, len(hashesA)-1, len(hashesB)-1)
  1498  	}
  1499  	progress <- struct{}{}
  1500  	pending.Wait()
  1501  
  1502  	// Check final progress after successful sync
  1503  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(commonBlocks) || progress.CurrentBlock != uint64(len(hashesB)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1504  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, commonBlocks, len(hashesB)-1, len(hashesB)-1)
  1505  	}
  1506  }
  1507  
  1508  // Tests that if synchronisation is aborted due to some failure, then the progress
  1509  // origin is not updated in the next sync cycle, as it should be considered the
  1510  // continuation of the previous sync and not a new instance.
  1511  func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1512  func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1513  func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1514  func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1515  func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1516  func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1517  
  1518  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1519  	t.Parallel()
  1520  
  1521  	tester := newTester()
  1522  	defer tester.terminate()
  1523  
  1524  	// Create a small enough block chain to download
  1525  	targetBlocks := blockCacheLimit - 15
  1526  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1527  
  1528  	// Set a sync init hook to catch progress changes
  1529  	starting := make(chan struct{})
  1530  	progress := make(chan struct{})
  1531  
  1532  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1533  		starting <- struct{}{}
  1534  		<-progress
  1535  	}
  1536  	// Retrieve the sync progress and ensure all values are zero (pristine sync)
  1537  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1538  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1539  	}
  1540  	// Attempt a full sync with a faulty peer
  1541  	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
  1542  	missing := targetBlocks / 2
  1543  	delete(tester.peerHeaders["faulty"], hashes[missing])
  1544  	delete(tester.peerBlocks["faulty"], hashes[missing])
  1545  	delete(tester.peerReceipts["faulty"], hashes[missing])
  1546  
  1547  	pending := new(sync.WaitGroup)
  1548  	pending.Add(1)
  1549  
  1550  	go func() {
  1551  		defer pending.Done()
  1552  		if err := tester.sync("faulty", nil, mode); err == nil {
  1553  			t.Fatalf("succeeded faulty synchronisation")
  1554  		}
  1555  	}()
  1556  	<-starting
  1557  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
  1558  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
  1559  	}
  1560  	progress <- struct{}{}
  1561  	pending.Wait()
  1562  
  1563  	// Synchronise with a good peer and check that the progress origin remains the same after a failure
  1564  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1565  	pending.Add(1)
  1566  
  1567  	go func() {
  1568  		defer pending.Done()
  1569  		if err := tester.sync("valid", nil, mode); err != nil {
  1570  			t.Fatalf("failed to synchronise blocks: %v", err)
  1571  		}
  1572  	}()
  1573  	<-starting
  1574  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
  1575  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
  1576  	}
  1577  	progress <- struct{}{}
  1578  	pending.Wait()
  1579  
  1580  	// Check final progress after successful sync
  1581  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1582  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks)
  1583  	}
  1584  }
  1585  
  1586  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1587  // the progress height is successfully reduced at the next sync invocation.
  1588  func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1589  func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1590  func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1591  func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1592  func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1593  func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1594  
  1595  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1596  	t.Parallel()
  1597  
  1598  	tester := newTester()
  1599  	defer tester.terminate()
  1600  
  1601  	// Create a small block chain
  1602  	targetBlocks := blockCacheLimit - 15
  1603  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false)
  1604  
  1605  	// Set a sync init hook to catch progress changes
  1606  	starting := make(chan struct{})
  1607  	progress := make(chan struct{})
  1608  
  1609  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1610  		starting <- struct{}{}
  1611  		<-progress
  1612  	}
  1613  	// Retrieve the sync progress and ensure all values are zero (pristine sync)
  1614  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1615  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1616  	}
  1617  	// Create and sync with an attacker that promises a higher chain than available
  1618  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1619  	for i := 1; i < 3; i++ {
  1620  		delete(tester.peerHeaders["attack"], hashes[i])
  1621  		delete(tester.peerBlocks["attack"], hashes[i])
  1622  		delete(tester.peerReceipts["attack"], hashes[i])
  1623  	}
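        	// The two blocks just below the advertised head are withheld, so the
        	// attacker claims a height of targetBlocks+3 that it can never deliver;
        	// the "valid" peer later serves hashes[3:], the true targetBlocks chain.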
  1624  
  1625  	pending := new(sync.WaitGroup)
  1626  	pending.Add(1)
  1627  
  1628  	go func() {
  1629  		defer pending.Done()
  1630  		if err := tester.sync("attack", nil, mode); err == nil {
  1631  			t.Fatalf("succeeded attacker synchronisation")
  1632  		}
  1633  	}()
  1634  	<-starting
  1635  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) {
  1636  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3)
  1637  	}
  1638  	progress <- struct{}{}
  1639  	pending.Wait()
  1640  
  1641  	// Synchronise with a good peer and check that the progress height has been reduced to the true value
  1642  	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts)
  1643  	pending.Add(1)
  1644  
  1645  	go func() {
  1646  		defer pending.Done()
  1647  		if err := tester.sync("valid", nil, mode); err != nil {
  1648  			t.Fatalf("failed to synchronise blocks: %v", err)
  1649  		}
  1650  	}()
  1651  	<-starting
  1652  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1653  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks)
  1654  	}
  1655  	progress <- struct{}{}
  1656  	pending.Wait()
  1657  
  1658  	// Check final progress after successful sync
  1659  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1660  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks)
  1661  	}
  1662  }
  1663  
  1664  // This test reproduces an issue where unexpected deliveries would
  1665  // block indefinitely if they arrived at the right time.
  1666  func TestDeliverHeadersHang62(t *testing.T)      { testDeliverHeadersHang(t, 62, FullSync) }
  1667  func TestDeliverHeadersHang63Full(t *testing.T)  { testDeliverHeadersHang(t, 63, FullSync) }
  1668  func TestDeliverHeadersHang63Fast(t *testing.T)  { testDeliverHeadersHang(t, 63, FastSync) }
  1669  func TestDeliverHeadersHang64Full(t *testing.T)  { testDeliverHeadersHang(t, 64, FullSync) }
  1670  func TestDeliverHeadersHang64Fast(t *testing.T)  { testDeliverHeadersHang(t, 64, FastSync) }
  1671  func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64, LightSync) }
  1672  
  1673  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1674  	t.Parallel()
  1675  
  1676  	master := newTester()
  1677  	defer master.terminate()
  1678  
  1679  	hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false)
  1680  	fakeHeads := []*types.Header{{}, {}, {}, {}}
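        	// The fake heads are empty headers delivered under unregistered peer ids,
        	// so they can never satisfy a pending request; the regression being tested
        	// is that such unsolicited deliveries return promptly instead of blocking.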
  1681  	for i := 0; i < 200; i++ {
  1682  		tester := newTester()
  1683  		tester.peerDb = master.peerDb
  1684  
  1685  		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1686  		// Whenever the downloader requests headers, flood it with
  1687  		// a lot of unrequested header deliveries.
  1688  		tester.downloader.peers.peers["peer"].getAbsHeaders = func(from uint64, count, skip int, reverse bool) error {
  1689  			deliveriesDone := make(chan struct{}, 500)
  1690  			for i := 0; i < cap(deliveriesDone); i++ {
  1691  				peer := fmt.Sprintf("fake-peer%d", i)
  1692  				go func() {
  1693  					tester.downloader.DeliverHeaders(peer, fakeHeads)
  1694  					deliveriesDone <- struct{}{}
  1695  				}()
  1696  			}
  1697  			// Deliver the actual requested headers.
  1698  			impl := tester.peerGetAbsHeadersFn("peer", 0)
  1699  			go impl(from, count, skip, reverse)
  1700  			// None of the extra deliveries should block.
  1701  			timeout := time.After(15 * time.Second)
  1702  			for i := 0; i < cap(deliveriesDone); i++ {
  1703  				select {
  1704  				case <-deliveriesDone:
  1705  				case <-timeout:
  1706  					panic("blocked")
  1707  				}
  1708  			}
  1709  			return nil
  1710  		}
  1711  		if err := tester.sync("peer", nil, mode); err != nil {
  1712  			t.Errorf("sync failed: %v", err)
  1713  		}
  1714  		tester.terminate()
  1715  	}
  1716  }
  1717  
  1718  // Tests that if fast sync aborts in the critical section, it can restart a few
  1719  // times before giving up.
  1720  func TestFastCriticalRestartsFail63(t *testing.T) { testFastCriticalRestarts(t, 63, false) }
  1721  func TestFastCriticalRestartsFail64(t *testing.T) { testFastCriticalRestarts(t, 64, false) }
  1722  func TestFastCriticalRestartsCont63(t *testing.T) { testFastCriticalRestarts(t, 63, true) }
  1723  func TestFastCriticalRestartsCont64(t *testing.T) { testFastCriticalRestarts(t, 64, true) }
  1724  
  1725  func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
  1726  	tester := newTester()
  1727  	defer tester.terminate()
  1728  
  1729  	// Create a large enough blockchain to actually fast sync on
  1730  	targetBlocks := fsMinFullBlocks + 2*fsPivotInterval - 15
  1731  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1732  
  1733  	// Create a tester peer with a critical section header missing (force failures)
  1734  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1735  	delete(tester.peerHeaders["peer"], hashes[fsMinFullBlocks-1])
  1736  	tester.downloader.dropPeer = func(id string) {} // We reuse the same "faulty" peer throughout the test
  1737  
  1738  	// Remove all possible pivot state roots and slow down replies (test failure resets later)
  1739  	for i := 0; i < fsPivotInterval; i++ {
  1740  		tester.peerMissingStates["peer"][headers[hashes[fsMinFullBlocks+i]].Root] = true
  1741  	}
  1742  	tester.downloader.peers.peers["peer"].getNodeData = tester.peerGetNodeDataFn("peer", 500*time.Millisecond) // Enough to reach the critical section
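        	// Editor's note: the 500ms reply delay above is assumed to keep state
        	// retrieval slow enough that each attempt is still inside the critical
        	// section when it trips over the withheld header, forcing a pivot failure.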
  1743  
  1744  	// Synchronise with the peer a few times and make sure they fail until the retry limit
  1745  	for i := 0; i < int(fsCriticalTrials)-1; i++ {
  1746  		// Attempt a sync and ensure it fails properly
  1747  		if err := tester.sync("peer", nil, FastSync); err == nil {
  1748  			t.Fatalf("failing fast sync succeeded: %v", err)
  1749  		}
  1750  		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
  1751  
  1752  		// If it's the first failure, the pivot should now be locked; restore the withheld header and re-enable everything but the pivot's state root to detect pivot movement
  1753  		if i == 0 {
  1754  			if tester.downloader.fsPivotLock == nil {
  1755  				time.Sleep(400 * time.Millisecond) // Make sure the first huge timeout expires too
  1756  				t.Fatalf("pivot block not locked in after critical section failure")
  1757  			}
  1758  			tester.lock.Lock()
  1759  			tester.peerHeaders["peer"][hashes[fsMinFullBlocks-1]] = headers[hashes[fsMinFullBlocks-1]]
  1760  			tester.peerMissingStates["peer"] = map[common.Hash]bool{tester.downloader.fsPivotLock.Root: true}
  1761  			tester.downloader.peers.peers["peer"].getNodeData = tester.peerGetNodeDataFn("peer", 0)
  1762  			tester.lock.Unlock()
  1763  		}
  1764  	}
  1765  	// Return all nodes if we're testing fast sync progression
  1766  	if progress {
  1767  		tester.lock.Lock()
  1768  		tester.peerMissingStates["peer"] = map[common.Hash]bool{}
  1769  		tester.lock.Unlock()
  1770  
  1771  		if err := tester.sync("peer", nil, FastSync); err != nil {
  1772  			t.Fatalf("failed to synchronise blocks in progressed fast sync: %v", err)
  1773  		}
  1774  		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
  1775  
  1776  		if fails := atomic.LoadUint32(&tester.downloader.fsPivotFails); fails != 1 {
  1777  			t.Fatalf("progressed pivot trial count mismatch: have %v, want %v", fails, 1)
  1778  		}
  1779  		assertOwnChain(t, tester, targetBlocks+1)
  1780  	} else {
  1781  		if err := tester.sync("peer", nil, FastSync); err == nil {
  1782  			t.Fatalf("succeeded to synchronise blocks in failed fast sync")
  1783  		}
  1784  		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
  1785  
  1786  		if fails := atomic.LoadUint32(&tester.downloader.fsPivotFails); fails != fsCriticalTrials {
  1787  			t.Fatalf("failed pivot trial count mismatch: have %v, want %v", fails, fsCriticalTrials)
  1788  		}
  1789  	}
  1790  	// Retry limit exhausted, downloader will switch to full sync, should succeed
  1791  	if err := tester.sync("peer", nil, FastSync); err != nil {
  1792  		t.Fatalf("failed to synchronise blocks in slow sync: %v", err)
  1793  	}
  1794  	// Note, we can't assert the chain here because the test asserter assumes sync
  1795  	// completed using a single mode of operation, whereas fast-then-slow can result
  1796  	// in arbitrary intermediate state that's not cleanly verifiable.
  1797  }